From 18b5021121211a430fe0949f047c18c89cb837a4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:01:12 -0700 Subject: [PATCH 001/112] auto-gen files --- sdk/ai/azure-ai-inference/CHANGELOG.md | 5 + sdk/ai/azure-ai-inference/LICENSE | 21 + sdk/ai/azure-ai-inference/MANIFEST.in | 7 + sdk/ai/azure-ai-inference/README.md | 45 + sdk/ai/azure-ai-inference/azure/__init__.py | 1 + .../azure-ai-inference/azure/ai/__init__.py | 1 + .../azure/ai/inference/__init__.py | 26 + .../azure/ai/inference/_client.py | 98 + .../azure/ai/inference/_configuration.py | 68 + .../azure/ai/inference/_model_base.py | 874 +++++++ .../ai/inference/_operations/__init__.py | 19 + .../ai/inference/_operations/_operations.py | 492 ++++ .../azure/ai/inference/_operations/_patch.py | 20 + .../azure/ai/inference/_patch.py | 20 + .../azure/ai/inference/_serialization.py | 1998 +++++++++++++++++ .../azure/ai/inference/_vendor.py | 26 + .../azure/ai/inference/_version.py | 9 + .../azure/ai/inference/aio/__init__.py | 23 + .../azure/ai/inference/aio/_client.py | 100 + .../azure/ai/inference/aio/_configuration.py | 68 + .../ai/inference/aio/_operations/__init__.py | 19 + .../inference/aio/_operations/_operations.py | 467 ++++ .../ai/inference/aio/_operations/_patch.py | 20 + .../azure/ai/inference/aio/_patch.py | 20 + .../azure/ai/inference/aio/_vendor.py | 26 + .../azure/ai/inference/models/__init__.py | 45 + .../azure/ai/inference/models/_enums.py | 33 + .../azure/ai/inference/models/_models.py | 589 +++++ .../azure/ai/inference/models/_patch.py | 20 + .../azure/ai/inference/py.typed | 1 + .../azure-ai-inference/dev_requirements.txt | 4 + sdk/ai/azure-ai-inference/setup.py | 71 + sdk/ai/azure-ai-inference/tsp-location.yaml | 4 + 33 files changed, 5240 insertions(+) create mode 100644 sdk/ai/azure-ai-inference/CHANGELOG.md create mode 100644 sdk/ai/azure-ai-inference/LICENSE create mode 100644 sdk/ai/azure-ai-inference/MANIFEST.in create mode 100644 sdk/ai/azure-ai-inference/README.md create mode 100644 sdk/ai/azure-ai-inference/azure/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_client.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_patch.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_serialization.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/_version.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py create mode 100644 
sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_patch.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py create mode 100644 sdk/ai/azure-ai-inference/azure/ai/inference/py.typed create mode 100644 sdk/ai/azure-ai-inference/dev_requirements.txt create mode 100644 sdk/ai/azure-ai-inference/setup.py create mode 100644 sdk/ai/azure-ai-inference/tsp-location.yaml diff --git a/sdk/ai/azure-ai-inference/CHANGELOG.md b/sdk/ai/azure-ai-inference/CHANGELOG.md new file mode 100644 index 000000000000..628743d283a9 --- /dev/null +++ b/sdk/ai/azure-ai-inference/CHANGELOG.md @@ -0,0 +1,5 @@ +# Release History + +## 1.0.0b1 (1970-01-01) + +- Initial version diff --git a/sdk/ai/azure-ai-inference/LICENSE b/sdk/ai/azure-ai-inference/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/ai/azure-ai-inference/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/MANIFEST.in b/sdk/ai/azure-ai-inference/MANIFEST.in new file mode 100644 index 000000000000..6af49607c8e6 --- /dev/null +++ b/sdk/ai/azure-ai-inference/MANIFEST.in @@ -0,0 +1,7 @@ +include *.md +include LICENSE +include azure/ai/inference/py.typed +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/__init__.py +include azure/ai/__init__.py \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md new file mode 100644 index 000000000000..497f318baeed --- /dev/null +++ b/sdk/ai/azure-ai-inference/README.md @@ -0,0 +1,45 @@ + + +# Azure AI Inference client library for Python + + +## Getting started + +### Installing the package + +```bash +python -m pip install azure-ai-inference +``` + +#### Prerequisites + +- Python 3.8 or later is required to use this package. +- You need an [Azure subscription][azure_sub] to use this package. +- An existing Azure AI Inference instance.
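+
+### Create a client and get a chat completion
+
+A minimal sketch of how the pieces fit together, assuming key-based
+authentication. The environment variable names below are illustrative, and the
+message and response shapes follow the JSON templates documented on
+`get_chat_completions`:
+
+```python
+import os
+
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.inference import ModelClient
+
+# The environment variable names here are assumptions; substitute your resource's values.
+client = ModelClient(
+    AzureKeyCredential(os.environ["AZURE_AI_INFERENCE_KEY"]),
+    endpoint=os.environ["AZURE_AI_INFERENCE_ENDPOINT"],
+)
+
+# The payload mirrors the JSON input template in the generated docstrings; the
+# returned ChatCompletions model is compatible with MutableMapping.
+result = client.get_chat_completions(
+    {"messages": [{"role": "user", "content": "How many feet are in a mile?"}]}
+)
+print(result["choices"][0]["message"]["content"])
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions.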
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. + + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ + diff --git a/sdk/ai/azure-ai-inference/azure/__init__.py b/sdk/ai/azure-ai-inference/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py new file mode 100644 index 000000000000..c92dce37ed18 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py @@ -0,0 +1,26 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._client import ModelClient +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ModelClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py new file mode 100644 index 000000000000..fc6789d4cf6d --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py @@ -0,0 +1,98 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING, Union + +from azure.core import PipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import ModelClientConfiguration +from ._operations import ModelClientOperationsMixin +from ._serialization import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ModelClient. + + :param credential: Credential needed for the client to connect to Azure. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword endpoint: Service host. Required. + :paramtype endpoint: str + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__( + self, credential: Union[AzureKeyCredential, "TokenCredential"], *, endpoint: str, **kwargs: Any + ) -> None: + self._config = ModelClientConfiguration(credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "ModelClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py new file mode 100644 index 000000000000..570277f2be83 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING, Union + +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for ModelClient. + + Note that all parameters used to create this instance are saved as instance + attributes. 
+ + :param credential: Credential needed for the client to connect to Azure. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://widget.contoso.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "api-key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py new file mode 100644 index 000000000000..1ddc071517d6 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py @@ -0,0 +1,874 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except + +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import copy +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] # pylint: disable=protected-access + except AttributeError: + return False + +
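+# Illustrative usage (an assumed example, not generated code): the encoder defined
+# below plugs into ``json.dumps`` via its ``cls`` argument, so datetime, bytes and
+# SDK model values serialize without extra handling:
+#
+#     import json
+#     from datetime import datetime, timezone
+#     json.dumps({"when": datetime(2024, 3, 26, tzinfo=timezone.utc)}, cls=SdkJSONEncoder)
+#     # -> '{"when": "2024-03-26T00:00:00Z"}'
+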
+class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. 
+ :param str attr: response string to be deserialized. + :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensures this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = copy.deepcopy(data) + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return
self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: + ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: + ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: + ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + return self._data.popitem() + + def clear(self) -> None: + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: + ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: + ... + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + 
rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. + raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: # pylint: disable=unused-argument + # we know the last three classes in mro are going to be 'Model', 'dict', and 'object' + mros = cls.__mro__[:-3][::-1] # ignore model, dict, and object parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") # pylint: disable=no-member + for k, v in mro_class.__annotations__.items() # pylint: disable=no-member + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + + return super().__new__(cls) # pylint: disable=no-value-for-parameter + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): # pylint: disable=no-member + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore # pylint: disable=no-member + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional[str]: + for v in cls.__dict__.values(): + if ( + isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators + ): # pylint: disable=protected-access + return v._rest_name # pylint: disable=protected-access + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): # pylint: disable=no-member + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + exist_discriminators.append(discriminator) + mapped_cls = cls.__mapping__.get(data.get(discriminator), cls) # pyright: ignore # pylint: disable=no-member + if mapped_cls == cls: + return cls(data) + return mapped_cls._deserialize(data, exist_discriminators) # pylint: disable=protected-access + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be serialized with json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties.
+ :returns: A dict JSON-compatible object + :rtype: dict + """ + + result = {} + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, R0912 + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation or annotation in [int, float]: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional?
+ try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + except AttributeError: + pass + + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often the most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in sorted( + annotation.__args__, key=lambda x: hasattr(x, "__name__") and x.__name__ == "str" # pyright: ignore + ) + ] + + def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + obj: typing.Dict[typing.Any, typing.Any], + ): + if obj is None: + return obj + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + return functools.partial( + _deserialize_dict, + value_deserializer, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + + def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + obj, + ): + if obj is None: + return obj + return type(obj)( + _deserialize(deserializer, entry, module) + for entry, deserializer in zip(obj, entry_deserializers) + ) + + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + obj, + ): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + return functools.partial(_deserialize_sequence, deserializer) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): + try: + if value is None or isinstance(value, _Null): + return None + if deserializer is None: + return value + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try:
return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin +) -> typing.Any: + 
return _RestField(name=name, type=type, is_discriminator=True) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py new file mode 100644 index 000000000000..886bf4218356 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import ModelClientOperationsMixin + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ModelClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py new file mode 100644 index 000000000000..43bc4f9a94d2 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -0,0 +1,492 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._vendor import ModelClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_model_get_chat_completions_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v1/chat/completions" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class ModelClientOperationsMixin(ModelClientMixinABC): + @overload + def get_chat_completions( + self, + chat_completion_options: _models.ChatCompletionsOptions, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Required. + :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # The input is polymorphic. The following are possible polymorphic inputs based off + discriminator "type": + + # JSON input template for discriminator value "json_object": + chat_completions_response_format = { + "type": "json_object" + } + + # JSON input template for discriminator value "text": + chat_completions_response_format = { + "type": "text" + } + + # JSON input template you can fill out and use as your body input. + chat_completion_options = { + "messages": [ + chat_request_message + ], + "frequency_penalty": 0.0, # Optional. A value that influences the + probability of generated tokens appearing based on their cumulative frequency in + generated text. Positive values will make tokens less likely to appear as their + frequency increases and decrease the likelihood of the model repeating the same + statements verbatim. + "max_tokens": 0, # Optional. The maximum number of tokens to generate. + "presence_penalty": 0.0, # Optional. 
A value that influences the probability + of generated tokens appearing based on their existing presence in generated text. + Positive values will make tokens less likely to appear when they already exist + and increase the model's likelihood to output new topics. + "response_format": chat_completions_response_format, + "seed": 0, # Optional. If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the system_fingerprint response parameter to monitor changes in + the backend. + "stop": [ + "str" # Optional. A collection of textual sequences that will end + completions generation. + ], + "stream": bool, # Optional. A value indicating whether chat completions + should be streamed for this request. + "temperature": 0.0, # Optional. The sampling temperature to use that + controls the apparent creativity of generated completions. Higher values will + make output more random while lower values will make results more focused and + deterministic. It is not recommended to modify temperature and top_p for the same + completions request as the interaction of these two settings is difficult to + predict. + "top_p": 0.0 # Optional. An alternative to sampling with temperature called + nucleus sampling. This value causes the model to consider the results of tokens + with the provided probability mass. As an example, a value of 0.15 will cause + only the tokens comprising the top 15% of probability mass to be considered. It + is not recommended to modify temperature and top_p for the same completions + request as the interaction of these two settings is difficult to predict. + } + + # response body for status code(s): 200 + response == { + "choices": [ + { + "finish_reason": "str", # The reason that this chat + completions choice completed its generation. Required. Known values are: + "stop", "length", and "content_filter". + "index": 0, # The ordered index associated with this chat + completions choice. Required. + "delta": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + }, + "message": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + } + } + ], + "created": "2020-02-20 00:00:00", # The first timestamp associated with + generation activity for this completions response, represented as seconds since + the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + "id": "str", # A unique identifier associated with this chat completions + response. Required. + "model": "str", # The model used for the chat completion. Required. + "object": "str", # The response object type, which is always + ``chat.completion``. Required. + "usage": { + "completion_tokens": 0, # The number of tokens generated across all + completions emissions. Required. + "prompt_tokens": 0, # The number of tokens in the provided prompts + for the completions request. Required. + "total_tokens": 0 # The total number of tokens processed for the + completions request and response. Required.
+ } + } + """ + + @overload + def get_chat_completions( + self, chat_completion_options: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Required. + :type chat_completion_options: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "choices": [ + { + "finish_reason": "str", # The reason that this chat + completions choice completed its generation. Required. Known values are: + "stop", "length", and "content_filter". + "index": 0, # The ordered index associated with this chat + completions choice. Required. + "delta": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + }, + "message": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + } + } + ], + "created": "2020-02-20 00:00:00", # The first timestamp associated with + generation activity for this completions response, represented as seconds since + the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + "id": "str", # A unique identifier associated with this chat completions + response. Required. + "model": "str", # The model used for the chat completion. Required. + "object": "str", # The response object type, which is always + ``chat.completion``. Required. + "usage": { + "completion_tokens": 0, # The number of tokens generated across all + completions emissions. Required. + "prompt_tokens": 0, # The number of tokens in the provided prompts + for the completions request. Required. + "total_tokens": 0 # The total number of tokens processed for the + completions request and response. Required. + } + } + """ + + @overload + def get_chat_completions( + self, chat_completion_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Required. + :type chat_completion_options: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + ..
code-block:: python
+
+                # response body for status code(s): 200
+                response == {
+                    "choices": [
+                        {
+                            "finish_reason": "str",  # The reason that this chat
+                              completions choice completed its generation. Required. Known values are:
+                              "stop", "length", and "content_filter".
+                            "index": 0,  # The ordered index associated with this chat
+                              completions choice. Required.
+                            "delta": {
+                                "content": "str",  # The content of the message.
+                                  Required.
+                                "role": "str"  # The chat role associated with the
+                                  message. Required. Known values are: "system", "assistant", and
+                                  "user".
+                            },
+                            "message": {
+                                "content": "str",  # The content of the message.
+                                  Required.
+                                "role": "str"  # The chat role associated with the
+                                  message. Required. Known values are: "system", "assistant", and
+                                  "user".
+                            }
+                        }
+                    ],
+                    "created": "2020-02-20 00:00:00",  # The first timestamp associated with
+                      generation activity for this completions response, represented as seconds since
+                      the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+                    "id": "str",  # A unique identifier associated with this chat completions
+                      response. Required.
+                    "model": "str",  # The model used for the chat completion. Required.
+                    "object": "str",  # The response object type, which is always
+                      ``chat.completion``. Required.
+                    "usage": {
+                        "completion_tokens": 0,  # The number of tokens generated across all
+                          completions emissions. Required.
+                        "prompt_tokens": 0,  # The number of tokens in the provided prompts
+                          for the completions request. Required.
+                        "total_tokens": 0  # The total number of tokens processed for the
+                          completions request and response. Required.
+                    }
+                }
+        """
+
+    @distributed_trace
+    def get_chat_completions(
+        self, chat_completion_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any
+    ) -> _models.ChatCompletions:
+        # pylint: disable=line-too-long
+        """Gets chat completions for the provided chat messages.
+        Completions support a wide variety of tasks and generate text that continues from or
+        "completes"
+        provided prompt data.
+
+        :param chat_completion_options: The JSON payload containing chat completion options. Is one of
+         the following types: ChatCompletionsOptions, JSON, IO[bytes] Required.
+        :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or
+         IO[bytes]
+        :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping
+        :rtype: ~azure.ai.inference.models.ChatCompletions
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        Example:
+            .. code-block:: python
+
+                # The input is polymorphic. The following are possible polymorphic inputs based off
+                  discriminator "type":
+
+                # JSON input template for discriminator value "json_object":
+                chat_completions_response_format = {
+                    "type": "json_object"
+                }
+
+                # JSON input template for discriminator value "text":
+                chat_completions_response_format = {
+                    "type": "text"
+                }
+
+                # JSON input template you can fill out and use as your body input.
+                chat_completion_options = {
+                    "messages": [
+                        chat_request_message
+                    ],
+                    "frequency_penalty": 0.0,  # Optional. A value that influences the
+                      probability of generated tokens appearing based on their cumulative frequency in
+                      generated text. Positive values will make tokens less likely to appear as their
+                      frequency increases and decrease the likelihood of the model repeating the same
+                      statements verbatim.
+                    "max_tokens": 0,  # Optional. The maximum number of tokens to generate.
+                    "presence_penalty": 0.0,  # Optional.
A value that influences the probability
+                      of generated tokens appearing based on their existing presence in generated text.
+                      Positive values will make tokens less likely to appear when they already exist
+                      and increase the model's likelihood to output new topics.
+                    "response_format": chat_completions_response_format,
+                    "seed": 0,  # Optional. If specified, the system will make a best effort to
+                      sample deterministically such that repeated requests with the same seed and
+                      parameters should return the same result. Determinism is not guaranteed, and you
+                      should refer to the system_fingerprint response parameter to monitor changes in
+                      the backend.
+                    "stop": [
+                        "str"  # Optional. A collection of textual sequences that will end
+                          completions generation.
+                    ],
+                    "stream": bool,  # Optional. A value indicating whether chat completions
+                      should be streamed for this request.
+                    "temperature": 0.0,  # Optional. The sampling temperature to use that
+                      controls the apparent creativity of generated completions. Higher values will
+                      make output more random while lower values will make results more focused and
+                      deterministic. It is not recommended to modify temperature and top_p for the same
+                      completions request as the interaction of these two settings is difficult to
+                      predict.
+                    "top_p": 0.0  # Optional. An alternative to sampling with temperature called
+                      nucleus sampling. This value causes the model to consider the results of tokens
+                      with the provided probability mass. As an example, a value of 0.15 will cause
+                      only the tokens comprising the top 15% of probability mass to be considered. It
+                      is not recommended to modify temperature and top_p for the same completions
+                      request as the interaction of these two settings is difficult to predict.
+                }
+
+                # response body for status code(s): 200
+                response == {
+                    "choices": [
+                        {
+                            "finish_reason": "str",  # The reason that this chat
+                              completions choice completed its generation. Required. Known values are:
+                              "stop", "length", and "content_filter".
+                            "index": 0,  # The ordered index associated with this chat
+                              completions choice. Required.
+                            "delta": {
+                                "content": "str",  # The content of the message.
+                                  Required.
+                                "role": "str"  # The chat role associated with the
+                                  message. Required. Known values are: "system", "assistant", and
+                                  "user".
+                            },
+                            "message": {
+                                "content": "str",  # The content of the message.
+                                  Required.
+                                "role": "str"  # The chat role associated with the
+                                  message. Required. Known values are: "system", "assistant", and
+                                  "user".
+                            }
+                        }
+                    ],
+                    "created": "2020-02-20 00:00:00",  # The first timestamp associated with
+                      generation activity for this completions response, represented as seconds since
+                      the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+                    "id": "str",  # A unique identifier associated with this chat completions
+                      response. Required.
+                    "model": "str",  # The model used for the chat completion. Required.
+                    "object": "str",  # The response object type, which is always
+                      ``chat.completion``. Required.
+                    "usage": {
+                        "completion_tokens": 0,  # The number of tokens generated across all
+                          completions emissions. Required.
+                        "prompt_tokens": 0,  # The number of tokens in the provided prompts
+                          for the completions request. Required.
+                        "total_tokens": 0  # The total number of tokens processed for the
+                          completions request and response. Required.
+ } + } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(chat_completion_options, (IOBase, bytes)): + _content = chat_completion_options + else: + _content = json.dumps(chat_completion_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_chat_completions_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ChatCompletions, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_serialization.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_serialization.py new file mode 100644 index 000000000000..2f781d740827 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_serialization.py @@ -0,0 +1,1998 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pylint: skip-file +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, + Mapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. 
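+
+        A minimal illustration of the JSON path:
+
+        .. code-block:: python
+
+            RawDeserializer.deserialize_from_text('{"a": 1}', "application/json")
+            # -> {'a': 1}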
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+        # Remove Byte Order Mark if present in string
+        data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err)
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # On Python 2.7, "fromstring" will scream if given a unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type".
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+
+class UTC(datetime.tzinfo):
+    """Time Zone info for handling UTC"""
+
+    def utcoffset(self, dt):
+        """UTC offset for UTC is 0."""
+        return datetime.timedelta(0)
+
+    def tzname(self, dt):
+        """Timestamp representation."""
+        return "Z"
+
+    def dst(self, dt):
+        """No daylight saving for UTC."""
+        return datetime.timedelta(0)
+
+
+try:
+    from datetime import timezone as _FixedOffset  # type: ignore
+except ImportError:  # Python 2.7
+
+    class _FixedOffset(datetime.tzinfo):  # type: ignore
+        """Fixed offset in minutes east from UTC.
Copy/pasted from Python doc
+        :param datetime.timedelta offset: offset in timedelta format
+        """
+
+        def __init__(self, offset):
+            self.__offset = offset
+
+        def utcoffset(self, dt):
+            return self.__offset
+
+        def tzname(self, dt):
+            return str(self.__offset.total_seconds() / 3600)
+
+        def __repr__(self):
+            return "<FixedOffset {}>".format(self.tzname(None))
+
+        def dst(self, dt):
+            return datetime.timedelta(0)
+
+        def __getinitargs__(self):
+            return (self.__offset,)
+
+
+try:
+    from datetime import timezone
+
+    TZ_UTC = timezone.utc
+except ImportError:
+    TZ_UTC = UTC()  # type: ignore
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node."""
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    else:
+        return ET.Element(tag)
+
+
+class Model(object):
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes."""
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes."""
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node."""
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs)  # type: ignore
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code:: python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
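+
+        A minimal sketch, assuming ``my_model`` is an instance of any generated
+        model class:
+
+        .. code:: python
+
+            raw = my_model.as_dict(key_transformer=full_restapi_key_transformer)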
+ + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) # type: ignore + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. 
Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. + + :param str key: A key string from the generated code + """ + return key.replace("\\.", ".") + + +class Serializer(object): + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize(self, target_obj, data_type=None, **kwargs): + """Serialize data into a string according to type. + + :param target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. 
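+
+        A minimal sketch (``MyModel`` stands in for any model class known to
+        this serializer's dependencies):
+
+        .. code-block:: python
+
+            Serializer({"MyModel": MyModel})._serialize(my_model)  # -> JSON-ready dict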
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() + try: + attributes = target_obj._attribute_map + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. 
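+                            # e.g. a child tag "{http://ns}Item" is renamed below to
+                            # "{http://ns}" + xml_name, while a tag with no namespace
+                            # is replaced by xml_name outright.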
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. 
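+
+        A minimal illustration (values are illustrative only):
+
+        .. code-block:: python
+
+            Serializer().query("api-version", "2024-02-01", "str")  # -> "2024-02-01"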
+ + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :keyword bool skip_quote: Whether to skip quote the serialized result. + Defaults to False. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise SerializationError(msg.format(data, data_type)) from err + else: + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. 
+ + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param data: Object to be serialized. + :param str data_type: Type of object in the iterable. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param data: Object to be serialized. + :rtype: str + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + else: + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list attr: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :keyword bool do_quote: Whether to quote the serialized result of each iterable element. + Defaults to False. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. 
+ :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. + :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. 
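+
+        A minimal illustration:
+
+        .. code-block:: python
+
+            Serializer.serialize_decimal(decimal.Decimal("3.14"))  # -> 3.14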
+ + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. 
+ :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
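+
+    For instance, an internal type whose ``_xml_map`` is
+    ``{"name": "Pet", "ns": "http://example.org"}`` (hypothetical values) yields
+    ``"{http://example.org}Pet"``.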
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + else: + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + else: # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
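+
+    A minimal usage sketch (``MyModel`` stands in for any generated model class):
+
+    .. code-block:: python
+
+        deserializer = Deserializer({"MyModel": MyModel})
+        model = deserializer("MyModel", '{"name": "value"}', content_type="application/json")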
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None: + return data + try: + attributes = response._attribute_map # type: ignore + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. 
If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. 
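+
+        For plain text or bytes plus a content type, this reduces to
+        ``deserialize_from_text``:
+
+        .. code-block:: python
+
+            Deserializer._unpack_content(b'{"a": 1}', "application/json")  # -> {'a': 1}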
+
+        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+        If we can't, raise. Your Pipeline should have a RawDeserializer.
+
+        If not a pipeline response and raw_data is bytes or string, use content-type
+        to decode it. If no content-type, try JSON.
+
+        If raw_data is something else, bypass all logic and return it directly.
+
+        :param raw_data: Data to be processed.
+        :param content_type: How to parse if raw_data is a string/bytes.
+        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+        :raises UnicodeDecodeError: If bytes is not UTF-8.
+        """
+        # Assume this is enough to detect a Pipeline Response without importing it
+        context = getattr(raw_data, "context", {})
+        if context:
+            if RawDeserializer.CONTEXT_NAME in context:
+                return context[RawDeserializer.CONTEXT_NAME]
+            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+        # Assume this is enough to recognize universal_http.ClientResponse without importing it
+        if hasattr(raw_data, "body"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+        # Assume this is enough to recognize requests.Response without importing it.
+        if hasattr(raw_data, "_content_consumed"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
+        return raw_data
+
+    def _instantiate_model(self, response, attrs, additional_properties=None):
+        """Instantiate a response model passing in deserialized args.
+
+        :param response: The response model class.
+        :param attrs: The deserialized response attributes.
+        """
+        if callable(response):
+            subtype = getattr(response, "_subtype_map", {})
+            try:
+                readonly = [k for k, v in response._validation.items() if v.get("readonly")]
+                const = [k for k, v in response._validation.items() if v.get("constant")]
+                kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
+                response_obj = response(**kwargs)
+                for attr in readonly:
+                    setattr(response_obj, attr, attrs.get(attr))
+                if additional_properties:
+                    response_obj.additional_properties = additional_properties
+                return response_obj
+            except TypeError as err:
+                msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
+                raise DeserializationError(msg + str(err))
+        else:
+            try:
+                for attr, value in attrs.items():
+                    setattr(response, attr, value)
+                return response
+            except Exception as exp:
+                msg = "Unable to populate response model. "
+                msg += "Type: {}, Error: {}".format(type(response), exp)
+                raise DeserializationError(msg)
+
+    def deserialize_data(self, data, data_type):
+        """Process data for deserialization according to data type.
+
+        :param str data: The response string to be deserialized.
+        :param str data_type: The type to deserialize to.
+        :raises: DeserializationError if deserialization fails.
+        :return: Deserialized object.
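+
+        Example (an illustrative sketch only; the literal inputs below are
+        hypothetical and assume a ``Deserializer`` created with defaults):
+
+        .. code-block:: python
+
+            deserializer = Deserializer()
+            # Basic types are parsed from their string form.
+            deserializer.deserialize_data("42", "int")  # returns 42
+            # "[...]"-style types deserialize each element of an iterable.
+            deserializer.deserialize_data(["1", "2"], "[int]")  # returns [1, 2]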
+ """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + else: + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): + """Deserialize basic builtin data type from string. 
+        Will attempt to convert to str, int, float and bool.
+        This function will also accept '1', '0', 'true' and 'false' as
+        valid bool values.
+
+        :param str attr: response string to be deserialized.
+        :param str data_type: deserialization data type.
+        :rtype: str, int, float or bool
+        :raises: TypeError if string format is not valid.
+        """
+        # If we're here, data is supposed to be a basic type.
+        # If it's still an XML node, take the text
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if not attr:
+            if data_type == "str":
+                # None or '', node is empty string.
+                return ""
+            else:
+                # None or '', node with a strong type is None.
+                # Don't try to model "empty bool" or "empty int"
+                return None
+
+        if data_type == "bool":
+            if attr in [True, False, 1, 0]:
+                return bool(attr)
+            elif isinstance(attr, str):
+                if attr.lower() in ["true", "1"]:
+                    return True
+                elif attr.lower() in ["false", "0"]:
+                    return False
+            raise TypeError("Invalid boolean value: {}".format(attr))
+
+        if data_type == "str":
+            return self.deserialize_unicode(attr)
+        return eval(data_type)(attr)  # nosec
+
+    @staticmethod
+    def deserialize_unicode(data):
+        """Preserve unicode objects in Python 2, otherwise return data
+        as a string.
+
+        :param str data: response string to be deserialized.
+        :rtype: str or unicode
+        """
+        # We might be here because we have an enum modeled as string,
+        # and we try to deserialize a partial dict with enum inside
+        if isinstance(data, Enum):
+            return data
+
+        # Consider this a real string
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                return data
+        except NameError:
+            return str(data)
+        else:
+            return str(data)
+
+    @staticmethod
+    def deserialize_enum(data, enum_obj):
+        """Deserialize string into enum object.
+
+        If the string is not a valid enum value it will be returned as-is
+        and a warning will be logged.
+
+        :param str data: Response string to be deserialized. If this value is
+         None or invalid it will be returned as-is.
+        :param Enum enum_obj: Enum object to deserialize to.
+        :rtype: Enum
+        """
+        if isinstance(data, enum_obj) or data is None:
+            return data
+        if isinstance(data, Enum):
+            data = data.value
+        if isinstance(data, int):
+            # Workaround. We might consider removing it in the future.
+            try:
+                return list(enum_obj.__members__.values())[data]
+            except IndexError:
+                error = "{!r} is not a valid index for enum {!r}"
+                raise DeserializationError(error.format(data, enum_obj))
+        try:
+            return enum_obj(str(data))
+        except ValueError:
+            for enum_value in enum_obj:
+                if enum_value.value.lower() == str(data).lower():
+                    return enum_value
+            # We don't fail anymore for unknown value, we deserialize as a string
+            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+            return Deserializer.deserialize_unicode(data)
+
+    @staticmethod
+    def deserialize_bytearray(attr):
+        """Deserialize string into bytearray.
+
+        :param str attr: response string to be deserialized.
+        :rtype: bytearray
+        :raises: TypeError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return bytearray(b64decode(attr))  # type: ignore
+
+    @staticmethod
+    def deserialize_base64(attr):
+        """Deserialize base64-encoded string into bytes.
+
+        :param str attr: response string to be deserialized.
+        :rtype: bytes
+        :raises: TypeError if string format invalid.
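+
+        Example (an illustrative sketch only; the sample value is hypothetical):
+
+        .. code-block:: python
+
+            # "aGVsbG8" is unpadded, URL-safe base64 for b"hello"; the helper
+            # restores "=" padding and maps "-"/"_" to "+"/"/" before decoding.
+            Deserializer.deserialize_base64("aGVsbG8")  # returns b"hello"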
+ """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :rtype: Decimal + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + else: + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + else: + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. 
+
+        :param str attr: response string to be deserialized.
+        :rtype: Datetime
+        :raises: DeserializationError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        else:
+            return date_obj
+
+    @staticmethod
+    def deserialize_unix(attr):
+        """Deserialize Unix time into Datetime object.
+        The input is represented as seconds since the epoch.
+
+        :param int attr: Object to be deserialized.
+        :rtype: Datetime
+        :raises: DeserializationError if format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        else:
+            return date_obj
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py
new file mode 100644
index 000000000000..554de774a90f
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py
@@ -0,0 +1,26 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from abc import ABC
+from typing import TYPE_CHECKING
+
+from ._configuration import ModelClientConfiguration
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from azure.core import PipelineClient
+
+    from ._serialization import Deserializer, Serializer
+
+
+class ModelClientMixinABC(ABC):
+    """DO NOT use this class. It is for internal typing use only."""
+
+    _client: "PipelineClient"
+    _config: ModelClientConfiguration
+    _serialize: "Serializer"
+    _deserialize: "Deserializer"
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_version.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_version.py
new file mode 100644
index 000000000000..be71c81bd282
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_version.py
@@ -0,0 +1,9 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py new file mode 100644 index 000000000000..2f40b73be25b --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._client import ModelClient + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ModelClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py new file mode 100644 index 000000000000..89a809d11985 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING, Union + +from azure.core import AsyncPipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._serialization import Deserializer, Serializer +from ._configuration import ModelClientConfiguration +from ._operations import ModelClientOperationsMixin + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ModelClient. + + :param credential: Credential needed for the client to connect to Azure. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword endpoint: Service host. Required. + :paramtype endpoint: str + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__( + self, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], *, endpoint: str, **kwargs: Any + ) -> None: + self._config = ModelClientConfiguration(credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "ModelClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py new file mode 100644 index 000000000000..f8849f1c207d --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING, Union + +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for ModelClient. 
+ + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://widget.contoso.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "api-key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py new file mode 100644 index 000000000000..886bf4218356 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._operations import ModelClientOperationsMixin + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ModelClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py new file mode 100644 index 000000000000..c6cfbec9538d --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -0,0 +1,467 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._operations._operations import build_model_get_chat_completions_request +from .._vendor import ModelClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ModelClientOperationsMixin(ModelClientMixinABC): + @overload + async def get_chat_completions( + self, + chat_completion_options: _models.ChatCompletionsOptions, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Required. + :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # The input is polymorphic. 
The following are possible polymorphic inputs based off + discriminator "type": + + # JSON input template for discriminator value "json_object": + chat_completions_response_format = { + "type": "json_object" + } + + # JSON input template for discriminator value "text": + chat_completions_response_format = { + "type": "text" + } + + # JSON input template you can fill out and use as your body input. + chat_completion_options = { + "messages": [ + chat_request_message + ], + "frequency_penalty": 0.0, # Optional. A value that influences the + probability of generated tokens appearing based on their cumulative frequency in + generated text. Positive values will make tokens less likely to appear as their + frequency increases and decrease the likelihood of the model repeating the same + statements verbatim. + "max_tokens": 0, # Optional. The maximum number of tokens to generate. + "presence_penalty": 0.0, # Optional. A value that influences the probability + of generated tokens appearing based on their existing presence in generated text. + Positive values will make tokens less likely to appear when they already exist + and increase the model's likelihood to output new topics. + "response_format": chat_completions_response_format, + "seed": 0, # Optional. If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the system_fingerprint response parameter to monitor changes in + the backend.". + "stop": [ + "str" # Optional. A collection of textual sequences that will end + completions generation. + ], + "stream": bool, # Optional. A value indicating whether chat completions + should be streamed for this request. + "temperature": 0.0, # Optional. The sampling temperature to use that + controls the apparent creativity of generated completions. Higher values will + make output more random while lower values will make results more focused and + deterministic. It is not recommended to modify temperature and top_p for the same + completions request as the interaction of these two settings is difficult to + predict. + "top_p": 0.0 # Optional. An alternative to sampling with temperature called + nucleus sampling. This value causes the model to consider the results of tokens + with the provided probability mass. As an example, a value of 0.15 will cause + only the tokens comprising the top 15% of probability mass to be considered. It + is not recommended to modify temperature and top_p for the same completions + request as the interaction of these two settings is difficult to predict. + } + + # response body for status code(s): 200 + response == { + "choices": [ + { + "finish_reason": "str", # The reason that this chat + completions choice completed its generated. Required. Known values are: + "stop", "length", and "content_filter". + "index": 0, # The ordered index associated with this chat + completions choice. Required. + "delta": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + }, + "message": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". 
+ } + } + ], + "created": "2020-02-20 00:00:00", # The first timestamp associated with + generation activity for this completions response, represented as seconds since + the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + "id": "str", # A unique identifier associated with this chat completions + response. Required. + "model": "str", # The model used for the chat completion. Required. + "object": "str", # The response object type, which is always + ``chat.completion``. Required. + "usage": { + "completion_tokens": 0, # The number of tokens generated across all + completions emissions. Required. + "prompt_tokens": 0, # The number of tokens in the provided prompts + for the completions request. Required. + "total_tokens": 0 # The total number of tokens processed for the + completions request and response. Required. + } + } + """ + + @overload + async def get_chat_completions( + self, chat_completion_options: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Required. + :type chat_completion_options: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "choices": [ + { + "finish_reason": "str", # The reason that this chat + completions choice completed its generated. Required. Known values are: + "stop", "length", and "content_filter". + "index": 0, # The ordered index associated with this chat + completions choice. Required. + "delta": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + }, + "message": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + } + } + ], + "created": "2020-02-20 00:00:00", # The first timestamp associated with + generation activity for this completions response, represented as seconds since + the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + "id": "str", # A unique identifier associated with this chat completions + response. Required. + "model": "str", # The model used for the chat completion. Required. + "object": "str", # The response object type, which is always + ``chat.completion``. Required. + "usage": { + "completion_tokens": 0, # The number of tokens generated across all + completions emissions. Required. + "prompt_tokens": 0, # The number of tokens in the provided prompts + for the completions request. Required. + "total_tokens": 0 # The total number of tokens processed for the + completions request and response. Required. 
+ } + } + """ + + @overload + async def get_chat_completions( + self, chat_completion_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Required. + :type chat_completion_options: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "choices": [ + { + "finish_reason": "str", # The reason that this chat + completions choice completed its generated. Required. Known values are: + "stop", "length", and "content_filter". + "index": 0, # The ordered index associated with this chat + completions choice. Required. + "delta": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + }, + "message": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + } + } + ], + "created": "2020-02-20 00:00:00", # The first timestamp associated with + generation activity for this completions response, represented as seconds since + the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + "id": "str", # A unique identifier associated with this chat completions + response. Required. + "model": "str", # The model used for the chat completion. Required. + "object": "str", # The response object type, which is always + ``chat.completion``. Required. + "usage": { + "completion_tokens": 0, # The number of tokens generated across all + completions emissions. Required. + "prompt_tokens": 0, # The number of tokens in the provided prompts + for the completions request. Required. + "total_tokens": 0 # The total number of tokens processed for the + completions request and response. Required. + } + } + """ + + @distributed_trace_async + async def get_chat_completions( + self, chat_completion_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any + ) -> _models.ChatCompletions: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + :param chat_completion_options: The JSON payload containing chat completion options. Is one of + the following types: ChatCompletionsOptions, JSON, IO[bytes] Required. + :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or + IO[bytes] + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # The input is polymorphic. 
The following are possible polymorphic inputs based off + discriminator "type": + + # JSON input template for discriminator value "json_object": + chat_completions_response_format = { + "type": "json_object" + } + + # JSON input template for discriminator value "text": + chat_completions_response_format = { + "type": "text" + } + + # JSON input template you can fill out and use as your body input. + chat_completion_options = { + "messages": [ + chat_request_message + ], + "frequency_penalty": 0.0, # Optional. A value that influences the + probability of generated tokens appearing based on their cumulative frequency in + generated text. Positive values will make tokens less likely to appear as their + frequency increases and decrease the likelihood of the model repeating the same + statements verbatim. + "max_tokens": 0, # Optional. The maximum number of tokens to generate. + "presence_penalty": 0.0, # Optional. A value that influences the probability + of generated tokens appearing based on their existing presence in generated text. + Positive values will make tokens less likely to appear when they already exist + and increase the model's likelihood to output new topics. + "response_format": chat_completions_response_format, + "seed": 0, # Optional. If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the system_fingerprint response parameter to monitor changes in + the backend.". + "stop": [ + "str" # Optional. A collection of textual sequences that will end + completions generation. + ], + "stream": bool, # Optional. A value indicating whether chat completions + should be streamed for this request. + "temperature": 0.0, # Optional. The sampling temperature to use that + controls the apparent creativity of generated completions. Higher values will + make output more random while lower values will make results more focused and + deterministic. It is not recommended to modify temperature and top_p for the same + completions request as the interaction of these two settings is difficult to + predict. + "top_p": 0.0 # Optional. An alternative to sampling with temperature called + nucleus sampling. This value causes the model to consider the results of tokens + with the provided probability mass. As an example, a value of 0.15 will cause + only the tokens comprising the top 15% of probability mass to be considered. It + is not recommended to modify temperature and top_p for the same completions + request as the interaction of these two settings is difficult to predict. + } + + # response body for status code(s): 200 + response == { + "choices": [ + { + "finish_reason": "str", # The reason that this chat + completions choice completed its generated. Required. Known values are: + "stop", "length", and "content_filter". + "index": 0, # The ordered index associated with this chat + completions choice. Required. + "delta": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". + }, + "message": { + "content": "str", # The content of the message. + Required. + "role": "str" # The chat role associated with the + message. Required. Known values are: "system", "assistant", and + "user". 
+ } + } + ], + "created": "2020-02-20 00:00:00", # The first timestamp associated with + generation activity for this completions response, represented as seconds since + the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + "id": "str", # A unique identifier associated with this chat completions + response. Required. + "model": "str", # The model used for the chat completion. Required. + "object": "str", # The response object type, which is always + ``chat.completion``. Required. + "usage": { + "completion_tokens": 0, # The number of tokens generated across all + completions emissions. Required. + "prompt_tokens": 0, # The number of tokens in the provided prompts + for the completions request. Required. + "total_tokens": 0 # The total number of tokens processed for the + completions request and response. Required. + } + } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(chat_completion_options, (IOBase, bytes)): + _content = chat_completion_options + else: + _content = json.dumps(chat_completion_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_chat_completions_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ChatCompletions, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py new file mode 100644 index 000000000000..97e248d5f6fb --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py @@ -0,0 +1,26 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import TYPE_CHECKING + +from ._configuration import ModelClientConfiguration + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core import AsyncPipelineClient + + from .._serialization import Deserializer, Serializer + + +class ModelClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: ModelClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py new file mode 100644 index 000000000000..af27c51657ba --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._models import ChatChoice +from ._models import ChatCompletions +from ._models import ChatCompletionsJsonResponseFormat +from ._models import ChatCompletionsOptions +from ._models import ChatCompletionsResponseFormat +from ._models import ChatCompletionsTextResponseFormat +from ._models import ChatRequestAssistantMessage +from ._models import ChatRequestMessage +from ._models import ChatRequestSystemMessage +from ._models import ChatRequestUserMessage +from ._models import ChatResponseMessage +from ._models import CompletionsUsage + +from ._enums import ChatRole +from ._enums import CompletionsFinishReason +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ChatChoice", + "ChatCompletions", + "ChatCompletionsJsonResponseFormat", + "ChatCompletionsOptions", + "ChatCompletionsResponseFormat", + "ChatCompletionsTextResponseFormat", + "ChatRequestAssistantMessage", + "ChatRequestMessage", + "ChatRequestSystemMessage", + "ChatRequestUserMessage", + "ChatResponseMessage", + "CompletionsUsage", + "ChatRole", + "CompletionsFinishReason", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py new file mode 100644 index 000000000000..70938ba01774 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class ChatRole(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """A description of the intended purpose of a message within a chat completions interaction."""
+
+    SYSTEM = "system"
+    """The role that instructs or sets the behavior of the assistant."""
+    ASSISTANT = "assistant"
+    """The role that provides responses to system-instructed, user-prompted input."""
+    USER = "user"
+    """The role that provides input for chat completions."""
+
+
+class CompletionsFinishReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Representation of the manner in which a completions response concluded."""
+
+    STOPPED = "stop"
+    """Completions ended normally and reached the end of token generation."""
+    TOKEN_LIMIT_REACHED = "length"
+    """Completions exhausted available token limits before generation could complete."""
+    CONTENT_FILTERED = "content_filter"
+    """Completions generated a response that was identified as potentially sensitive per content
+    moderation policies."""
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
new file mode 100644
index 000000000000..c7a48e7da526
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
@@ -0,0 +1,589 @@
+# coding=utf-8
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload
+
+from .. import _model_base
+from .._model_base import rest_discriminator, rest_field
+from ._enums import ChatRole
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from .. import models as _models
+
+
+class ChatChoice(_model_base.Model):
+    """The representation of a single prompt completion as part of an overall chat completions
+    request.
+    Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+    Token limits and other settings may limit the number of choices generated.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar message: The chat message for a given chat completions prompt.
+    :vartype message: ~azure.ai.inference.models.ChatResponseMessage
+    :ivar index: The ordered index associated with this chat completions choice. Required.
+    :vartype index: int
+    :ivar finish_reason: The reason that this chat completions choice completed its generation.
+     Required. Known values are: "stop", "length", and "content_filter".
+    :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason
+    :ivar delta: The delta message content for a streaming response.
+    :vartype delta: ~azure.ai.inference.models.ChatResponseMessage
+    """
+
+    message: Optional["_models.ChatResponseMessage"] = rest_field()
+    """The chat message for a given chat completions prompt."""
+    index: int = rest_field()
+    """The ordered index associated with this chat completions choice. Required."""
+    finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field()
+    """The reason that this chat completions choice completed its generation. Required. Known values
+    are: \"stop\", \"length\", and \"content_filter\"."""
+    delta: Optional["_models.ChatResponseMessage"] = rest_field()
+    """The delta message content for a streaming response."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        finish_reason: Union[str, "_models.CompletionsFinishReason"],
+        message: Optional["_models.ChatResponseMessage"] = None,
+        delta: Optional["_models.ChatResponseMessage"] = None,
+    ):
+        ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class ChatCompletions(_model_base.Model):
+    """Representation of the response data from a chat completions request.
+    Completions support a wide variety of tasks and generate text that continues from or
+    "completes"
+    provided prompt data.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar id: A unique identifier associated with this chat completions response. Required.
+    :vartype id: str
+    :ivar created: The first timestamp associated with generation activity for this completions
+     response,
+     represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+    :vartype created: ~datetime.datetime
+    :ivar choices: The collection of completions choices associated with this completions response.
+     Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+     Token limits and other settings may limit the number of choices generated. Required.
+    :vartype choices: list[~azure.ai.inference.models.ChatChoice]
+    :ivar usage: Usage information for tokens processed and generated as part of this completions
+     operation. Required.
+    :vartype usage: ~azure.ai.inference.models.CompletionsUsage
+    :ivar object: The response object type, which is always ``chat.completion``. Required.
+    :vartype object: str
+    :ivar model: The model used for the chat completion. Required.
+    :vartype model: str
+    """
+
+    id: str = rest_field()
+    """A unique identifier associated with this chat completions response. Required."""
+    created: datetime.datetime = rest_field(format="unix-timestamp")
+    """The first timestamp associated with generation activity for this completions response,
+    represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required."""
+    choices: List["_models.ChatChoice"] = rest_field()
+    """The collection of completions choices associated with this completions response.
+    Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+    Token limits and other settings may limit the number of choices generated. Required."""
+    usage: "_models.CompletionsUsage" = rest_field()
+    """Usage information for tokens processed and generated as part of this completions operation.
+    Required."""
+    object: str = rest_field()
+    """The response object type, which is always ``chat.completion``. Required."""
+    model: str = rest_field()
+    """The model used for the chat completion.
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created: datetime.datetime, + choices: List["_models.ChatChoice"], + usage: "_models.CompletionsUsage", + object: str, + model: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatCompletionsResponseFormat(_model_base.Model): + """An abstract representation of a response format configuration usable by Chat Completions. Can + be used to enable JSON + mode. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatCompletionsJsonResponseFormat, ChatCompletionsTextResponseFormat + + All required parameters must be populated in order to send to server. + + :ivar type: The discriminated type for the response format. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The discriminated type for the response format. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatCompletionsJsonResponseFormat(ChatCompletionsResponseFormat, discriminator="json_object"): + """A response format for Chat Completions that restricts responses to emitting valid JSON objects. + + All required parameters must be populated in order to send to server. + + :ivar type: The discriminated object type, which is always 'json_object' for this format. + Required. Default value is "json_object". + :vartype type: str + """ + + type: Literal["json_object"] = rest_discriminator(name="type") # type: ignore + """The discriminated object type, which is always 'json_object' for this format. Required. Default + value is \"json_object\".""" + + +class ChatCompletionsOptions(_model_base.Model): + """The configuration information for a chat completions request. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + All required parameters must be populated in order to send to server. + + :ivar messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :vartype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :ivar max_tokens: The maximum number of tokens to generate. + :vartype max_tokens: int + :ivar temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. + Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. 
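+     For example (illustrative values only, not from the service specification): a low setting
+     such as temperature=0.2 tends to produce focused, repeatable answers, while a high setting
+     such as temperature=0.9 yields more varied text; the exact effect depends on the model
+     hosted behind the endpoint.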
+ :vartype temperature: float + :ivar top_p: An alternative to sampling with temperature called nucleus sampling. This value + causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. + :vartype top_p: float + :ivar stop: A collection of textual sequences that will end completions generation. + :vartype stop: list[str] + :ivar presence_penalty: A value that influences the probability of generated tokens appearing + based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. + :vartype presence_penalty: float + :ivar frequency_penalty: A value that influences the probability of generated tokens appearing + based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. + :vartype frequency_penalty: float + :ivar stream: A value indicating whether chat completions should be streamed for this request. + :vartype stream: bool + :ivar seed: If specified, the system will make a best effort to sample deterministically such + that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed, and you + should refer to the + system_fingerprint response parameter to monitor changes in the backend.". + :vartype seed: int + :ivar response_format: An object specifying the format that the model must output. Used to + enable JSON mode. + :vartype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat + """ + + messages: List["_models.ChatRequestMessage"] = rest_field() + """The collection of context messages associated with this chat completions request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required.""" + max_tokens: Optional[int] = rest_field() + """The maximum number of tokens to generate.""" + temperature: Optional[float] = rest_field() + """The sampling temperature to use that controls the apparent creativity of generated completions. + Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict.""" + top_p: Optional[float] = rest_field() + """An alternative to sampling with temperature called nucleus sampling. This value causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. 
+ It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict.""" + stop: Optional[List[str]] = rest_field() + """A collection of textual sequences that will end completions generation.""" + presence_penalty: Optional[float] = rest_field() + """A value that influences the probability of generated tokens appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase the + model's likelihood to output new topics.""" + frequency_penalty: Optional[float] = rest_field() + """A value that influences the probability of generated tokens appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim.""" + stream: Optional[bool] = rest_field() + """A value indicating whether chat completions should be streamed for this request.""" + seed: Optional[int] = rest_field() + """If specified, the system will make a best effort to sample deterministically such that repeated + requests with the + same seed and parameters should return the same result. Determinism is not guaranteed, and you + should refer to the + system_fingerprint response parameter to monitor changes in the backend.\".""" + response_format: Optional["_models.ChatCompletionsResponseFormat"] = rest_field() + """An object specifying the format that the model must output. Used to enable JSON mode.""" + + @overload + def __init__( + self, + *, + messages: List["_models.ChatRequestMessage"], + max_tokens: Optional[int] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + stop: Optional[List[str]] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + stream: Optional[bool] = None, + seed: Optional[int] = None, + response_format: Optional["_models.ChatCompletionsResponseFormat"] = None, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatCompletionsTextResponseFormat(ChatCompletionsResponseFormat, discriminator="text"): + """The standard Chat Completions response format that can freely generate text and is not + guaranteed to produce response + content that adheres to a specific schema. + + All required parameters must be populated in order to send to server. + + :ivar type: The discriminated object type, which is always 'text' for this format. Required. + Default value is "text". + :vartype type: str + """ + + type: Literal["text"] = rest_discriminator(name="type") # type: ignore + """The discriminated object type, which is always 'text' for this format. Required. Default value + is \"text\".""" + + +class ChatRequestMessage(_model_base.Model): + """An abstract representation of a chat message as provided in a request. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatRequestAssistantMessage, ChatRequestSystemMessage, ChatRequestUserMessage + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message. Required. 
Known values are: "system", + "assistant", and "user". + :vartype role: str or ~azure.ai.inference.models.ChatRole + """ + + __mapping__: Dict[str, _model_base.Model] = {} + role: str = rest_discriminator(name="role") + """The chat role associated with this message. Required. Known values are: \"system\", + \"assistant\", and \"user\".""" + + @overload + def __init__( + self, + *, + role: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatRequestAssistantMessage(ChatRequestMessage, discriminator="assistant"): + """A request chat message representing response or action from the assistant. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message, which is always 'assistant' for + assistant messages. Required. The role that provides responses to system-instructed, + user-prompted input. + :vartype role: str or ~azure.ai.inference.models.ASSISTANT + :ivar content: The content of the message. Required. + :vartype content: str + :ivar name: An optional name for the participant. + :vartype name: str + """ + + role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role") # type: ignore + """The chat role associated with this message, which is always 'assistant' for assistant messages. + Required. The role that provides responses to system-instructed, user-prompted input.""" + content: str = rest_field() + """The content of the message. Required.""" + name: Optional[str] = rest_field() + """An optional name for the participant.""" + + @overload + def __init__( + self, + *, + content: str, + name: Optional[str] = None, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, role=ChatRole.ASSISTANT, **kwargs) + + +class ChatRequestSystemMessage(ChatRequestMessage, discriminator="system"): + """A request chat message containing system instructions that influence how the model will + generate a chat completions + response. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message, which is always 'system' for system + messages. Required. The role that instructs or sets the behavior of the assistant. + :vartype role: str or ~azure.ai.inference.models.SYSTEM + :ivar content: The contents of the system message. Required. + :vartype content: str + :ivar name: An optional name for the participant. + :vartype name: str + """ + + role: Literal[ChatRole.SYSTEM] = rest_discriminator(name="role") # type: ignore + """The chat role associated with this message, which is always 'system' for system messages. + Required. The role that instructs or sets the behavior of the assistant.""" + content: str = rest_field() + """The contents of the system message. Required.""" + name: Optional[str] = rest_field() + """An optional name for the participant.""" + + @overload + def __init__( + self, + *, + content: str, + name: Optional[str] = None, + ): + ... 
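+    # A minimal usage sketch (names as exported from azure.ai.inference.models;
+    # ChatRequestUserMessage is defined further below). Callers never pass `role`
+    # themselves: each subclass's __init__ injects its ChatRole discriminator via
+    # super().__init__, so the serialized JSON carries the correct "role" value:
+    #
+    #   options = ChatCompletionsOptions(
+    #       messages=[
+    #           ChatRequestSystemMessage(content="You are a helpful assistant."),
+    #           ChatRequestUserMessage(content="How many feet are in a mile?"),
+    #       ]
+    #   )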
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, role=ChatRole.SYSTEM, **kwargs) + + +class ChatRequestUserMessage(ChatRequestMessage, discriminator="user"): + """A request chat message representing user input to the assistant. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message, which is always 'user' for user + messages. Required. The role that provides input for chat completions. + :vartype role: str or ~azure.ai.inference.models.USER + :ivar content: The contents of the user message, with available input types varying by selected + model. Required. + :vartype content: str + :ivar name: An optional name for the participant. + :vartype name: str + """ + + role: Literal[ChatRole.USER] = rest_discriminator(name="role") # type: ignore + """The chat role associated with this message, which is always 'user' for user messages. Required. + The role that provides input for chat completions.""" + content: str = rest_field() + """The contents of the user message, with available input types varying by selected model. + Required.""" + name: Optional[str] = rest_field() + """An optional name for the participant.""" + + @overload + def __init__( + self, + *, + content: str, + name: Optional[str] = None, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, role=ChatRole.USER, **kwargs) + + +class ChatResponseMessage(_model_base.Model): + """A representation of a chat message as received in a response. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with the message. Required. Known values are: "system", + "assistant", and "user". + :vartype role: str or ~azure.ai.inference.models.ChatRole + :ivar content: The content of the message. Required. + :vartype content: str + """ + + role: Union[str, "_models.ChatRole"] = rest_field() + """The chat role associated with the message. Required. Known values are: \"system\", + \"assistant\", and \"user\".""" + content: str = rest_field() + """The content of the message. Required.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.ChatRole"], + content: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class CompletionsUsage(_model_base.Model): + """Representation of the token counts processed for a completions request. + Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and + other consumers. + + All required parameters must be populated in order to send to server. + + :ivar completion_tokens: The number of tokens generated across all completions emissions. + Required. + :vartype completion_tokens: int + :ivar prompt_tokens: The number of tokens in the provided prompts for the completions request. 
+ Required. + :vartype prompt_tokens: int + :ivar total_tokens: The total number of tokens processed for the completions request and + response. Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field() + """The number of tokens generated across all completions emissions. Required.""" + prompt_tokens: int = rest_field() + """The number of tokens in the provided prompts for the completions request. Required.""" + total_tokens: int = rest_field() + """The total number of tokens processed for the completions request and response. Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/py.typed b/sdk/ai/azure-ai-inference/azure/ai/inference/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/dev_requirements.txt b/sdk/ai/azure-ai-inference/dev_requirements.txt new file mode 100644 index 000000000000..ff12ab35dd01 --- /dev/null +++ b/sdk/ai/azure-ai-inference/dev_requirements.txt @@ -0,0 +1,4 @@ +-e ../../../tools/azure-devtools +-e ../../../tools/azure-sdk-tools +../../core/azure-core +aiohttp \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/setup.py b/sdk/ai/azure-ai-inference/setup.py new file mode 100644 index 000000000000..fc6a19e4a776 --- /dev/null +++ b/sdk/ai/azure-ai-inference/setup.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# coding: utf-8 + +import os +import re +from setuptools import setup, find_packages + + +PACKAGE_NAME = "azure-ai-inference" +PACKAGE_PPRINT_NAME = "Azure Ai Inference" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace("-", "/") + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError("Cannot find version information") + + +setup( + name=PACKAGE_NAME, + version=version, + description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", + classifiers=[ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + ], + zip_safe=False, + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.ai", + ] + ), + include_package_data=True, + package_data={ + "azure.ai.inference": ["py.typed"], + }, + install_requires=[ + "isodate<1.0.0,>=0.6.1", + "azure-core<2.0.0,>=1.30.0", + "typing-extensions>=4.6.0", + ], + python_requires=">=3.8", +) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml new file mode 100644 index 000000000000..e983ed2d4070 --- /dev/null +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/ModelInference +commit: 553ccaadec0b913e4b438b42fdaac823d910d656 +repo: Azure/azure-rest-api-specs +additionalDirectories: From b8ea0cdd4a88eaa1015be21906e72139ab5374f8 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 26 Mar 2024 17:13:05 -0700 Subject: [PATCH 002/112] first pass at writing tests --- sdk/ai/azure-ai-inference/tests/README.md | 58 ++++++ sdk/ai/azure-ai-inference/tests/conftest.py | 12 ++ .../tests/model_inference_test_base.py | 189 ++++++++++++++++++ .../test_model_inference_async_client.py | 90 +++++++++ .../tests/test_model_inference_client.py | 78 ++++++++ sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 6 files changed, 428 insertions(+), 1 deletion(-) create mode 100644 sdk/ai/azure-ai-inference/tests/README.md create mode 100644 sdk/ai/azure-ai-inference/tests/conftest.py create mode 100644 sdk/ai/azure-ai-inference/tests/model_inference_test_base.py create mode 100644 sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py create mode 100644 sdk/ai/azure-ai-inference/tests/test_model_inference_client.py diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md new file mode 100644 index 000000000000..bc4df8af54ad --- /dev/null +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -0,0 +1,58 @@ +# Azure Image Analysis 
client library tests for Python + +## Running tests locally, on a Windows PC, against the live service + +### Prerequisites + +See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/vision/azure-ai-vision-imageanalysis/README.md#prerequisites). Create an Azure resource in one of the GPU-supported regions, otherwise some of the tests will fail. + +### Setup + +* Clone or download this sample repository. +* Open a command prompt window in the folder `sdk\vision\azure-ai-vision-imageanalysis`. +* If you want to run tests against the latest public Image Analysis client library, install it by running: + ```bash + pip install azure-ai-vision-imageanalysis + ``` +* If you want to run tests against a locally built Image Analysis client library: + * First build the wheel: + ```bash + pip install wheel + pip install -r dev_requirements.txt + python setup.py bdist_wheel + ``` + * Then install the resulting local wheel (update version `1.0.0b1` to the current one): + ```bash + pip install dist\azure_ai_vision_imageanalysis-1.0.0b1-py3-none-any.whl --user --force-reinstall + ``` + + +### Set environment variables + +See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/vision/azure-ai-vision-imageanalysis/README.md#set-environment-variables). + +In addition, the following environment values **must be** defined, although not used. Assign any value to them: +``` +set VISION_TENANT_ID=not-used +set VISION_CLIENT_ID=not-used +set VISION_CLIENT_SECRET=not-used +``` + +### Configure test proxy + +Configure the test proxy to run live service tests without recordings: +``` +set AZURE_TEST_RUN_LIVE=true +set AZURE_SKIP_LIVE_RECORDING=true +``` + +### Run tests + +To run all tests, type: +``` +pytest +``` + +### Additional information + +See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/tests/conftest.py b/sdk/ai/azure-ai-inference/tests/conftest.py new file mode 100644 index 000000000000..91e541e4d1bd --- /dev/null +++ b/sdk/ai/azure-ai-inference/tests/conftest.py @@ -0,0 +1,12 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest +from devtools_testutils import test_proxy + +# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method +@pytest.fixture(scope="session", autouse=True) +def start_proxy(test_proxy): + return diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py new file mode 100644 index 000000000000..755d86d6f681 --- /dev/null +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -0,0 +1,189 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import functools
+import logging
+import sys
+import azure.ai.inference as sdk
+import azure.ai.inference.aio as async_sdk
+
+from os import path
+from typing import List, Optional, Union
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader
+from azure.core.credentials import AzureKeyCredential
+from azure.core.exceptions import AzureError
+from azure.core.pipeline import PipelineRequest
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.INFO)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+ServicePreparer = functools.partial(
+    EnvironmentVariableLoader,
+    "model",
+    model_endpoint="https://your-azure-resource-name.your-azure-region.inference.ai.azure.com",
+    model_key="00000000000000000000000000000000"
+)
+
+
+# Intentionally not named "Test*", so pytest does not collect this base class as a test class
+class ModelInferenceTestBase(AzureRecordedTestCase):
+
+    client: sdk.ModelClient
+    async_client: async_sdk.ModelClient
+    connection_url: str
+
+    # Set to True to print out all chat completion results
+    PRINT_CHAT_COMPLETION_RESULTS = True
+
+    def _create_client_for_standard_test(self, sync: bool, get_connection_url: bool = False, **kwargs):
+        endpoint = kwargs.pop("model_endpoint")
+        key = kwargs.pop("model_key")
+        self._create_client(endpoint, key, sync, get_connection_url)
+
+    def _create_client_for_authentication_failure(self, sync: bool, **kwargs):
+        endpoint = kwargs.pop("model_endpoint")
+        key = "00000000000000000000000000000000"
+        self._create_client(endpoint, key, sync, False)
+
+    def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url: bool):
+        credential = AzureKeyCredential(key)
+        if sync:
+            self.client = sdk.ModelClient(
+                endpoint=endpoint,
+                credential=credential,
+                logging_enable=LOGGING_ENABLED,
+                raw_request_hook=self._raw_request_check if get_connection_url else None,
+            )
+            assert self.client is not None
+        else:
+            self.async_client = async_sdk.ModelClient(
+                endpoint=endpoint,
+                credential=credential,
+                logging_enable=LOGGING_ENABLED,
+                raw_request_hook=self._raw_request_check if get_connection_url else None,
+            )
+            assert self.async_client is not None
+
+    def _raw_request_check(self, request: PipelineRequest):
+        self.connection_url = request.http_request.url
+        print(f"Connection URL: {request.http_request.url}")
+
+    def _do_chat_completions(
+        self,
+        options: sdk.models.ChatCompletionsOptions,
+        query_params: Optional[dict] = None,
+        **kwargs,
+    ):
+
+        result = self.client.get_chat_completions(options=options, params=query_params)
+
+        # Optional: console printout of all results
+        if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS:
+            ModelInferenceTestBase._print_chat_completion_results(result)
+
+        # Validate all results
+        ModelInferenceTestBase._validate_chat_completion_results(result)
+
+        # Validate that additional query parameters exist in the connection URL, if specified
+        if query_params is not None:
+            ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url)
+
+    async def _do_async_chat_completion(
+        self,
+        options: sdk.models.ChatCompletionsOptions,
+        query_params: Optional[dict] = None,
+        **kwargs,
+    ):
+
+        result = await self.async_client.get_chat_completions(options=options, params=query_params) 
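+        # NOTE: `params`, when given, is assumed to flow through azure-core as extra
+        # URL query parameters on the request; _validate_query_parameters below checks
+        # that they reached the request URL, as captured by the raw_request_hook
+        # installed in _create_client.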
+
+        # Optional: console printout of all results
+        if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS:
+            ModelInferenceTestBase._print_chat_completion_results(result)
+
+        # Validate all results
+        ModelInferenceTestBase._validate_chat_completion_results(result)
+
+        # Validate that additional query parameters exist in the connection URL, if specified
+        if query_params is not None:
+            ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url)
+
+    def _do_chat_completion_with_error(
+        self,
+        options: sdk.models.ChatCompletionsOptions,
+        expected_status_code: int,
+        expected_message_contains: str,
+        **kwargs,
+    ):
+
+        try:
+            result = self.client.get_chat_completions(options=options)
+
+        except AzureError as e:
+            print(e)
+            assert hasattr(e, "status_code")
+            assert e.status_code == expected_status_code
+            assert expected_message_contains in e.message
+            return
+        assert False  # We should not get here
+
+    async def _do_async_chat_completion_with_error(
+        self,
+        options: sdk.models.ChatCompletionsOptions,
+        expected_status_code: int,
+        expected_message_contains: str,
+        **kwargs,
+    ):
+
+        try:
+            result = await self.async_client.get_chat_completions(options=options)
+
+        except AzureError as e:
+            print(e)
+            assert hasattr(e, "status_code")
+            assert e.status_code == expected_status_code
+            assert expected_message_contains in e.message
+            return
+        assert False  # We should not get here
+
+    @staticmethod
+    def _validate_query_parameters(query_params: dict, connection_url: str):
+        assert len(query_params) > 0
+        query_string = ""
+        for key, value in query_params.items():
+            query_string += "&" + key + "=" + value
+        query_string = "?" + query_string[1:]
+        assert query_string in connection_url
+
+    @staticmethod
+    def _validate_chat_completion_results(result: sdk.models.ChatCompletions):
+        assert True
+
+    @staticmethod
+    def _print_chat_completion_results(result: sdk.models.ChatCompletions):
+
+        for choice in result.choices:
+            print(" choices[{}].message.content: {}".format(choice.index, choice.message.content))
+            print(" choices[{}].message.role: {}".format(choice.index, choice.message.role))
+            print(" choices[{}].finish_reason: {}".format(choice.index, choice.finish_reason))
+            print(" choices[{}].index: {}".format(choice.index, choice.index))
+
+        print(" id: {}".format(result.id))
+        print(" created: {}".format(result.created))
+        print(" model: {}".format(result.model))
+        print(" object: {}".format(result.object))
+        print(" usage.completion_tokens: {}".format(result.usage.completion_tokens))
+        print(" usage.prompt_tokens: {}".format(result.usage.prompt_tokens))
+        print(" usage.total_tokens: {}".format(result.usage.total_tokens))
+
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
new file mode 100644
index 000000000000..136d3221e2bf
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
@@ -0,0 +1,90 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License. 
+# ------------------------------------
+import inspect
+import azure.ai.inference as sdk
+
+from model_inference_test_base import ModelInferenceTestBase, ServicePreparer
+from devtools_testutils.aio import recorded_by_proxy_async
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestModelAsyncClient(ModelInferenceTestBase):
+
+    # **********************************************************************************
+    #
+    # HAPPY PATH TESTS
+    #
+    # **********************************************************************************
+
+    # Test one chat completion, using default settings
+    @ServicePreparer()
+    @recorded_by_proxy_async
+    async def test_async_chat_completion(self, **kwargs):
+
+        self._create_client_for_standard_test(sync=False, **kwargs)
+
+        await self._do_async_chat_completion(
+            options=sdk.models.ChatCompletionsOptions(
+                messages=[
+                    sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")
+                ]
+            ),
+            **kwargs
+        )
+
+        await self.async_client.close()
+
+    # Test some visual features, one after the other, from image URL, with relevant settings specified
+    """ @ServicePreparer()
+    @recorded_by_proxy_async
+    async def test_analyze_async_single_feature_from_url(self, **kwargs):
+
+        self._create_client_for_standard_analysis(sync=False, **kwargs)
+
+        await self._do_async_analysis(
+            image_source=self.IMAGE_URL,
+            visual_features=[sdk.models.VisualFeatures.DENSE_CAPTIONS],
+            gender_neutral_caption=True,
+            **kwargs
+        )
+
+        await self._do_async_analysis(
+            image_source=self.IMAGE_URL,
+            visual_features=[sdk.models.VisualFeatures.SMART_CROPS],
+            smart_crops_aspect_ratios=[0.9, 1.33],
+            **kwargs
+        )
+
+        await self._do_async_analysis(
+            image_source=self.IMAGE_URL, visual_features=[sdk.models.VisualFeatures.TAGS], language="en", **kwargs
+        )
+
+        await self._do_async_analysis(
+            image_source=self.IMAGE_URL, visual_features=[sdk.models.VisualFeatures.PEOPLE], **kwargs
+        )
+
+        await self.async_client.close() """
+
+    # **********************************************************************************
+    #
+    # ERROR TESTS
+    #
+    # **********************************************************************************
+
+    """ @ServicePreparer()
+    @recorded_by_proxy_async
+    async def test_analyze_async_authentication_failure(self, **kwargs):
+
+        self._create_client_for_authentication_failure(sync=False, **kwargs)
+
+        await self._do_async_analysis_with_error(
+            image_source=self.IMAGE_URL,
+            visual_features=[sdk.models.VisualFeatures.TAGS],
+            expected_status_code=401,
+            expected_message_contains="Access denied",
+            **kwargs
+        )
+
+        await self.async_client.close() """
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
new file mode 100644
index 000000000000..4e27940d0cf9
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
@@ -0,0 +1,78 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License. 
+# ------------------------------------
+import inspect
+import azure.ai.inference as sdk
+
+from model_inference_test_base import ModelInferenceTestBase, ServicePreparer
+from devtools_testutils import recorded_by_proxy
+
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestModelClient(ModelInferenceTestBase):
+
+    # **********************************************************************************
+    #
+    # HAPPY PATH TESTS
+    #
+    # **********************************************************************************
+
+    # Test one chat completion
+    @ServicePreparer()
+    @recorded_by_proxy
+    def test_sync_chat_completion(self, **kwargs):
+
+        self._create_client_for_standard_test(sync=True, **kwargs)
+
+        options = sdk.models.ChatCompletionsOptions(messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")])
+
+        self._do_chat_completions(
+            options=options,
+            **kwargs
+        )
+
+        self.client.close()
+
+    # Test some visual features, one after the other, from file, using default settings
+    """ @ServicePreparer()
+    @recorded_by_proxy
+    def test_analyze_sync_single_feature_from_file(self, **kwargs):
+
+        self._create_client_for_standard_analysis(sync=True, get_connection_url=True, **kwargs)
+
+        self._do_analysis(
+            image_source=self.IMAGE_FILE,
+            visual_features=[sdk.models.VisualFeatures.CAPTION],
+            query_params={"key1": "value1", "key2": "value2"},
+            **kwargs
+        )
+
+        self._do_analysis(image_source=self.IMAGE_FILE, visual_features=[sdk.models.VisualFeatures.READ], **kwargs)
+
+        self._do_analysis(image_source=self.IMAGE_FILE, visual_features=[sdk.models.VisualFeatures.TAGS], **kwargs)
+
+        self.client.close() """
+
+    # **********************************************************************************
+    #
+    # ERROR TESTS
+    #
+    # **********************************************************************************
+
+    """ @ServicePreparer()
+    @recorded_by_proxy
+    def test_analyze_sync_image_url_does_not_exist(self, **kwargs):
+
+        self._create_client_for_standard_analysis(sync=True, **kwargs)
+
+        self._do_analysis_with_error(
+            image_source="https://www.this.is.a.bad.url.com/for/sure.jpg",
+            visual_features=[sdk.models.VisualFeatures.CAPTION],
+            expected_status_code=400,
+            expected_message_contains="image url is not accessible",
+            **kwargs
+        )
+
+        self.client.close()
+    """
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml
index e983ed2d4070..45de5aad2a68 100644
--- a/sdk/ai/azure-ai-inference/tsp-location.yaml
+++ b/sdk/ai/azure-ai-inference/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/ai/ModelInference
-commit: 553ccaadec0b913e4b438b42fdaac823d910d656
+commit: 3b8c933cdb9ff186419286e6a4b6a0dbacabad41
 repo: Azure/azure-rest-api-specs
 additionalDirectories:

From ee78c9cf1a8bf506992c1bce1b8f3e7911035670 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Tue, 26 Mar 2024 17:31:41 -0700
Subject: [PATCH 003/112] Re-emit client library with fixed variable name

---
 .../ai/inference/_operations/_operations.py   | 34 +++++++++----------
 .../inference/aio/_operations/_operations.py  | 34 +++++++++----------
 2 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
index 43bc4f9a94d2..b587419402b0 100644
--- 
a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -67,7 +67,7 @@ class ModelClientOperationsMixin(ModelClientMixinABC): @overload def get_chat_completions( self, - chat_completion_options: _models.ChatCompletionsOptions, + chat_completions_options: _models.ChatCompletionsOptions, *, content_type: str = "application/json", **kwargs: Any @@ -78,8 +78,8 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Required. - :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions + :param chat_completions_options: The JSON payload containing chat completion options. Required. + :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -104,7 +104,7 @@ def get_chat_completions( } # JSON input template you can fill out and use as your body input. - chat_completion_options = { + chat_completions_options = { "messages": [ chat_request_message ], @@ -190,7 +190,7 @@ def get_chat_completions( @overload def get_chat_completions( - self, chat_completion_options: JSON, *, content_type: str = "application/json", **kwargs: Any + self, chat_completions_options: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -198,8 +198,8 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Required. - :type chat_completion_options: JSON + :param chat_completions_options: The JSON payload containing chat completion options. Required. + :type chat_completions_options: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -256,7 +256,7 @@ def get_chat_completions( @overload def get_chat_completions( - self, chat_completion_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, chat_completions_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -264,8 +264,8 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Required. - :type chat_completion_options: IO[bytes] + :param chat_completions_options: The JSON payload containing chat completion options. Required. + :type chat_completions_options: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -322,7 +322,7 @@ def get_chat_completions( @distributed_trace def get_chat_completions( - self, chat_completion_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any + self, chat_completions_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. 
@@ -330,9 +330,9 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Is one of + :param chat_completions_options: The JSON payload containing chat completion options. Is one of the following types: ChatCompletionsOptions, JSON, IO[bytes] Required. - :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or + :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or IO[bytes] :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions @@ -355,7 +355,7 @@ def get_chat_completions( } # JSON input template you can fill out and use as your body input. - chat_completion_options = { + chat_completions_options = { "messages": [ chat_request_message ], @@ -454,10 +454,10 @@ def get_chat_completions( content_type = content_type or "application/json" _content = None - if isinstance(chat_completion_options, (IOBase, bytes)): - _content = chat_completion_options + if isinstance(chat_completions_options, (IOBase, bytes)): + _content = chat_completions_options else: - _content = json.dumps(chat_completion_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(chat_completions_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( content_type=content_type, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index c6cfbec9538d..96d397615fc3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -42,7 +42,7 @@ class ModelClientOperationsMixin(ModelClientMixinABC): @overload async def get_chat_completions( self, - chat_completion_options: _models.ChatCompletionsOptions, + chat_completions_options: _models.ChatCompletionsOptions, *, content_type: str = "application/json", **kwargs: Any @@ -53,8 +53,8 @@ async def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Required. - :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions + :param chat_completions_options: The JSON payload containing chat completion options. Required. + :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -79,7 +79,7 @@ async def get_chat_completions( } # JSON input template you can fill out and use as your body input. - chat_completion_options = { + chat_completions_options = { "messages": [ chat_request_message ], @@ -165,7 +165,7 @@ async def get_chat_completions( @overload async def get_chat_completions( - self, chat_completion_options: JSON, *, content_type: str = "application/json", **kwargs: Any + self, chat_completions_options: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -173,8 +173,8 @@ async def get_chat_completions( "completes" provided prompt data. 
- :param chat_completion_options: The JSON payload containing chat completion options. Required. - :type chat_completion_options: JSON + :param chat_completions_options: The JSON payload containing chat completion options. Required. + :type chat_completions_options: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -231,7 +231,7 @@ async def get_chat_completions( @overload async def get_chat_completions( - self, chat_completion_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, chat_completions_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -239,8 +239,8 @@ async def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Required. - :type chat_completion_options: IO[bytes] + :param chat_completions_options: The JSON payload containing chat completion options. Required. + :type chat_completions_options: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -297,7 +297,7 @@ async def get_chat_completions( @distributed_trace_async async def get_chat_completions( - self, chat_completion_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any + self, chat_completions_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -305,9 +305,9 @@ async def get_chat_completions( "completes" provided prompt data. - :param chat_completion_options: The JSON payload containing chat completion options. Is one of + :param chat_completions_options: The JSON payload containing chat completion options. Is one of the following types: ChatCompletionsOptions, JSON, IO[bytes] Required. - :type chat_completion_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or + :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or IO[bytes] :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions @@ -330,7 +330,7 @@ async def get_chat_completions( } # JSON input template you can fill out and use as your body input. 
- chat_completion_options = { + chat_completions_options = { "messages": [ chat_request_message ], @@ -429,10 +429,10 @@ async def get_chat_completions( content_type = content_type or "application/json" _content = None - if isinstance(chat_completion_options, (IOBase, bytes)): - _content = chat_completion_options + if isinstance(chat_completions_options, (IOBase, bytes)): + _content = chat_completions_options else: - _content = json.dumps(chat_completion_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(chat_completions_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( content_type=content_type, From 25b5c6b1de5a47d614ba7afcd49c1af8fa96688c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 26 Mar 2024 18:02:42 -0700 Subject: [PATCH 004/112] Fix test --- .../azure-ai-inference/tests/model_inference_test_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 755d86d6f681..322320a44d45 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -86,7 +86,7 @@ def _do_chat_completions( **kwargs, ): - result = self.client.get_chat_completions(options=options, params=query_params) + result = self.client.get_chat_completions(chat_completions_options=options, params=query_params) # Optional: console printout of all results if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS: @@ -128,7 +128,7 @@ def _do_chat_completion_with_error( ): try: - result = self.client.get_chat_completions(options=options) + result = self.client.get_chat_completions(chat_completions_options=options) except AzureError as e: print(e) @@ -147,7 +147,7 @@ async def _do_async_chat_completion_with_error( ): try: - result = await self.async_client.get_chat_completions(options=options) + result = await self.async_client.get_chat_completions(chat_completions_options=options) except AzureError as e: print(e) From 5072f004eaed94f9c79cf9edcc4123ea7c45cb28 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 27 Mar 2024 20:33:05 -0700 Subject: [PATCH 005/112] First test working! 
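
The first passing test exercises the sync client end to end. As a rough
sketch of the call path this enables (endpoint and key are placeholders,
not real values):

    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
    options = ChatCompletionsOptions(
        messages=[ChatRequestUserMessage(content="How many feet are in a mile?")]
    )
    result = client.get_chat_completions(chat_completions_options=options)
    print(result.choices[0].message.content)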
--- .../azure/ai/inference/_client.py | 16 ++---- .../azure/ai/inference/_configuration.py | 26 +++------- .../ai/inference/_operations/_operations.py | 52 +++++-------------- .../azure/ai/inference/aio/_client.py | 16 ++---- .../azure/ai/inference/aio/_configuration.py | 26 +++------- .../inference/aio/_operations/_operations.py | 46 ++++------------ .../azure/ai/inference/models/_enums.py | 6 ++- .../azure/ai/inference/models/_models.py | 17 +++--- .../tests/model_inference_test_base.py | 47 +++++++++++++---- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 10 files changed, 90 insertions(+), 164 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py index fc6789d4cf6d..c0c4a2c537aa 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from copy import deepcopy -from typing import Any, TYPE_CHECKING, Union +from typing import Any from azure.core import PipelineClient from azure.core.credentials import AzureKeyCredential @@ -18,18 +18,12 @@ from ._operations import ModelClientOperationsMixin from ._serialization import Deserializer, Serializer -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials import TokenCredential - class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword """ModelClient. - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials.TokenCredential + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential :keyword endpoint: Service host. Required. :paramtype endpoint: str :keyword api_version: The API version to use for this operation. Default value is @@ -38,9 +32,7 @@ class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts :paramtype api_version: str """ - def __init__( - self, credential: Union[AzureKeyCredential, "TokenCredential"], *, endpoint: str, **kwargs: Any - ) -> None: + def __init__(self, credential: AzureKeyCredential, *, endpoint: str, **kwargs: Any) -> None: self._config = ModelClientConfiguration(credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 570277f2be83..7527fe7e3c5a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -6,17 +6,13 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, TYPE_CHECKING, Union +from typing import Any from azure.core.credentials import AzureKeyCredential from azure.core.pipeline import policies from ._version import VERSION -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials import TokenCredential - class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes """Configuration for ModelClient. 
@@ -24,17 +20,15 @@ class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials.TokenCredential + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ - def __init__(self, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + def __init__(self, credential: AzureKeyCredential, **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") if credential is None: @@ -42,18 +36,10 @@ def __init__(self, credential: Union[AzureKeyCredential, "TokenCredential"], **k self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://widget.contoso.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) - def _infer_policy(self, **kwargs): - if isinstance(self.credential, AzureKeyCredential): - return policies.AzureKeyCredentialPolicy(self.credential, "api-key", **kwargs) - if hasattr(self.credential, "get_token"): - return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) - raise TypeError(f"Unsupported credential: {self.credential}") - def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -65,4 +51,6 @@ def _configure(self, **kwargs: Any) -> None: self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = self._infer_policy(**kwargs) + self.authentication_policy = policies.AzureKeyCredentialPolicy( + self.credential, "Authorization", prefix="Bearer", **kwargs + ) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index b587419402b0..dbf1f647f8da 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -45,7 +45,7 @@ def build_model_get_chat_completions_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) accept = _headers.pop("Accept", "application/json") @@ -56,9 +56,9 @@ def build_model_get_chat_completions_request(**kwargs: Any) -> 
HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if content_type is not None: - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -153,19 +153,12 @@ def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". } } ], @@ -219,19 +212,12 @@ def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". } } ], @@ -285,19 +271,12 @@ def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". } } ], @@ -404,19 +383,12 @@ def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". 
} } ], @@ -449,7 +421,7 @@ def get_chat_completions( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) content_type = content_type or "application/json" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py index 89a809d11985..4551c8d4a36c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from copy import deepcopy -from typing import Any, Awaitable, TYPE_CHECKING, Union +from typing import Any, Awaitable from azure.core import AsyncPipelineClient from azure.core.credentials import AzureKeyCredential @@ -18,18 +18,12 @@ from ._configuration import ModelClientConfiguration from ._operations import ModelClientOperationsMixin -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword """ModelClient. - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials_async.AsyncTokenCredential + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential :keyword endpoint: Service host. Required. :paramtype endpoint: str :keyword api_version: The API version to use for this operation. Default value is @@ -38,9 +32,7 @@ class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts :paramtype api_version: str """ - def __init__( - self, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], *, endpoint: str, **kwargs: Any - ) -> None: + def __init__(self, credential: AzureKeyCredential, *, endpoint: str, **kwargs: Any) -> None: self._config = ModelClientConfiguration(credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py index f8849f1c207d..8fbd844c2808 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -6,17 +6,13 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, TYPE_CHECKING, Union +from typing import Any from azure.core.credentials import AzureKeyCredential from azure.core.pipeline import policies from .._version import VERSION -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials_async import AsyncTokenCredential - class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes """Configuration for ModelClient. 
@@ -24,17 +20,15 @@ class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param credential: Credential needed for the client to connect to Azure. Is either a - AzureKeyCredential type or a TokenCredential type. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials_async.AsyncTokenCredential + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ - def __init__(self, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any) -> None: + def __init__(self, credential: AzureKeyCredential, **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") if credential is None: @@ -42,18 +36,10 @@ def __init__(self, credential: Union[AzureKeyCredential, "AsyncTokenCredential"] self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://widget.contoso.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) - def _infer_policy(self, **kwargs): - if isinstance(self.credential, AzureKeyCredential): - return policies.AzureKeyCredentialPolicy(self.credential, "api-key", **kwargs) - if hasattr(self.credential, "get_token"): - return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) - raise TypeError(f"Unsupported credential: {self.credential}") - def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -65,4 +51,6 @@ def _configure(self, **kwargs: Any) -> None: self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = self._infer_policy(**kwargs) + self.authentication_policy = policies.AzureKeyCredentialPolicy( + self.credential, "Authorization", prefix="Bearer", **kwargs + ) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 96d397615fc3..d6c2f9b95671 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -128,19 +128,12 @@ async def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. 
Required. Known values are: "system", "user", "assistant", + and "tool". } } ], @@ -194,19 +187,12 @@ async def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". } } ], @@ -260,19 +246,12 @@ async def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". } } ], @@ -379,19 +358,12 @@ async def get_chat_completions( "stop", "length", and "content_filter". "index": 0, # The ordered index associated with this chat completions choice. Required. - "delta": { - "content": "str", # The content of the message. - Required. - "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". - }, "message": { "content": "str", # The content of the message. Required. "role": "str" # The chat role associated with the - message. Required. Known values are: "system", "assistant", and - "user". + message. Required. Known values are: "system", "user", "assistant", + and "tool". 
} } ], @@ -424,7 +396,7 @@ async def get_chat_completions( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) content_type = content_type or "application/json" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 70938ba01774..67f126b67ef6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -15,10 +15,12 @@ class ChatRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): SYSTEM = "system" """The role that instructs or sets the behavior of the assistant.""" - ASSISTANT = "assistant" - """The role that provides responses to system-instructed, user-prompted input.""" USER = "user" """The role that provides input for chat completions.""" + ASSISTANT = "assistant" + """The role that provides responses to system-instructed, user-prompted input.""" + TOOL = "tool" + """The role that represents extension tool activity within a chat completions operation.""" class CompletionsFinishReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index c7a48e7da526..47c81a6312f4 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -34,8 +34,6 @@ class ChatChoice(_model_base.Model): :ivar finish_reason: The reason that this chat completions choice completed its generated. Required. Known values are: "stop", "length", and "content_filter". :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason - :ivar delta: The delta message content for a streaming response. - :vartype delta: ~azure.ai.inference.models.ChatResponseMessage """ message: Optional["_models.ChatResponseMessage"] = rest_field() @@ -45,8 +43,6 @@ class ChatChoice(_model_base.Model): finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generated. Required. Known values are: \"stop\", \"length\", and \"content_filter\".""" - delta: Optional["_models.ChatResponseMessage"] = rest_field() - """The delta message content for a streaming response.""" @overload def __init__( @@ -55,7 +51,6 @@ def __init__( index: int, finish_reason: Union[str, "_models.CompletionsFinishReason"], message: Optional["_models.ChatResponseMessage"] = None, - delta: Optional["_models.ChatResponseMessage"] = None, ): ... @@ -346,14 +341,14 @@ class ChatRequestMessage(_model_base.Model): All required parameters must be populated in order to send to server. :ivar role: The chat role associated with this message. Required. Known values are: "system", - "assistant", and "user". + "user", "assistant", and "tool". :vartype role: str or ~azure.ai.inference.models.ChatRole """ __mapping__: Dict[str, _model_base.Model] = {} role: str = rest_discriminator(name="role") - """The chat role associated with this message. Required. Known values are: \"system\", - \"assistant\", and \"user\".""" + """The chat role associated with this message. Required. 
Known values are: \"system\", \"user\", + \"assistant\", and \"tool\".""" @overload def __init__( @@ -511,15 +506,15 @@ class ChatResponseMessage(_model_base.Model): All required parameters must be populated in order to send to server. :ivar role: The chat role associated with the message. Required. Known values are: "system", - "assistant", and "user". + "user", "assistant", and "tool". :vartype role: str or ~azure.ai.inference.models.ChatRole :ivar content: The content of the message. Required. :vartype content: str """ role: Union[str, "_models.ChatRole"] = rest_field() - """The chat role associated with the message. Required. Known values are: \"system\", - \"assistant\", and \"user\".""" + """The chat role associated with the message. Required. Known values are: \"system\", \"user\", + \"assistant\", and \"tool\".""" content: str = rest_field() """The content of the message. Required.""" diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 322320a44d45..a93b76c1e58a 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -32,7 +32,7 @@ EnvironmentVariableLoader, "model", model_endpoint="https://your-azure-resource-name.your-azure-region.inference.ai.azure.com", - model_key="00000000000000000000000000000000" + model_key="00000000000000000000000000000000", ) @@ -46,16 +46,19 @@ class ModelInferenceTestBase(AzureRecordedTestCase): # Set to True to print out all analysis results PRINT_CHAT_COMPLETION_RESULTS = True + def _create_client_for_standard_test(self, sync: bool, get_connection_url: bool = False, **kwargs): endpoint = kwargs.pop("model_endpoint") key = kwargs.pop("model_key") self._create_client(endpoint, key, sync, get_connection_url) + def _create_client_for_authentication_failure(self, sync: bool, **kwargs): endpoint = kwargs.pop("model_endpoint") key = "00000000000000000000000000000000" self._create_client(endpoint, key, sync, False) + def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url: bool): credential = AzureKeyCredential(key) if sync: @@ -75,10 +78,12 @@ def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url ) assert self.async_client is not None + def _raw_request_check(self, request: PipelineRequest): self.connection_url = request.http_request.url print(f"Connection URL: {request.http_request.url}") + def _do_chat_completions( self, options: sdk.models.ChatCompletionsOptions, @@ -90,15 +95,16 @@ def _do_chat_completions( # Optional: console printout of all results if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS: - ModelInferenceTestBase._print_chat_completion_results(result) + ModelInferenceTestBase._print_chat_completions_results(result) # Validate all results - ModelInferenceTestBase._validate_chat_completion_results(result) + ModelInferenceTestBase._validate_chat_completions_results(result) # Validate that additional query parameters exists in the connection URL, if specify if query_params is not None: ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url) + async def _do_async_chat_completion( self, options: sdk.models.ChatCompletionsOptions, @@ -110,15 +116,16 @@ async def _do_async_chat_completion( # Optional: console printout of all results if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS: - ModelInferenceTestBase._print_chat_completion_results(result) + 
ModelInferenceTestBase._print_chat_completions_results(result) # Validate all results - ModelInferenceTestBase._validate_chat_completion_results(result) + ModelInferenceTestBase._validate_chat_completions_results(result) # Validate that additional query parameters exists in the connection URL, if specify if query_params is not None: ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url) + def _do_chat_completion_with_error( self, options: sdk.models.ChatCompletionsOptions, @@ -138,6 +145,7 @@ def _do_chat_completion_with_error( return assert False # We should not get here + async def _do_async_chat_completion_with_error( self, options: sdk.models.ChatCompletionsOptions, @@ -157,6 +165,7 @@ async def _do_async_chat_completion_with_error( return assert False # We should not get here + @staticmethod def _validate_query_parameters(query_params: dict, connection_url: str): assert len(query_params) > 0 @@ -166,15 +175,32 @@ def _validate_query_parameters(query_params: dict, connection_url: str): query_string = "?" + query_string[1:] assert query_string in connection_url + @staticmethod - def _validate_result(result: sdk.models.ChatCompletions): - assert True + def _validate_chat_completions_results(result: sdk.models.ChatCompletions): + + assert "5,280" in result.choices[0].message.content or "5280" in result.choices[0].message.content + assert result.choices[0].message.role == sdk.models.ChatRole.ASSISTANT + assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED + assert result.choices[0].index == 0 + + assert result.id is not None + assert result.id != "" + assert result.created is not None + assert result.created != "" + assert result.model is not None + assert result.model != "" + assert result.object == "chat.completion" + assert result.usage.prompt_tokens > 0 + assert result.usage.completion_tokens > 0 + assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + @staticmethod - def _print_analysis_results(result: sdk.models.ChatCompletions): + def _print_chat_completions_results(result: sdk.models.ChatCompletions): for choice in result.choices: - print(" choices[0].message.content: {}".format(choice.message.content)) + print(f" choices[0].message.content: {choice.message.content}") print(" choices[0].message.role: {}".format(choice.message.role)) print(" choices[0].finish_reason: {}".format(choice.finish_reason)) print(" choices[0].index: {}".format(choice.index)) @@ -183,7 +209,6 @@ def _print_analysis_results(result: sdk.models.ChatCompletions): print(" created: {}".format(result.created)) print(" model: {}".format(result.model)) print(" object: {}".format(result.object)) - print(" usage.completion_tokens: {}".format(result.usage.completion_tokens)) print(" usage.prompt_tokens: {}".format(result.usage.prompt_tokens)) print(" usage.completion_tokens: {}".format(result.usage.completion_tokens)) - + print(" usage.total_tokens: {}".format(result.usage.total_tokens)) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 45de5aad2a68..04190fe5d499 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelInference -commit: 3b8c933cdb9ff186419286e6a4b6a0dbacabad41 +commit: 1b950ebcf6b58804a387e17641b1c933aeb13f4b repo: Azure/azure-rest-api-specs additionalDirectories: From 62c476adcaa4886b0814b8a96f560ac6edc55468 Mon Sep 17 00:00:00 2001 From: 
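
With the configuration change above, the client now authenticates only with an AzureKeyCredential, and the key is sent as an "Authorization: Bearer <key>" header rather than an "api-key" header. A minimal usage sketch follows; the endpoint and key values are placeholders, the message and option class names are taken from the generated code above, and the assumption that ModelClient is re-exported at the package root (as in other generated SDKs) is mine:

import azure.ai.inference as sdk
from azure.core.credentials import AzureKeyCredential

# Placeholders: substitute the real endpoint and key for your deployment.
client = sdk.ModelClient(
    credential=AzureKeyCredential("<your-key>"),  # sent as "Authorization: Bearer <your-key>"
    endpoint="https://<your-resource>.<your-region>.inference.ai.azure.com",
)

options = sdk.models.ChatCompletionsOptions(
    messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")],
)
result = client.get_chat_completions(chat_completions_options=options)
print(result.choices[0].message.content)
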
Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 28 Mar 2024 19:09:43 -0700 Subject: [PATCH 006/112] Re-emit after adding tools. Add async test --- .../ai/inference/_operations/_operations.py | 34 +- .../inference/aio/_operations/_operations.py | 34 +- .../azure/ai/inference/models/__init__.py | 18 + .../azure/ai/inference/models/_enums.py | 14 + .../azure/ai/inference/models/_models.py | 438 +++++++++++++++--- .../tests/model_inference_test_base.py | 48 +- .../test_model_inference_async_client.py | 21 +- .../tests/test_model_inference_client.py | 10 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 9 files changed, 529 insertions(+), 90 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index dbf1f647f8da..926eb75f37ec 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -136,6 +136,13 @@ def get_chat_completions( deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. + "tool_choice": "str", # Optional. If specified, the model will configure + which of the provided tools it can use for the chat completions response. Is + either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a + ChatCompletionsNamedToolSelection type. + "tools": [ + chat_completions_tool_definition + ], "top_p": 0.0 # Optional. An alternative to sampling with temperature called nucleus sampling. This value causes the model to consider the results of tokens with the provided probability mass. As an example, a value of 0.15 will cause @@ -156,9 +163,12 @@ def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], @@ -215,9 +225,12 @@ def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], @@ -274,9 +287,12 @@ def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], @@ -366,6 +382,13 @@ def get_chat_completions( deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. + "tool_choice": "str", # Optional. If specified, the model will configure + which of the provided tools it can use for the chat completions response. Is + either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a + ChatCompletionsNamedToolSelection type. + "tools": [ + chat_completions_tool_definition + ], "top_p": 0.0 # Optional. An alternative to sampling with temperature called nucleus sampling. 
This value causes the model to consider the results of tokens with the provided probability mass. As an example, a value of 0.15 will cause @@ -386,9 +409,12 @@ def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index d6c2f9b95671..292a5fce4be3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -111,6 +111,13 @@ async def get_chat_completions( deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. + "tool_choice": "str", # Optional. If specified, the model will configure + which of the provided tools it can use for the chat completions response. Is + either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a + ChatCompletionsNamedToolSelection type. + "tools": [ + chat_completions_tool_definition + ], "top_p": 0.0 # Optional. An alternative to sampling with temperature called nucleus sampling. This value causes the model to consider the results of tokens with the provided probability mass. As an example, a value of 0.15 will cause @@ -131,9 +138,12 @@ async def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], @@ -190,9 +200,12 @@ async def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], @@ -249,9 +262,12 @@ async def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], @@ -341,6 +357,13 @@ async def get_chat_completions( deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. + "tool_choice": "str", # Optional. If specified, the model will configure + which of the provided tools it can use for the chat completions response. Is + either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a + ChatCompletionsNamedToolSelection type. + "tools": [ + chat_completions_tool_definition + ], "top_p": 0.0 # Optional. An alternative to sampling with temperature called nucleus sampling. This value causes the model to consider the results of tokens with the provided probability mass. 
As an example, a value of 0.15 will cause @@ -361,9 +384,12 @@ async def get_chat_completions( "message": { "content": "str", # The content of the message. Required. - "role": "str" # The chat role associated with the + "role": "str", # The chat role associated with the message. Required. Known values are: "system", "user", "assistant", and "tool". + "tool_calls": [ + chat_completions_tool_call + ] } } ], diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index af27c51657ba..100137e8ca4c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -8,17 +8,26 @@ from ._models import ChatChoice from ._models import ChatCompletions +from ._models import ChatCompletionsFunctionToolCall +from ._models import ChatCompletionsFunctionToolDefinition from ._models import ChatCompletionsJsonResponseFormat +from ._models import ChatCompletionsNamedToolSelection from ._models import ChatCompletionsOptions from ._models import ChatCompletionsResponseFormat from ._models import ChatCompletionsTextResponseFormat +from ._models import ChatCompletionsToolCall +from ._models import ChatCompletionsToolDefinition from ._models import ChatRequestAssistantMessage from ._models import ChatRequestMessage from ._models import ChatRequestSystemMessage +from ._models import ChatRequestToolMessage from ._models import ChatRequestUserMessage from ._models import ChatResponseMessage from ._models import CompletionsUsage +from ._models import FunctionCall +from ._models import FunctionDefinition +from ._enums import ChatCompletionsToolSelectionPreset from ._enums import ChatRole from ._enums import CompletionsFinishReason from ._patch import __all__ as _patch_all @@ -28,16 +37,25 @@ __all__ = [ "ChatChoice", "ChatCompletions", + "ChatCompletionsFunctionToolCall", + "ChatCompletionsFunctionToolDefinition", "ChatCompletionsJsonResponseFormat", + "ChatCompletionsNamedToolSelection", "ChatCompletionsOptions", "ChatCompletionsResponseFormat", "ChatCompletionsTextResponseFormat", + "ChatCompletionsToolCall", + "ChatCompletionsToolDefinition", "ChatRequestAssistantMessage", "ChatRequestMessage", "ChatRequestSystemMessage", + "ChatRequestToolMessage", "ChatRequestUserMessage", "ChatResponseMessage", "CompletionsUsage", + "FunctionCall", + "FunctionDefinition", + "ChatCompletionsToolSelectionPreset", "ChatRole", "CompletionsFinishReason", ] diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 67f126b67ef6..d226ae417513 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -10,6 +10,20 @@ from azure.core import CaseInsensitiveEnumMeta +class ChatCompletionsToolSelectionPreset(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Represents a generic policy for how a chat completions tool may be selected.""" + + AUTO = "auto" + """Specifies that the model may either use any of the tools provided in this chat completions + request or + instead return a standard chat completions response as if no tools were provided.""" + NONE = "none" + """Specifies that the model should not respond with a tool call and should instead provide a + standard chat + completions response. 
Response content may still be influenced by the provided tool + definitions.""" + + class ChatRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): """A description of the intended purpose of a message within a chat completions interaction.""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 47c81a6312f4..18f07916556c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -27,19 +27,19 @@ class ChatChoice(_model_base.Model): All required parameters must be populated in order to send to server. - :ivar message: The chat message for a given chat completions prompt. - :vartype message: ~azure.ai.inference.models.ChatResponseMessage :ivar index: The ordered index associated with this chat completions choice. Required. :vartype index: int + :ivar message: The chat message for a given chat completions prompt. Required. + :vartype message: ~azure.ai.inference.models.ChatResponseMessage :ivar finish_reason: The reason that this chat completions choice completed its generated. Required. Known values are: "stop", "length", and "content_filter". :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason """ - message: Optional["_models.ChatResponseMessage"] = rest_field() - """The chat message for a given chat completions prompt.""" index: int = rest_field() """The ordered index associated with this chat completions choice. Required.""" + message: "_models.ChatResponseMessage" = rest_field() + """The chat message for a given chat completions prompt. Required.""" finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generated. Required. Known values are: \"stop\", \"length\", and \"content_filter\".""" @@ -49,8 +49,8 @@ def __init__( self, *, index: int, + message: "_models.ChatResponseMessage", finish_reason: Union[str, "_models.CompletionsFinishReason"], - message: Optional["_models.ChatResponseMessage"] = None, ): ... @@ -75,10 +75,14 @@ class ChatCompletions(_model_base.Model): :ivar id: A unique identifier associated with this chat completions response. Required. :vartype id: str + :ivar object: The response object type, which is always ``chat.completion``. Required. + :vartype object: str :ivar created: The first timestamp associated with generation activity for this completions response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. :vartype created: ~datetime.datetime + :ivar model: The model used for the chat completion. Required. + :vartype model: str :ivar choices: The collection of completions choices associated with this completions response. Generally, ``n`` choices are generated per provided prompt with a default value of 1. Token limits and other settings may limit the number of choices generated. Required. @@ -86,17 +90,17 @@ class ChatCompletions(_model_base.Model): :ivar usage: Usage information for tokens processed and generated as part of this completions operation. Required. :vartype usage: ~azure.ai.inference.models.CompletionsUsage - :ivar object: The response object type, which is always ``chat.completion``. Required. - :vartype object: str - :ivar model: The model used for the chat completion. Required. - :vartype model: str """ id: str = rest_field() """A unique identifier associated with this chat completions response. 
Required.""" + object: str = rest_field() + """The response object type, which is always ``chat.completion``. Required.""" created: datetime.datetime = rest_field(format="unix-timestamp") """The first timestamp associated with generation activity for this completions response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" + model: str = rest_field() + """The model used for the chat completion. Required.""" choices: List["_models.ChatChoice"] = rest_field() """The collection of completions choices associated with this completions response. Generally, ``n`` choices are generated per provided prompt with a default value of 1. @@ -104,21 +108,17 @@ class ChatCompletions(_model_base.Model): usage: "_models.CompletionsUsage" = rest_field() """Usage information for tokens processed and generated as part of this completions operation. Required.""" - object: str = rest_field() - """The response object type, which is always ``chat.completion``. Required.""" - model: str = rest_field() - """The model used for the chat completion. Required.""" @overload def __init__( self, *, id: str, # pylint: disable=redefined-builtin + object: str, created: datetime.datetime, + model: str, choices: List["_models.ChatChoice"], usage: "_models.CompletionsUsage", - object: str, - model: str, ): ... @@ -133,6 +133,161 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class ChatCompletionsToolCall(_model_base.Model): + """An abstract representation of a tool call that must be resolved in a subsequent request to + perform the requested + chat completion. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatCompletionsFunctionToolCall + + All required parameters must be populated in order to send to server. + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar id: The ID of the tool call. Required. + :vartype id: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + id: str = rest_field() + """The ID of the tool call. Required.""" + + @overload + def __init__( + self, + *, + type: str, + id: str, # pylint: disable=redefined-builtin + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatCompletionsFunctionToolCall(ChatCompletionsToolCall, discriminator="function"): + """A tool call to a function tool, issued by the model in evaluation of a configured function + tool, that represents + a function invocation needed for a subsequent chat completions request to resolve. + + All required parameters must be populated in order to send to server. + + :ivar id: The ID of the tool call. Required. + :vartype id: str + :ivar type: The type of tool call, in this case always 'function'. Required. Default value is + "function". + :vartype type: str + :ivar function: The details of the function invocation requested by the tool call. Required. + :vartype function: ~azure.ai.inference.models.FunctionCall + """ + + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The type of tool call, in this case always 'function'. Required. 
Default value is \"function\".""" + function: "_models.FunctionCall" = rest_field() + """The details of the function invocation requested by the tool call. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + function: "_models.FunctionCall", + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="function", **kwargs) + + +class ChatCompletionsToolDefinition(_model_base.Model): + """An abstract representation of a tool that can be used by the model to improve a chat + completions response. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatCompletionsFunctionToolDefinition + + All required parameters must be populated in order to send to server. + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatCompletionsFunctionToolDefinition(ChatCompletionsToolDefinition, discriminator="function"): + """The definition information for a chat completions function tool that can call a function in + response to a tool call. + + All required parameters must be populated in order to send to server. + + :ivar type: The object name, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The function definition details for the function tool. Required. + :vartype function: ~azure.ai.inference.models.FunctionDefinition + """ + + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The object name, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.FunctionDefinition" = rest_field() + """The function definition details for the function tool. Required.""" + + @overload + def __init__( + self, + *, + function: "_models.FunctionDefinition", + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="function", **kwargs) + + class ChatCompletionsResponseFormat(_model_base.Model): """An abstract representation of a response format configuration usable by Chat Completions. Can be used to enable JSON @@ -185,7 +340,21 @@ class ChatCompletionsJsonResponseFormat(ChatCompletionsResponseFormat, discrimin value is \"json_object\".""" -class ChatCompletionsOptions(_model_base.Model): +class ChatCompletionsNamedToolSelection(_model_base.Model): + """An abstract representation of an explicit, named tool selection to use for a chat completions + request. + + All required parameters must be populated in order to send to server. 
+ + :ivar type: The object type. Required. + :vartype type: str + """ + + type: str = rest_discriminator(name="type") + """The object type. Required.""" + + +class ChatCompletionsOptions(_model_base.Model): # pylint: disable=too-many-instance-attributes """The configuration information for a chat completions request. Completions support a wide variety of tasks and generate text that continues from or "completes" @@ -199,8 +368,19 @@ class ChatCompletionsOptions(_model_base.Model): the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :vartype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :ivar max_tokens: The maximum number of tokens to generate. - :vartype max_tokens: int + :ivar frequency_penalty: A value that influences the probability of generated tokens appearing + based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. + :vartype frequency_penalty: float + :ivar presence_penalty: A value that influences the probability of generated tokens appearing + based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. + :vartype presence_penalty: float :ivar temperature: The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused @@ -216,32 +396,29 @@ class ChatCompletionsOptions(_model_base.Model): It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. :vartype top_p: float + :ivar max_tokens: The maximum number of tokens to generate. + :vartype max_tokens: int + :ivar response_format: An object specifying the format that the model must output. Used to + enable JSON mode. + :vartype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat :ivar stop: A collection of textual sequences that will end completions generation. :vartype stop: list[str] - :ivar presence_penalty: A value that influences the probability of generated tokens appearing - based on their existing - presence in generated text. - Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. - :vartype presence_penalty: float - :ivar frequency_penalty: A value that influences the probability of generated tokens appearing - based on their cumulative - frequency in generated text. - Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. - :vartype frequency_penalty: float :ivar stream: A value indicating whether chat completions should be streamed for this request. :vartype stream: bool + :ivar tools: The available tool definitions that the chat completions request can use, + including caller-defined functions. + :vartype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] + :ivar tool_choice: If specified, the model will configure which of the provided tools it can + use for the chat completions response. 
Is either a Union[str, + "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. + :vartype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or + ~azure.ai.inference.models.ChatCompletionsNamedToolSelection :ivar seed: If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.". :vartype seed: int - :ivar response_format: An object specifying the format that the model must output. Used to - enable JSON mode. - :vartype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat """ messages: List["_models.ChatRequestMessage"] = rest_field() @@ -249,8 +426,16 @@ class ChatCompletionsOptions(_model_base.Model): Typical usage begins with a chat message for the System role that provides instructions for the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required.""" - max_tokens: Optional[int] = rest_field() - """The maximum number of tokens to generate.""" + frequency_penalty: Optional[float] = rest_field() + """A value that influences the probability of generated tokens appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim.""" + presence_penalty: Optional[float] = rest_field() + """A value that influences the probability of generated tokens appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase the + model's likelihood to output new topics.""" temperature: Optional[float] = rest_field() """The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused @@ -264,43 +449,48 @@ class ChatCompletionsOptions(_model_base.Model): considered. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict.""" + max_tokens: Optional[int] = rest_field() + """The maximum number of tokens to generate.""" + response_format: Optional["_models.ChatCompletionsResponseFormat"] = rest_field() + """An object specifying the format that the model must output. Used to enable JSON mode.""" stop: Optional[List[str]] = rest_field() """A collection of textual sequences that will end completions generation.""" - presence_penalty: Optional[float] = rest_field() - """A value that influences the probability of generated tokens appearing based on their existing - presence in generated text. - Positive values will make tokens less likely to appear when they already exist and increase the - model's likelihood to output new topics.""" - frequency_penalty: Optional[float] = rest_field() - """A value that influences the probability of generated tokens appearing based on their cumulative - frequency in generated text. 
- Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim.""" stream: Optional[bool] = rest_field() """A value indicating whether chat completions should be streamed for this request.""" + tools: Optional[List["_models.ChatCompletionsToolDefinition"]] = rest_field() + """The available tool definitions that the chat completions request can use, including + caller-defined functions.""" + tool_choice: Optional[ + Union[str, "_models.ChatCompletionsToolSelectionPreset", "_models.ChatCompletionsNamedToolSelection"] + ] = rest_field() + """If specified, the model will configure which of the provided tools it can use for the chat + completions response. Is either a Union[str, \"_models.ChatCompletionsToolSelectionPreset\"] + type or a ChatCompletionsNamedToolSelection type.""" seed: Optional[int] = rest_field() """If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.\".""" - response_format: Optional["_models.ChatCompletionsResponseFormat"] = rest_field() - """An object specifying the format that the model must output. Used to enable JSON mode.""" @overload def __init__( self, *, messages: List["_models.ChatRequestMessage"], - max_tokens: Optional[int] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional["_models.ChatCompletionsResponseFormat"] = None, stop: Optional[List[str]] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, stream: Optional[bool] = None, + tools: Optional[List["_models.ChatCompletionsToolDefinition"]] = None, + tool_choice: Optional[ + Union[str, "_models.ChatCompletionsToolSelectionPreset", "_models.ChatCompletionsNamedToolSelection"] + ] = None, seed: Optional[int] = None, - response_format: Optional["_models.ChatCompletionsResponseFormat"] = None, ): ... @@ -336,7 +526,8 @@ class ChatRequestMessage(_model_base.Model): """An abstract representation of a chat message as provided in a request. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ChatRequestAssistantMessage, ChatRequestSystemMessage, ChatRequestUserMessage + ChatRequestAssistantMessage, ChatRequestSystemMessage, ChatRequestToolMessage, + ChatRequestUserMessage All required parameters must be populated in order to send to server. @@ -456,6 +647,49 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, role=ChatRole.SYSTEM, **kwargs) +class ChatRequestToolMessage(ChatRequestMessage, discriminator="tool"): + """A request chat message representing requested output from a configured tool. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message, which is always 'tool' for tool + messages. Required. The role that represents extension tool activity within a chat completions + operation. + :vartype role: str or ~azure.ai.inference.models.TOOL + :ivar content: The content of the message. Required. 
+ :vartype content: str + :ivar tool_call_id: The ID of the tool call resolved by the provided content. Required. + :vartype tool_call_id: str + """ + + role: Literal[ChatRole.TOOL] = rest_discriminator(name="role") # type: ignore + """The chat role associated with this message, which is always 'tool' for tool messages. Required. + The role that represents extension tool activity within a chat completions operation.""" + content: str = rest_field() + """The content of the message. Required.""" + tool_call_id: str = rest_field() + """The ID of the tool call resolved by the provided content. Required.""" + + @overload + def __init__( + self, + *, + content: str, + tool_call_id: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, role=ChatRole.TOOL, **kwargs) + + class ChatRequestUserMessage(ChatRequestMessage, discriminator="user"): """A request chat message representing user input to the assistant. @@ -510,6 +744,10 @@ class ChatResponseMessage(_model_base.Model): :vartype role: str or ~azure.ai.inference.models.ChatRole :ivar content: The content of the message. Required. :vartype content: str + :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to + subsequent input messages for the chat + completions request to resolve as configured. + :vartype tool_calls: list[~azure.ai.inference.models.ChatCompletionsToolCall] """ role: Union[str, "_models.ChatRole"] = rest_field() @@ -517,6 +755,10 @@ class ChatResponseMessage(_model_base.Model): \"assistant\", and \"tool\".""" content: str = rest_field() """The content of the message. Required.""" + tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field() + """The tool calls that must be resolved and have their outputs appended to subsequent input + messages for the chat + completions request to resolve as configured.""" @overload def __init__( @@ -524,6 +766,7 @@ def __init__( *, role: Union[str, "_models.ChatRole"], content: str, + tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None, ): ... @@ -582,3 +825,92 @@ def __init__(self, mapping: Mapping[str, Any]): def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation super().__init__(*args, **kwargs) + + +class FunctionCall(_model_base.Model): + """The name and arguments of a function that should be called, as generated by the model. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the function to call. Required. + :vartype name: str + :ivar arguments: The arguments to call the function with, as generated by the model in JSON + format. + Note that the model does not always generate valid JSON, and may hallucinate parameters + not defined by your function schema. Validate the arguments in your code before calling + your function. Required. + :vartype arguments: str + """ + + name: str = rest_field() + """The name of the function to call. Required.""" + arguments: str = rest_field() + """The arguments to call the function with, as generated by the model in JSON format. + Note that the model does not always generate valid JSON, and may hallucinate parameters + not defined by your function schema. Validate the arguments in your code before calling + your function. 
Required.""" + + @overload + def __init__( + self, + *, + name: str, + arguments: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FunctionDefinition(_model_base.Model): + """The definition of a caller-specified function that chat completions may invoke in response to + matching user input. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the function to be called. Required. + :vartype name: str + :ivar description: A description of what the function does. The model will use this description + when selecting the function and + interpreting its parameters. + :vartype description: str + :ivar parameters: The parameters the function accepts, described as a JSON Schema object. + :vartype parameters: any + """ + + name: str = rest_field() + """The name of the function to be called. Required.""" + description: Optional[str] = rest_field() + """A description of what the function does. The model will use this description when selecting the + function and + interpreting its parameters.""" + parameters: Optional[Any] = rest_field() + """The parameters the function accepts, described as a JSON Schema object.""" + + @overload + def __init__( + self, + *, + name: str, + description: Optional[str] = None, + parameters: Optional[Any] = None, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index a93b76c1e58a..cf849463e716 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -6,7 +6,7 @@ import logging import sys import azure.ai.inference as sdk -import azure.ai.inference as async_sdk +import azure.ai.inference.aio as async_sdk from os import path from typing import List, Optional, Union @@ -14,6 +14,8 @@ from azure.core.credentials import AzureKeyCredential from azure.core.exceptions import AzureError from azure.core.pipeline import PipelineRequest +import asyncio +import time # Set to True to enable SDK logging LOGGING_ENABLED = True @@ -22,7 +24,7 @@ # Create a logger for the 'azure' SDK # See https://docs.python.org/3/library/logging.html logger = logging.getLogger("azure") - logger.setLevel(logging.INFO) # INFO or DEBUG + logger.setLevel(logging.DEBUG) # INFO or DEBUG # Configure a console output handler = logging.StreamHandler(stream=sys.stdout) @@ -105,14 +107,24 @@ def _do_chat_completions( ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url) - async def _do_async_chat_completion( + async def _do_async_chat_completions( self, options: sdk.models.ChatCompletionsOptions, query_params: Optional[dict] = None, **kwargs, ): + start_time = time.time() - result = await self.async_client.chat_completions(options=options, params=query_params) + # Start the operation and get a Future object + future = 
asyncio.ensure_future(self.async_client.get_chat_completions(chat_completions_options=options)) + + # Loop until the operation is done + while not future.done(): + await asyncio.sleep(0.1) # sleep for 100 ms + print(f"Elapsed time: {int(1000*(time.time()- start_time))}ms") + + # Get the result (this will not block since the operation is done) + result = future.result() # Optional: console printout of all results if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS: @@ -155,7 +167,7 @@ async def _do_async_chat_completion_with_error( ): try: - result = await self.async_client.get_chat_completions(chat_completions_options=options) + result = await self.async_client.get_chat_completions(chat_completions_options=options) except AzureError as e: print(e) @@ -199,16 +211,18 @@ def _validate_chat_completions_results(result: sdk.models.ChatCompletions): @staticmethod def _print_chat_completions_results(result: sdk.models.ChatCompletions): + print(" Chat Completions:") + for choice in result.choices: - print(f" choices[0].message.content: {choice.message.content}") - print(" choices[0].message.role: {}".format(choice.message.role)) - print(" choices[0].finish_reason: {}".format(choice.finish_reason)) - print(" choices[0].index: {}".format(choice.index)) - - print(" id: {}".format(result.id)) - print(" created: {}".format(result.created)) - print(" model: {}".format(result.model)) - print(" object: {}".format(result.object)) - print(" usage.prompt_tokens: {}".format(result.usage.prompt_tokens)) - print(" usage.completion_tokens: {}".format(result.usage.completion_tokens)) - print(" usage.total_tokens: {}".format(result.usage.total_tokens)) + print(f"\tchoices[0].message.content: {choice.message.content}") + print("\tchoices[0].message.role: {}".format(choice.message.role)) + print("\tchoices[0].finish_reason: {}".format(choice.finish_reason)) + print("\tchoices[0].index: {}".format(choice.index)) + + print("\tid: {}".format(result.id)) + print("\tcreated: {}".format(result.created)) + print("\tmodel: {}".format(result.model)) + print("\tobject: {}".format(result.object)) + print("\tusage.prompt_tokens: {}".format(result.usage.prompt_tokens)) + print("\tusage.completion_tokens: {}".format(result.usage.completion_tokens)) + print("\tusage.total_tokens: {}".format(result.usage.total_tokens)) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 136d3221e2bf..e5a33c3c8210 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ import inspect -import azure.ai.vision.imageanalysis as sdk +import azure.ai.inference as sdk from model_inference_test_base import ModelInferenceTestBase, ServicePreparer from devtools_testutils.aio import recorded_by_proxy_async @@ -22,15 +22,18 @@ class TestImageAnalysisAsyncClient(ModelInferenceTestBase): @recorded_by_proxy_async async def test_async_chat_completion(self, **kwargs): - self._create_client_for_standard_analysis(sync=False, **kwargs) + self._create_client_for_standard_test(sync=False, **kwargs) - await self._do_async_chat_completions( - options=[ - messages=[ - role=sdk.models.ChatRole.USER, + options = sdk.models.ChatCompletionsOptions( + messages=[ + sdk.models.ChatRequestUserMessage( content="How many feet are in a mile?" 
- ] - ], + ) + ] + ) + + await self._do_async_chat_completions( + options=options, **kwargs ) @@ -73,7 +76,7 @@ async def test_analyze_async_single_feature_from_url(self, **kwargs): # # ********************************************************************************** - """ @ServicePreparer() +""" @ServicePreparer() @recorded_by_proxy_async async def test_analyze_async_authentication_failure(self, **kwargs): diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 4e27940d0cf9..631eeb3ed90b 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -21,11 +21,17 @@ class TestModelClient(ModelInferenceTestBase): # Test one chat completion @ServicePreparer() @recorded_by_proxy - def test_sync_chat_completion(self, **kwargs): + def test_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=True, **kwargs) - options = sdk.models.ChatCompletionsOptions(messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")]) + options = sdk.models.ChatCompletionsOptions( + messages=[ + sdk.models.ChatRequestUserMessage( + content="How many feet are in a mile?" + ) + ] + ) self._do_chat_completions( options=options, diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 04190fe5d499..f17a49413995 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelInference -commit: 1b950ebcf6b58804a387e17641b1c933aeb13f4b +commit: a5b5fe9e6c1451c8f33cbcc52291204ed62e6a26 repo: Azure/azure-rest-api-specs additionalDirectories: From 2bdb446bf2099057e79597a84c3a277bc6b44183 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 28 Mar 2024 20:27:39 -0700 Subject: [PATCH 007/112] Add basic samples --- .../sample_chat_completion_async.py | 86 +++++++++++++++++++ .../samples/sample_chat_completion.py | 75 ++++++++++++++++ .../tests/model_inference_test_base.py | 5 +- 3 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completion.py diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py new file mode 100644 index 000000000000..fd4b77965e13 --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py @@ -0,0 +1,86 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completion response from the service using an asynchronous client. + +USAGE: + python sample_chat_completion_async.py + + Set these two environment variables before running the sample: + 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://..inference.ai.azure.com + where `deployment-name` is your unique AI Model deployment name, and + `azure-region` is the Azure region where your model is deployed. + 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. 
+""" +import asyncio + +async def sample_chat_completion_async(): + import os + from azure.ai.inference.aio import ModelClient + from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage + from azure.core.credentials import AzureKeyCredential + + # Read the values of your model endpoint and key from environment variables + try: + endpoint = os.environ["MODEL_ENDPOINT"] + key = os.environ["MODEL_KEY"] + except KeyError: + print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Set them before running this sample.") + exit() + + # Create an Image Analysis client for synchronous operations + client = ModelClient( + endpoint=endpoint, + credential=AzureKeyCredential(key) + ) + + # Start the operation and get a Future object + future = asyncio.ensure_future( + client.get_chat_completions( + chat_completions_options=ChatCompletionsOptions( + messages=[ + ChatRequestSystemMessage( + content="You are an AI assistant that helps people find information." + ), + ChatRequestUserMessage( + content="How many feet are in a mile?" + ) + ] + ) + ) + ) + + # Loop until the operation is done + while not future.done(): + await asyncio.sleep(0.1) + print("Waiting...") + + # Get the result + result = future.result() + await client.close() + + # Print results the the console + print("Chat Completions:") + for index, choice in enumerate(result.choices): + print(f"choices[{index}].message.content: {choice.message.content}") + print(f"choices[{index}].message.role: {choice.message.role}") + print(f"choices[{index}].finish_reason: {choice.finish_reason}") + print(f"choices[{index}].index: {choice.index}") + print(f"id: {result.id}") + print(f"created: {result.created}") + print(f"model: {result.model}") + print(f"object: {result.object}") + print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") + print(f"usage.completion_tokens: {result.usage.completion_tokens}") + print(f"usage.total_tokens: {result.usage.total_tokens}") + +async def main(): + await sample_chat_completion_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py new file mode 100644 index 000000000000..60047e98e1fe --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py @@ -0,0 +1,75 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completion response from the service using a synchronous client. + +USAGE: + python sample_chat_completion.py + + Set these two environment variables before running the sample: + 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://..inference.ai.azure.com + where `deployment-name` is your unique AI Model deployment name, and + `azure-region` is the Azure region where your model is deployed. + 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. 
+""" + + +def sample_chat_completion(): + # [START create_client] + import os + from azure.ai.inference import ModelClient + from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage + from azure.core.credentials import AzureKeyCredential + + # Read the values of your model endpoint and key from environment variables + try: + endpoint = os.environ["MODEL_ENDPOINT"] + key = os.environ["MODEL_KEY"] + except KeyError: + print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Set them before running this sample.") + exit() + + # Create an Image Analysis client for synchronous operations + client = ModelClient( + endpoint=endpoint, + credential=AzureKeyCredential(key) + ) + # [END create_client] + + # [START chat-completion] + # Do a single chat completion operation. This will be a synchronously (blocking) call. + result = client.get_chat_completions( + chat_completions_options=ChatCompletionsOptions( + messages=[ + ChatRequestSystemMessage( + content="You are an AI assistant that helps people find information." + ), + ChatRequestUserMessage( + content="How many feet are in a mile?" + ) + ] + )) + + # Print results the the console + print("Chat Completions:") + for index, choice in enumerate(result.choices): + print(f"choices[{index}].message.content: {choice.message.content}") + print(f"choices[{index}].message.role: {choice.message.role}") + print(f"choices[{index}].finish_reason: {choice.finish_reason}") + print(f"choices[{index}].index: {choice.index}") + print(f"id: {result.id}") + print(f"created: {result.created}") + print(f"model: {result.model}") + print(f"object: {result.object}") + print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") + print(f"usage.completion_tokens: {result.usage.completion_tokens}") + print(f"usage.total_tokens: {result.usage.total_tokens}") + # [END chat-completion] + + +if __name__ == "__main__": + sample_chat_completion() diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index cf849463e716..272e67fdcf88 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -7,6 +7,8 @@ import sys import azure.ai.inference as sdk import azure.ai.inference.aio as async_sdk +import asyncio +import time from os import path from typing import List, Optional, Union @@ -14,8 +16,7 @@ from azure.core.credentials import AzureKeyCredential from azure.core.exceptions import AzureError from azure.core.pipeline import PipelineRequest -import asyncio -import time + # Set to True to enable SDK logging LOGGING_ENABLED = True From ad9992c39708553efac95d9b057a4608fc5d194c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 29 Mar 2024 09:01:28 -0700 Subject: [PATCH 008/112] Ignore spelling errors --- .vscode/cspell.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index c9c2a69146c3..a5e90369e3b8 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -1276,6 +1276,15 @@ "smirnov" ] }, + { + "filename": "sdk/ai/azure-ai-inference/**", + "words": [ + "mros", + "Nify", + "ctxt", + "wday" + ] + }, { "filename": "sdk/ai/azure-ai-generative/**", "words": [ From 59bf4f34ab800a142838dbee72c4d13e039b19d9 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 29 Mar 
+
+    # Print results to the console
+    print("Chat Completions:")
+    for index, choice in enumerate(result.choices):
+        print(f"choices[{index}].message.content: {choice.message.content}")
+        print(f"choices[{index}].message.role: {choice.message.role}")
+        print(f"choices[{index}].finish_reason: {choice.finish_reason}")
+        print(f"choices[{index}].index: {choice.index}")
+    print(f"id: {result.id}")
+    print(f"created: {result.created}")
+    print(f"model: {result.model}")
+    print(f"object: {result.object}")
+    print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
+    print(f"usage.completion_tokens: {result.usage.completion_tokens}")
+    print(f"usage.total_tokens: {result.usage.total_tokens}")
+    # [END chat-completion]
+
+
+if __name__ == "__main__":
+    sample_chat_completion()
diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
index cf849463e716..272e67fdcf88 100644
--- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
+++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
@@ -7,6 +7,8 @@ import sys
 import azure.ai.inference as sdk
 import azure.ai.inference.aio as async_sdk
+import asyncio
+import time
 from os import path
 from typing import List, Optional, Union
@@ -14,8 +16,7 @@ from azure.core.credentials import AzureKeyCredential
 from azure.core.exceptions import AzureError
 from azure.core.pipeline import PipelineRequest
-import asyncio
-import time
+
 # Set to True to enable SDK logging
 LOGGING_ENABLED = True
From ad9992c39708553efac95d9b057a4608fc5d194c Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Fri, 29 Mar 2024 09:01:28 -0700
Subject: [PATCH 008/112] Ignore spelling errors

---
 .vscode/cspell.json | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/.vscode/cspell.json b/.vscode/cspell.json
index c9c2a69146c3..a5e90369e3b8 100644
--- a/.vscode/cspell.json
+++ b/.vscode/cspell.json
@@ -1276,6 +1276,15 @@
         "smirnov"
       ]
     },
+    {
+      "filename": "sdk/ai/azure-ai-inference/**",
+      "words": [
+        "mros",
+        "Nify",
+        "ctxt",
+        "wday"
+      ]
+    },
     {
       "filename": "sdk/ai/azure-ai-generative/**",
       "words": [
From 59bf4f34ab800a142838dbee72c4d13e039b19d9 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Fri, 29 Mar 2024 09:35:34 -0700
Subject: [PATCH 009/112] fix `tox run -e sphinx` errors

---
 sdk/ai/azure-ai-inference/samples/README.md | 86 +++++++++++++++++++++
 sdk/ai/azure-ai-inference/tests/README.md   |  4 +-
 2 files changed, 88 insertions(+), 2 deletions(-)
 create mode 100644 sdk/ai/azure-ai-inference/samples/README.md

diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md
new file mode 100644
index 000000000000..65268b51c10f
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/README.md
@@ -0,0 +1,86 @@
+---
+page_type: sample
+languages:
+  - python
+products:
+  - azure
+  - azure-ai
+urlFragment: model-inference-samples
+---
+
+# Samples for the model client library for Python
+
+These are runnable console Python programs that show how to do chat completion using the model client. Most samples are in this folder
+and use the synchronous client. Samples in the subfolder `async_samples` use the asynchronous client.
+The concepts are similar; you can easily modify any of the samples to your needs.
+
+## Synchronous client samples
+
+|**File Name**|**Description**|
+|----------------|-------------|
+|[sample_chat_completion.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py) | One chat completion operation using a synchronous client. |
+
+## Asynchronous client samples
+
+|**File Name**|**Description**|
+|----------------|-------------|
+|[sample_chat_completion_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py) | One chat completion operation using an asynchronous client. |
+
+## Prerequisites
+
+See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#prerequisites) here.
+
+## Setup
+
+* Clone or download this sample repository
+* Open a command prompt / terminal window in this samples folder
+* Install the model client library for Python with pip:
+  ```bash
+  pip install azure-ai-inference
+  ```
+* If you plan to run the asynchronous client samples, install the additional package [aiohttp](https://pypi.org/project/aiohttp/):
+  ```bash
+  pip install aiohttp
+  ```
+
+## Set environment variables
+
+See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#set-environment-variables) here.
+
+## Running the samples
+
+To run the first sample, type:
+```bash
+python sample_chat_completion.py
+```
+similarly for the other samples.
+
+## Example console output
+
+The sample `sample_chat_completion_async.py` sends the following system and user messages in a single call:
+
+- System: "You are an AI assistant that helps people find information."
+- User: "How many feet are in a mile?"
+
+And prints out the service response. It should look similar to the following:
+
+```text
+Chat Completions:
+choices[0].message.content: There are 5,280 feet in a mile.
+choices[0].message.role: assistant +choices[0].finish_reason: stop +choices[0].index: 0 +id: 93f5bea2-11ec-4b31-af73-cb663196ebd5 +created: 1970-01-14 01:11:54+00:00 +model: Llama-2-70b-chat +object: chat.completion +usage.prompt_tokens: 41 +usage.completion_tokens: 15 +usage.total_tokens: 56 +``` + +## Troubleshooting + +See [Troubleshooting](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#troubleshooting) here. + + diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index bc4df8af54ad..a83ac986a9d7 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -4,7 +4,7 @@ ### Prerequisites -See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/vision/azure-ai-vision-imageanalysis/README.md#prerequisites). Create an Azure resource in one of the GPU-supported regions, otherwise some of the tests will fail. +See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#prerequisites). Create an Azure resource in one of the GPU-supported regions, otherwise some of the tests will fail. ### Setup @@ -29,7 +29,7 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ ### Set environment variables -See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/vision/azure-ai-vision-imageanalysis/README.md#set-environment-variables). +See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#set-environment-variables). In addition, the following environment values **must be** defined, although not used. Assign any value to them: ``` From 8d3948c26893f592263b8561a3a63bc56fdfc96a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 29 Mar 2024 20:18:20 -0700 Subject: [PATCH 010/112] Update SDK to support embeddings. Add sample. 
Add root README.md

---
 sdk/ai/azure-ai-inference/README.md           | 271 +++++++++++++++--
 .../ai/inference/_operations/_operations.py   | 280 ++++++++++++++++++
 .../inference/aio/_operations/_operations.py  | 260 +++++++++++++++-
 .../azure/ai/inference/models/__init__.py     |  10 +
 .../azure/ai/inference/models/_enums.py       |  11 +
 .../azure/ai/inference/models/_models.py      | 190 +++++++++++-
 .../azure-ai-inference/dev_requirements.txt   |   1 -
 sdk/ai/azure-ai-inference/samples/README.md   |   6 +-
 ...nc.py => sample_chat_completions_async.py} |  23 +-
 .../async_samples/sample_embeddings_async.py  |  82 +++++
 ...mpletion.py => sample_chat_completions.py} |  26 +-
 .../samples/sample_embeddings.py              |  92 ++++++
 .../tests/model_inference_test_base.py        |  33 +--
 .../test_model_inference_async_client.py      |  27 +-
 .../tests/test_model_inference_client.py      |  29 +-
 sdk/ai/azure-ai-inference/tsp-location.yaml   |   4 +-
 16 files changed, 1221 insertions(+), 124 deletions(-)
 rename sdk/ai/azure-ai-inference/samples/async_samples/{sample_chat_completion_async.py => sample_chat_completions_async.py} (81%)
 create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
 rename sdk/ai/azure-ai-inference/samples/{sample_chat_completion.py => sample_chat_completions.py} (81%)
 create mode 100644 sdk/ai/azure-ai-inference/samples/sample_embeddings.py

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 497f318baeed..938d18c0be8a 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -1,28 +1,268 @@
+# Azure AI Model Client Library for Python
+The Model Client library allows you to run inference against any of the AI models you deployed to Azure. It supports both "models as a service" and "models with hosted, managed infrastructure". For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+
+Use the model client library to:
+
+* Authenticate against the service
+* Get chat completions
+* Get embeddings
+
+Note that for inference against OpenAI models hosted on Azure, you should use the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client.
+
+[Product documentation](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview)
+| [Samples](https://aka.ms/azsdk/model-client/samples/python)
+| [API reference documentation](https://aka.ms/azsdk/model-client/ref-docs/python)
+| [Package (PyPI)](https://aka.ms/azsdk/model-client/package/pypi)
+| [SDK source code](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/azure/ai/inference)

 ## Getting started

+### Prerequisites
+
+* [Python 3.8](https://www.python.org/) or later installed, including [pip](https://pip.pypa.io/en/stable/).
+* An [Azure subscription](https://azure.microsoft.com/free).
+* A [TBD resource](https://azure.microsoft.com/) in your Azure subscription. You will need the key and endpoint from this resource to authenticate against the service.
+
+### Install the Model Client package

 ```bash
-python -m pip install azure-ai-inference
+pip install azure-ai-inference
 ```

+### Set environment variables
+
+To authenticate the `ModelClient`, you will need the endpoint and key from your TBD resource in the [Azure Portal](https://portal.azure.com). The code snippet below assumes these values are stored in environment variables:
+
+* Set the environment variable `MODEL_ENDPOINT` to the endpoint URL. It has the form `https://your-model-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-model-deployment-name` is your unique TBD resource name.
+
+* Set the environment variable `MODEL_KEY` to the key. The key is a 32-character string.
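+
+For example, in a bash shell you might set them like this (the values below are
+placeholders, not working credentials):
+
+```bash
+export MODEL_ENDPOINT="https://your-model-deployment-name.your-azure-region.inference.ai.azure.com"
+export MODEL_KEY="<your-32-character-key>"
+```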
+
+Note that the client library does not directly read these environment variables at run time. The endpoint and key must be provided to the constructor of `ModelClient` in your code. The code snippet below reads environment variables to promote the practice of not hard-coding secrets in your source code.
+
+### Create and authenticate the client
+
+Once you define the environment variables, this Python code will create and authenticate a synchronous `ModelClient`:
+
+```python
+import os
+from azure.ai.inference import ModelClient
+from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage
+from azure.core.credentials import AzureKeyCredential
+
+# Read the values of your model endpoint and key from environment variables
+try:
+    endpoint = os.environ["MODEL_ENDPOINT"]
+    key = os.environ["MODEL_KEY"]
+except KeyError:
+    print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+    print("Set them before running this sample.")
+    exit()
+
+# Create a Model Client for synchronous operations
+client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+```
+
+A synchronous client supports synchronous inference methods, meaning they will block until the service responds with inference results. The code snippets below all use synchronous methods because they are easier to follow in a getting-started guide. The SDK offers equivalent asynchronous APIs, which are often preferred. To create an asynchronous client, do the following:
+
+* Update the above code to import `ModelClient` from the `aio` namespace:
+
+  ```python
+  from azure.ai.inference.aio import ModelClient
+  ```
+
+* Install the additional package [aiohttp](https://pypi.org/project/aiohttp/):
+
+  ```bash
+  pip install aiohttp
+  ```
+
+## Key concepts
+
+### Chat Completions
+
+TBD
+
+Target the `/v1/chat/completions` route
+
+### Embeddings
+
+TBD
+
+Target the `/v1/embeddings` route
+
+## Examples
+
+The following sections provide code snippets covering these common scenarios:
+
+* [Chat completions](#chat-completions-example)
+* [Embeddings](#embeddings-example)
+
+These snippets use the synchronous `client` from [Create and authenticate the client](#create-and-authenticate-the-client).
+
+See the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder for fully working samples for synchronous and asynchronous clients.
+
+### Chat completions example
+
+This example demonstrates how to generate chat completions.
+
+```python
+# Do a single chat completion operation. This will be a synchronous (blocking) call.
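+# The messages list carries the conversation: an optional system message that
+# sets the assistant's behavior, followed by the user's prompt (and, in
+# multi-turn conversations, earlier assistant replies).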
+result = client.get_chat_completions(
+    chat_completions_options=ChatCompletionsOptions(
+        messages=[
+            ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
+            ChatRequestUserMessage(content="How many feet are in a mile?"),
+        ]
+    )
+)
+
+# Print results to the console
+print("Chat Completions:")
+for index, choice in enumerate(result.choices):
+    print(f"choices[{index}].message.content: {choice.message.content}")
+    print(f"choices[{index}].message.role: {choice.message.role}")
+    print(f"choices[{index}].finish_reason: {choice.finish_reason}")
+    print(f"choices[{index}].index: {choice.index}")
+print(f"id: {result.id}")
+print(f"created: {result.created}")
+print(f"model: {result.model}")
+print(f"object: {result.object}")
+print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
+print(f"usage.completion_tokens: {result.usage.completion_tokens}")
+print(f"usage.total_tokens: {result.usage.total_tokens}")
+```
+
+To generate completions for additional messages, simply call `get_chat_completions` multiple times using the same `ModelClient`.
+
+### Embeddings example
+
+This example demonstrates how to get embeddings.
+
+```python
+from azure.ai.inference.models import EmbeddingsOptions
+
+# Do a single embeddings operation. This will be a synchronous (blocking) call.
+result = client.get_embeddings(
+    embeddings_options=EmbeddingsOptions(
+        input=[
+            "first sentence",
+            "second sentence",
+            "third sentence"
+        ]
+    )
+)
+
+# Print results to the console
+print("Embeddings result:")
+for index, item in enumerate(result.data):
+    length = len(item.embedding)
+    print(f"data[{index}].index: {item.index}")
+    print(f"data[{index}].embedding[0]: {item.embedding[0]}")
+    print(f"data[{index}].embedding[1]: {item.embedding[1]}")
+    print("...")
+    print(f"data[{index}].embedding[{length-2}]: {item.embedding[length-2]}")
+    print(f"data[{index}].embedding[{length-1}]: {item.embedding[length-1]}")
+print(f"id: {result.id}")
+print(f"model: {result.model}")
+print(f"object: {result.object}")
+print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
+print(f"usage.total_tokens: {result.usage.total_tokens}")
+```
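+
+Embeddings are commonly compared with cosine similarity. As a minimal sketch in
+plain Python (not an SDK API), assuming `result` is the `EmbeddingsResult` from
+the example above:
+
+```python
+import math
+
+def cosine_similarity(a, b):
+    # Dot product divided by the product of the vector magnitudes.
+    dot = sum(x * y for x, y in zip(a, b))
+    return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b)))
+
+# Relatedness of the first two input sentences from the request above.
+print(cosine_similarity(result.data[0].embedding, result.data[1].embedding))
+```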
+
+## Troubleshooting
+
+### Exceptions
+
+The `get_chat_completions` and `get_embeddings` methods raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue:
+
+```python
+from azure.core.exceptions import HttpResponseError
+
+...
+
+try:
+    result = client.get_chat_completions( ... )
+except HttpResponseError as e:
+    print(f"Status code: {e.status_code} ({e.reason})")
+    print(f"{e}")
+```
+
+For example, when you provide a wrong authentication key:
+
+```text
+Status code: 401 (Unauthorized)
+Operation returned an invalid status 'Unauthorized'
+Content: {"status": "Invalid auth token"}
+```
+
+Or for example when you call `get_embeddings` on a model that does not support the `/v1/embeddings` route:
+
+```text
+Status code: 424 (Failed Dependency)
+Operation returned an invalid status 'Failed Dependency'
+Content: {"detail":"Not Found"}
+```
+
+### Logging
+
+The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following:
+
+```python
+import sys
+import logging
+
+# Acquire the logger for this client library. Use 'azure' to affect both
+# the 'azure.core' and 'azure.ai.inference' libraries.
+logger = logging.getLogger("azure")
+
+# Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+logger.setLevel(logging.INFO)
+
+# Direct logging output to stdout (the default):
+handler = logging.StreamHandler(stream=sys.stdout)
+# Or direct logging output to a file:
+# handler = logging.FileHandler(filename = 'sample.log')
+logger.addHandler(handler)
+
+# Optional: change the default logging format. Here we add a timestamp.
+formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+handler.setFormatter(formatter)
+```
+
+By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct `ModelClient`, or when you call any of the client's operation methods (e.g. `get_chat_completions`).
+
+```python
+# Create a Model Client with non-redacted logging
+client = ModelClient(
+    endpoint=endpoint,
+    credential=AzureKeyCredential(key),
+    logging_enable=True
+)
+```
+
+Non-redacted logs are generated for log level `logging.DEBUG` only. Be sure to protect non-redacted logs to avoid compromising security. For more information see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging).
+
+## Next steps
+
+* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder, containing fully runnable Python code for chat completions and embeddings (synchronous and asynchronous clients).

 ## Contributing

 This project welcomes contributions and suggestions. Most contributions
 require you to agree to a Contributor License Agreement (CLA) declaring
 that you have the right to, and actually do, grant us the rights to use
 your contribution.
-For details, visit https://cla.microsoft.com.
+For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).

 When you submit a pull request, a CLA-bot will automatically determine
 whether you need to provide a CLA and decorate the PR appropriately
 (e.g., label,
@@ -30,16 +270,11 @@ comment). Simply follow the instructions provided by
 the bot. You will only need to do this once across all repos using our CLA.

 This project has adopted the
-[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
+[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct). For more information,
 see the Code of Conduct FAQ or contact
 opencode@microsoft.com with any additional questions or comments.
- -[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token -[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials -[azure_identity_pip]: https://pypi.org/project/azure-identity/ -[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential -[pip]: https://pypi.org/project/pip/ -[azure_sub]: https://azure.microsoft.com/free/ + diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 926eb75f37ec..3ba8edf751b0 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -63,6 +63,28 @@ def build_model_get_chat_completions_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) +def build_model_get_embeddings_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v1/embeddings" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + class ModelClientOperationsMixin(ModelClientMixinABC): @overload def get_chat_completions( @@ -488,3 +510,261 @@ def get_chat_completions( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @overload + def get_embeddings( + self, embeddings_options: _models.EmbeddingsOptions, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Required. + :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + embeddings_options = { + "input": [ + "str" # Input texts to get embeddings for, encoded as a an array of + strings. Required. + ], + "input_type": "str" # Optional. Specifies the input type to use for + embedding search. Known values are: "text", "query", and "document". 
+ } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + def get_embeddings( + self, embeddings_options: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Required. + :type embeddings_options: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + def get_embeddings( + self, embeddings_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Required. + :type embeddings_options: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. 
+ These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @distributed_trace + def get_embeddings( + self, embeddings_options: Union[_models.EmbeddingsOptions, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Is one of the + following types: EmbeddingsOptions, JSON, IO[bytes] Required. + :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions or JSON or IO[bytes] + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + embeddings_options = { + "input": [ + "str" # Input texts to get embeddings for, encoded as a an array of + strings. Required. + ], + "input_type": "str" # Optional. Specifies the input type to use for + embedding search. Known values are: "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. 
+ } + } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(embeddings_options, (IOBase, bytes)): + _content = embeddings_options + else: + _content = json.dumps(embeddings_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_embeddings_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 292a5fce4be3..f9413d110cdd 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -26,7 +26,7 @@ from ... import models as _models from ..._model_base import SdkJSONEncoder, _deserialize -from ..._operations._operations import build_model_get_chat_completions_request +from ..._operations._operations import build_model_get_chat_completions_request, build_model_get_embeddings_request from .._vendor import ModelClientMixinABC if sys.version_info >= (3, 9): @@ -463,3 +463,261 @@ async def get_chat_completions( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @overload + async def get_embeddings( + self, embeddings_options: _models.EmbeddingsOptions, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Required. + :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ embeddings_options = { + "input": [ + "str" # Input texts to get embeddings for, encoded as a an array of + strings. Required. + ], + "input_type": "str" # Optional. Specifies the input type to use for + embedding search. Known values are: "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + async def get_embeddings( + self, embeddings_options: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Required. + :type embeddings_options: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + async def get_embeddings( + self, embeddings_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Required. + :type embeddings_options: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @distributed_trace_async + async def get_embeddings( + self, embeddings_options: Union[_models.EmbeddingsOptions, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given prompt. + + :param embeddings_options: The JSON payload containing embedding options. Is one of the + following types: EmbeddingsOptions, JSON, IO[bytes] Required. + :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions or JSON or IO[bytes] + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + embeddings_options = { + "input": [ + "str" # Input texts to get embeddings for, encoded as a an array of + strings. Required. + ], + "input_type": "str" # Optional. Specifies the input type to use for + embedding search. Known values are: "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "prompt_tokens": 0, # Number of tokens sent in the original request. + Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. 
+ } + } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(embeddings_options, (IOBase, bytes)): + _content = embeddings_options + else: + _content = json.dumps(embeddings_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_embeddings_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 100137e8ca4c..bcb20736c6f1 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -24,12 +24,17 @@ from ._models import ChatRequestUserMessage from ._models import ChatResponseMessage from ._models import CompletionsUsage +from ._models import EmbeddingItem +from ._models import EmbeddingsOptions +from ._models import EmbeddingsResult +from ._models import EmbeddingsUsage from ._models import FunctionCall from ._models import FunctionDefinition from ._enums import ChatCompletionsToolSelectionPreset from ._enums import ChatRole from ._enums import CompletionsFinishReason +from ._enums import EmbeddingInputType from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -53,11 +58,16 @@ "ChatRequestUserMessage", "ChatResponseMessage", "CompletionsUsage", + "EmbeddingItem", + "EmbeddingsOptions", + "EmbeddingsResult", + "EmbeddingsUsage", "FunctionCall", "FunctionDefinition", "ChatCompletionsToolSelectionPreset", "ChatRole", "CompletionsFinishReason", + "EmbeddingInputType", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index d226ae417513..6b9ea49109e9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -47,3 +47,14 @@ class CompletionsFinishReason(str, Enum, 
metaclass=CaseInsensitiveEnumMeta):
     CONTENT_FILTERED = "content_filter"
     """Completions generated a response that was identified as potentially sensitive per content moderation policies."""
+
+
+class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Represents the input types used for embedding search."""
+
+    TEXT = "text"
+    """Indicates the input is a general text input."""
+    QUERY = "query"
+    """Indicates the input represents a search query to find the most relevant documents in your vector database."""
+    DOCUMENT = "document"
+    """Indicates the input represents a document that is stored in a vector database."""
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
index 18f07916556c..f449f53d7fe9 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
@@ -571,8 +571,6 @@ class ChatRequestAssistantMessage(ChatRequestMessage, discriminator="assistant")
     :vartype role: str or ~azure.ai.inference.models.ASSISTANT
     :ivar content: The content of the message. Required.
     :vartype content: str
-    :ivar name: An optional name for the participant.
-    :vartype name: str
     """
 
     role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role")  # type: ignore
@@ -580,15 +578,12 @@ class ChatRequestAssistantMessage(ChatRequestMessage, discriminator="assistant")
     Required. The role that provides responses to system-instructed, user-prompted input."""
     content: str = rest_field()
     """The content of the message. Required."""
-    name: Optional[str] = rest_field()
-    """An optional name for the participant."""
 
     @overload
     def __init__(
         self,
         *,
         content: str,
-        name: Optional[str] = None,
     ):
         ...
 
@@ -615,8 +610,6 @@ class ChatRequestSystemMessage(ChatRequestMessage, discriminator="system"):
     :vartype role: str or ~azure.ai.inference.models.SYSTEM
     :ivar content: The contents of the system message. Required.
     :vartype content: str
-    :ivar name: An optional name for the participant.
-    :vartype name: str
     """
 
     role: Literal[ChatRole.SYSTEM] = rest_discriminator(name="role")  # type: ignore
@@ -624,15 +617,12 @@ class ChatRequestSystemMessage(ChatRequestMessage, discriminator="system"):
     Required. The role that instructs or sets the behavior of the assistant."""
     content: str = rest_field()
     """The contents of the system message. Required."""
-    name: Optional[str] = rest_field()
-    """An optional name for the participant."""
 
     @overload
     def __init__(
         self,
         *,
         content: str,
-        name: Optional[str] = None,
     ):
         ...
 
@@ -701,8 +691,6 @@ class ChatRequestUserMessage(ChatRequestMessage, discriminator="user"):
     :ivar content: The contents of the user message, with available input types varying by
      selected model. Required.
     :vartype content: str
-    :ivar name: An optional name for the participant.
-    :vartype name: str
     """
 
     role: Literal[ChatRole.USER] = rest_discriminator(name="role")  # type: ignore
@@ -711,15 +699,12 @@ class ChatRequestUserMessage(ChatRequestMessage, discriminator="user"):
     content: str = rest_field()
     """The contents of the user message, with available input types varying by selected model.
     Required."""
-    name: Optional[str] = rest_field()
-    """An optional name for the participant."""
 
     @overload
     def __init__(
         self,
         *,
         content: str,
-        name: Optional[str] = None,
     ):
         ...
 
@@ -827,6 +812,181 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useles
         super().__init__(*args, **kwargs)
 
 
+class EmbeddingItem(_model_base.Model):
+    """Representation of a single embeddings relatedness comparison.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar embedding: List of embeddings value for the input prompt.
These represent a measurement + of the + vector-based relatedness of the provided input. Required. + :vartype embedding: list[float] + :ivar index: Index of the prompt to which the EmbeddingItem corresponds. Required. + :vartype index: int + :ivar object: The object type of this embeddings item. Will always be ``embedding``. Required. + :vartype object: str + """ + + embedding: List[float] = rest_field() + """List of embeddings value for the input prompt. These represent a measurement of the + vector-based relatedness of the provided input. Required.""" + index: int = rest_field() + """Index of the prompt to which the EmbeddingItem corresponds. Required.""" + object: str = rest_field() + """The object type of this embeddings item. Will always be ``embedding``. Required.""" + + @overload + def __init__( + self, + *, + embedding: List[float], + index: int, + object: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class EmbeddingsOptions(_model_base.Model): + """The configuration information for an embeddings request. + Embeddings measure the relatedness of text strings and are commonly used for search, + clustering, + recommendations, and other similar scenarios. + + All required parameters must be populated in order to send to server. + + :ivar input: Input texts to get embeddings for, encoded as a an array of strings. Required. + :vartype input: list[str] + :ivar input_type: Specifies the input type to use for embedding search. Known values are: + "text", "query", and "document". + :vartype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + """ + + input: List[str] = rest_field() + """Input texts to get embeddings for, encoded as a an array of strings. Required.""" + input_type: Optional[Union[str, "_models.EmbeddingInputType"]] = rest_field() + """Specifies the input type to use for embedding search. Known values are: \"text\", \"query\", + and \"document\".""" + + @overload + def __init__( + self, + *, + input: List[str], + input_type: Optional[Union[str, "_models.EmbeddingInputType"]] = None, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class EmbeddingsResult(_model_base.Model): + """Representation of the response data from an embeddings request. + Embeddings measure the relatedness of text strings and are commonly used for search, + clustering, + recommendations, and other similar scenarios. + + All required parameters must be populated in order to send to server. + + :ivar id: Unique identifier for the embeddings result. Required. + :vartype id: str + :ivar data: Embedding values for the prompts submitted in the request. Required. + :vartype data: list[~azure.ai.inference.models.EmbeddingItem] + :ivar usage: Usage counts for tokens input using the embeddings API. Required. + :vartype usage: ~azure.ai.inference.models.EmbeddingsUsage + :ivar object: The object type of the embeddings result. Will always be ``list``. Required. + :vartype object: str + :ivar model: The model ID used to generate this result. Required. 
+ :vartype model: str + """ + + id: str = rest_field() + """Unique identifier for the embeddings result. Required.""" + data: List["_models.EmbeddingItem"] = rest_field() + """Embedding values for the prompts submitted in the request. Required.""" + usage: "_models.EmbeddingsUsage" = rest_field() + """Usage counts for tokens input using the embeddings API. Required.""" + object: str = rest_field() + """The object type of the embeddings result. Will always be ``list``. Required.""" + model: str = rest_field() + """The model ID used to generate this result. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + data: List["_models.EmbeddingItem"], + usage: "_models.EmbeddingsUsage", + object: str, + model: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class EmbeddingsUsage(_model_base.Model): + """Measurement of the amount of tokens used in this request and response. + + All required parameters must be populated in order to send to server. + + :ivar prompt_tokens: Number of tokens sent in the original request. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens transacted in this request/response. Required. + :vartype total_tokens: int + """ + + prompt_tokens: int = rest_field() + """Number of tokens sent in the original request. Required.""" + total_tokens: int = rest_field() + """Total number of tokens transacted in this request/response. Required.""" + + @overload + def __init__( + self, + *, + prompt_tokens: int, + total_tokens: int, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class FunctionCall(_model_base.Model): """The name and arguments of a function that should be called, as generated by the model. diff --git a/sdk/ai/azure-ai-inference/dev_requirements.txt b/sdk/ai/azure-ai-inference/dev_requirements.txt index ff12ab35dd01..105486471444 100644 --- a/sdk/ai/azure-ai-inference/dev_requirements.txt +++ b/sdk/ai/azure-ai-inference/dev_requirements.txt @@ -1,4 +1,3 @@ --e ../../../tools/azure-devtools -e ../../../tools/azure-sdk-tools ../../core/azure-core aiohttp \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 65268b51c10f..c6e723deb6b3 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -18,13 +18,15 @@ The concepts are similar, you can easily modify any of the samples to your needs |**File Name**|**Description**| |----------------|-------------| -|[sample_chat_completion.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py) | One chat completion operation using a synchronous client. | +|[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. 
|
+|[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. |
 
 ## Asynchronous client samples
 
 |**File Name**|**Description**|
 |----------------|-------------|
-|[sample_chat_completion_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py) | One chat completion operation using an asynchronous client. |
+|[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. |
+|[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. |
 
 ## Prerequisites
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
similarity index 81%
rename from sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py
rename to sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
index fd4b77965e13..0b3d9baee130 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completion_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
@@ -17,7 +17,8 @@
 """
 import asyncio
 
-async def sample_chat_completion_async():
+
+async def sample_chat_completions_async():
     import os
     from azure.ai.inference.aio import ModelClient
     from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage
@@ -32,23 +33,16 @@ async def sample_chat_completion_async():
         print("Set them before running this sample.")
         exit()
 
-    # Create an Image Analysis client for synchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(key)
-    )
+    # Create a Model Client for asynchronous operations
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
-    # Start the operation and get a Future object
+    # Do a single chat completion operation. Start the operation and get a Future object.
     future = asyncio.ensure_future(
         client.get_chat_completions(
             chat_completions_options=ChatCompletionsOptions(
                 messages=[
-                    ChatRequestSystemMessage(
-                        content="You are an AI assistant that helps people find information."
-                    ),
-                    ChatRequestUserMessage(
-                        content="How many feet are in a mile?"
-                    )
+                    ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
+                    ChatRequestUserMessage(content="How many feet are in a mile?"),
                 ]
             )
         )
@@ -78,8 +72,9 @@ async def sample_chat_completion_async():
     print(f"usage.completion_tokens: {result.usage.completion_tokens}")
     print(f"usage.total_tokens: {result.usage.total_tokens}")
 
+
 async def main():
-    await sample_chat_completion_async()
+    await sample_chat_completions_async()
 
 
 if __name__ == "__main__":
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
new file mode 100644
index 000000000000..ea154623f92b
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
@@ -0,0 +1,82 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get embeddings for a list of sentences using an asynchronous client.
+
+USAGE:
+    python sample_embeddings_async.py
+
+    Set these two environment variables before running the sample:
+    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
+        where `deployment-name` is your unique AI Model deployment name, and
+        `azure-region` is the Azure region where your model is deployed.
+    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+import asyncio
+
+async def sample_embeddings_async():
+    import os
+    from azure.ai.inference.aio import ModelClient
+    from azure.ai.inference.models import EmbeddingsOptions
+    from azure.core.credentials import AzureKeyCredential
+
+    # Read the values of your model endpoint and key from environment variables
+    try:
+        endpoint = os.environ["MODEL_ENDPOINT"]
+        key = os.environ["MODEL_KEY"]
+    except KeyError:
+        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    # Create a Model Client for asynchronous operations
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+    # Do a single embeddings operation. Start the operation and get a Future object.
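+    # Note: the Future-based pattern below is just one way to drive the call;
+    # a minimal sketch of the simpler form, assuming the same client and
+    # options, is to await the coroutine directly:
+    #
+    #     result = await client.get_embeddings(
+    #         embeddings_options=EmbeddingsOptions(input=["first sentence"])
+    #     )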
+    future = asyncio.ensure_future(
+        client.get_embeddings(
+            embeddings_options=EmbeddingsOptions(
+                input=[
+                    "first sentence",
+                    "second sentence",
+                    "third sentence"
+                ]
+            )
+        )
+    )
+
+    # Loop until the operation is done
+    while not future.done():
+        await asyncio.sleep(0.1)
+        print("Waiting...")
+
+    # Get the result
+    result = future.result()
+    await client.close()
+
+    # Print results to the console
+    print("Embeddings result:")
+    for index, item in enumerate(result.data):
+        length = len(item.embedding)
+        print(f"data[{index}].index: {item.index}")
+        print(f"data[{index}].embedding[0]: {item.embedding[0]}")
+        print(f"data[{index}].embedding[1]: {item.embedding[1]}")
+        print("...")
+        print(f"data[{index}].embedding[{length - 2}]: {item.embedding[length - 2]}")
+        print(f"data[{index}].embedding[{length - 1}]: {item.embedding[length - 1]}")
+    print(f"id: {result.id}")
+    print(f"model: {result.model}")
+    print(f"object: {result.object}")
+    print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
+    print(f"usage.total_tokens: {result.usage.total_tokens}")
+
+
+async def main():
+    await sample_embeddings_async()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
similarity index 81%
rename from sdk/ai/azure-ai-inference/samples/sample_chat_completion.py
rename to sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index 60047e98e1fe..b1d433415b37 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completion.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -17,7 +17,7 @@
 """
 
 
-def sample_chat_completion():
+def sample_chat_completions():
     # [START create_client]
     import os
     from azure.ai.inference import ModelClient
@@ -33,26 +33,20 @@ def sample_chat_completions():
         print("Set them before running this sample.")
         exit()
 
-    # Create an Image Analysis client for synchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(key)
-    )
+    # Create a Model Client for synchronous operations
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
     # [END create_client]
 
-    # [START chat-completion]
+    # [START chat_completions]
     # Do a single chat completion operation. This will be a synchronous (blocking) call.
     result = client.get_chat_completions(
         chat_completions_options=ChatCompletionsOptions(
             messages=[
-                ChatRequestSystemMessage(
-                    content="You are an AI assistant that helps people find information."
-                ),
-                ChatRequestUserMessage(
-                    content="How many feet are in a mile?"
-                )
+                ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
+                ChatRequestUserMessage(content="How many feet are in a mile?"),
             ]
-        ))
+        )
+    )
 
     # Print results to the console
     print("Chat Completions:")
@@ -68,8 +62,8 @@ def sample_chat_completions():
     print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
     print(f"usage.completion_tokens: {result.usage.completion_tokens}")
     print(f"usage.total_tokens: {result.usage.total_tokens}")
-    # [END chat-completion]
+    # [END chat_completions]
 
 
 if __name__ == "__main__":
-    sample_chat_completion()
+    sample_chat_completions()
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
new file mode 100644
index 000000000000..620e416f4d3d
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get embeddings for a list of sentences using a synchronous client.
+
+USAGE:
+    python sample_embeddings.py
+
+    Set these two environment variables before running the sample:
+    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
+        where `deployment-name` is your unique AI Model deployment name, and
+        `azure-region` is the Azure region where your model is deployed.
+    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+
+
+def sample_embeddings():
+    import os
+
+    from azure.ai.inference import ModelClient
+    from azure.ai.inference.models import EmbeddingsOptions
+    from azure.core.credentials import AzureKeyCredential
+
+    # [START logging]
+    import sys
+    import logging
+
+    # Acquire the logger for this client library. Use 'azure' to affect both
+    # 'azure.core' and 'azure.ai.inference' libraries.
+    logger = logging.getLogger("azure")
+
+    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+    logger.setLevel(logging.DEBUG)
+
+    # Direct logging output to stdout (the default):
+    handler = logging.StreamHandler(stream=sys.stdout)
+    # Or direct logging output to a file:
+    # handler = logging.FileHandler(filename = 'sample.log')
+    logger.addHandler(handler)
+
+    # Optional: change the default logging format. Here we add a timestamp.
+    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+    handler.setFormatter(formatter)
+    # [END logging]
+
+    # Read the values of your model endpoint and key from environment variables
+    try:
+        endpoint = os.environ["MODEL_ENDPOINT"]
+        key = os.environ["MODEL_KEY"]
+    except KeyError:
+        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    # Create a Model Client for synchronous operations
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+    # [START embeddings]
+    # Do a single embeddings operation. This will be a synchronous (blocking) call.
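+    # Note: per the operation docstrings, a plain JSON dict may also be passed
+    # in place of the EmbeddingsOptions model; a minimal sketch with the same
+    # inputs:
+    #
+    #     result = client.get_embeddings(
+    #         embeddings_options={"input": ["first sentence", "second sentence", "third sentence"]}
+    #     )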
+    result = client.get_embeddings(
+        embeddings_options=EmbeddingsOptions(
+            input=[
+                "first sentence",
+                "second sentence",
+                "third sentence"
+            ]
+        )
+    )
+
+    # Print results to the console
+    print("Embeddings result:")
+    for index, item in enumerate(result.data):
+        length = len(item.embedding)
+        print(f"data[{index}].index: {item.index}")
+        print(f"data[{index}].embedding[0]: {item.embedding[0]}")
+        print(f"data[{index}].embedding[1]: {item.embedding[1]}")
+        print("...")
+        print(f"data[{index}].embedding[{length - 2}]: {item.embedding[length - 2]}")
+        print(f"data[{index}].embedding[{length - 1}]: {item.embedding[length - 1]}")
+    print(f"id: {result.id}")
+    print(f"model: {result.model}")
+    print(f"object: {result.object}")
+    print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
+    print(f"usage.total_tokens: {result.usage.total_tokens}")
+    # [END embeddings]
+
+
+if __name__ == "__main__":
+    sample_embeddings()
diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
index 272e67fdcf88..00ccc33c41df 100644
--- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
+++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
@@ -40,7 +40,7 @@
 
 
 # The test class name needs to start with "Test" to get collected by pytest
-class ModelInferenceTestBase(AzureRecordedTestCase):
+class ModelClientTestBase(AzureRecordedTestCase):
 
     client: sdk.ModelClient
     async_client: async_sdk.ModelClient
@@ -49,19 +49,16 @@ class ModelInferenceTestBase(AzureRecordedTestCase):
     # Set to True to print out all analysis results
     PRINT_CHAT_COMPLETION_RESULTS = True
 
-
     def _create_client_for_standard_test(self, sync: bool, get_connection_url: bool = False, **kwargs):
         endpoint = kwargs.pop("model_endpoint")
         key = kwargs.pop("model_key")
         self._create_client(endpoint, key, sync, get_connection_url)
 
-
     def _create_client_for_authentication_failure(self, sync: bool, **kwargs):
         endpoint = kwargs.pop("model_endpoint")
         key = "00000000000000000000000000000000"
         self._create_client(endpoint, key, sync, False)
 
-
     def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url: bool):
         credential = AzureKeyCredential(key)
         if sync:
@@ -81,12 +78,10 @@ def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url
         )
         assert self.async_client is not None
 
-
     def _raw_request_check(self, request: PipelineRequest):
         self.connection_url = request.http_request.url
         print(f"Connection URL: {request.http_request.url}")
 
-
    def _do_chat_completions(
         self,
         options: sdk.models.ChatCompletionsOptions,
@@ -97,16 +92,15 @@ def _do_chat_completions(
         result = self.client.get_chat_completions(chat_completions_options=options, params=query_params)
 
         # Optional: console printout of all results
-        if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS:
-            ModelInferenceTestBase._print_chat_completions_results(result)
+        if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS:
+            ModelClientTestBase._print_chat_completions_results(result)
 
         # Validate all results
-        ModelInferenceTestBase._validate_chat_completions_results(result)
+        ModelClientTestBase._validate_chat_completions_results(result)
 
         # Validate that additional query parameters exist in the connection URL, if specified
         if query_params is not None:
-            ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url)
-
+            ModelClientTestBase._validate_query_parameters(query_params, self.connection_url)
 
     async def
_do_async_chat_completions( result = future.result() # Optional: console printout of all results - if ModelInferenceTestBase.PRINT_CHAT_COMPLETION_RESULTS: - ModelInferenceTestBase._print_chat_completions_results(result) + if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS: + ModelClientTestBase._print_chat_completions_results(result) # Validate all results - ModelInferenceTestBase._validate_chat_completions_results(result) + ModelClientTestBase._validate_chat_completions_results(result) # Validate that additional query parameters exists in the connection URL, if specify if query_params is not None: - ModelInferenceTestBase._validate_query_parameters(query_params, self.connection_url) - + ModelClientTestBase._validate_query_parameters(query_params, self.connection_url) def _do_chat_completion_with_error( self, @@ -158,7 +151,6 @@ def _do_chat_completion_with_error( return assert False # We should not get here - async def _do_async_chat_completion_with_error( self, options: sdk.models.ChatCompletionsOptions, @@ -168,7 +160,7 @@ async def _do_async_chat_completion_with_error( ): try: - result = await self.async_client.get_chat_completions(chat_completions_options=options) + result = await self.async_client.get_chat_completions(chat_completions_options=options) except AzureError as e: print(e) @@ -178,7 +170,6 @@ async def _do_async_chat_completion_with_error( return assert False # We should not get here - @staticmethod def _validate_query_parameters(query_params: dict, connection_url: str): assert len(query_params) > 0 @@ -188,7 +179,6 @@ def _validate_query_parameters(query_params: dict, connection_url: str): query_string = "?" + query_string[1:] assert query_string in connection_url - @staticmethod def _validate_chat_completions_results(result: sdk.models.ChatCompletions): @@ -206,8 +196,7 @@ def _validate_chat_completions_results(result: sdk.models.ChatCompletions): assert result.object == "chat.completion" assert result.usage.prompt_tokens > 0 assert result.usage.completion_tokens > 0 - assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens - + assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens @staticmethod def _print_chat_completions_results(result: sdk.models.ChatCompletions): diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index e5a33c3c8210..93920cba1659 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -5,11 +5,11 @@ import inspect import azure.ai.inference as sdk -from model_inference_test_base import ModelInferenceTestBase, ServicePreparer +from model_inference_test_base import ModelClientTestBase, ServicePreparer from devtools_testutils.aio import recorded_by_proxy_async # The test class name needs to start with "Test" to get collected by pytest -class TestImageAnalysisAsyncClient(ModelInferenceTestBase): +class TestImageAnalysisAsyncClient(ModelClientTestBase): # ********************************************************************************** # @@ -25,21 +25,16 @@ async def test_async_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=False, **kwargs) options = sdk.models.ChatCompletionsOptions( - messages=[ - sdk.models.ChatRequestUserMessage( - content="How many feet are in a mile?" 
- ) - ] + messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] ) - await self._do_async_chat_completions( - options=options, - **kwargs - ) + await self._do_async_chat_completions(options=options, **kwargs) await self.async_client.close() # Test some visual features, one after the other, from image URL, with relevant settings specified + + """ @ServicePreparer() @recorded_by_proxy_async async def test_analyze_async_single_feature_from_url(self, **kwargs): @@ -70,11 +65,11 @@ async def test_analyze_async_single_feature_from_url(self, **kwargs): await self.async_client.close() """ - # ********************************************************************************** - # - # ERROR TESTS - # - # ********************************************************************************** +# ********************************************************************************** +# +# ERROR TESTS +# +# ********************************************************************************** """ @ServicePreparer() @recorded_by_proxy_async diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 631eeb3ed90b..2d71e101ae83 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -5,12 +5,12 @@ import inspect import azure.ai.inference as sdk -from model_inference_test_base import ModelInferenceTestBase, ServicePreparer +from model_inference_test_base import ModelClientTestBase, ServicePreparer from devtools_testutils import recorded_by_proxy # The test class name needs to start with "Test" to get collected by pytest -class TestModelClient(ModelInferenceTestBase): +class TestModelClient(ModelClientTestBase): # ********************************************************************************** # @@ -26,21 +26,16 @@ def test_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=True, **kwargs) options = sdk.models.ChatCompletionsOptions( - messages=[ - sdk.models.ChatRequestUserMessage( - content="How many feet are in a mile?" 
- ) - ] + messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] ) - self._do_chat_completions( - options=options, - **kwargs - ) + self._do_chat_completions(options=options, **kwargs) self.client.close() # Test some visual features, one after the other, from file, using default settings + + """ @ServicePreparer() @recorded_by_proxy def test_analyze_sync_single_feature_from_file(self, **kwargs): @@ -60,11 +55,11 @@ def test_analyze_sync_single_feature_from_file(self, **kwargs): self.client.close() """ - # ********************************************************************************** - # - # ERROR TESTS - # - # ********************************************************************************** +# ********************************************************************************** +# +# ERROR TESTS +# +# ********************************************************************************** """ @ServicePreparer() @recorded_by_proxy @@ -81,4 +76,4 @@ def test_analyze_sync_image_url_does_not_exist(self, **kwargs): ) self.client.close() - """ \ No newline at end of file + """ diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index f17a49413995..bfe53bbb261b 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ -directory: specification/ai/ModelInference -commit: a5b5fe9e6c1451c8f33cbcc52291204ed62e6a26 +directory: specification/ai/ModelClient +commit: 7f2f7cc612c8b5950b600e14a37e99db491ce15d repo: Azure/azure-rest-api-specs additionalDirectories: From fff460ead5253f8c6e66996cd74dca33369eb6ee Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 1 Apr 2024 17:30:04 -0700 Subject: [PATCH 011/112] Fix typo --- sdk/ai/azure-ai-inference/README.md | 2 +- sdk/ai/azure-ai-inference/samples/sample_chat_completions.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 938d18c0be8a..c202d839ef6c 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -22,7 +22,7 @@ Note that for inference of OpenAI models hosted on azure you should be using the * [Python 3.8](https://www.python.org/) or later installed, including [pip](https://pip.pypa.io/en/stable/). * An [Azure subscription](https://azure.microsoft.com/free). -* A [TBD resouce](https://azure.microsoft.com/) in your Azure subscription. You will need the key and endpoint from this resource to authenticate against the service. +* A [TBD resource](https://azure.microsoft.com/) in your Azure subscription. You will need the key and endpoint from this resource to authenticate against the service. 
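+
+As a rough sketch of how that key and endpoint are used to authenticate (the
+endpoint and key values below are placeholders, not a runnable configuration):
+
+```python
+from azure.ai.inference import ModelClient
+from azure.core.credentials import AzureKeyCredential
+
+client = ModelClient(endpoint="<your-endpoint>", credential=AzureKeyCredential("<your-key>"))
+```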
### Install the Model Client package diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index b1d433415b37..4f83744de06d 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -34,7 +34,10 @@ def sample_chat_completions(): exit() # Create Model Client for synchronous operations - client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + client = ModelClient( + endpoint=endpoint, + credential=AzureKeyCredential(key) + ) # [END create_client] # [START chat_completions] From 9d171a70069c7695206016c0eae7539f774eac73 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 1 Apr 2024 18:17:48 -0700 Subject: [PATCH 012/112] After re-emit using flat input arguments --- .../ai/inference/_operations/_operations.py | 397 ++++++++++++++++-- .../inference/aio/_operations/_operations.py | 375 +++++++++++++++-- .../azure/ai/inference/models/__init__.py | 6 +- .../azure/ai/inference/models/_enums.py | 15 + .../azure/ai/inference/models/_models.py | 199 +-------- .../sample_chat_completions_async.py | 12 +- .../async_samples/sample_embeddings_async.py | 18 +- .../samples/sample_chat_completions.py | 12 +- .../samples/sample_embeddings.py | 25 +- .../tests/model_inference_test_base.py | 12 +- .../test_model_inference_async_client.py | 6 +- .../tests/test_model_inference_client.py | 6 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 13 files changed, 756 insertions(+), 329 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 3ba8edf751b0..3d0514ad0c6d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload from azure.core.exceptions import ( ClientAuthenticationError, @@ -34,6 +34,7 @@ else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] @@ -41,7 +42,12 @@ _SERIALIZER.client_side_validation = False -def build_model_get_chat_completions_request(**kwargs: Any) -> HttpRequest: +def build_model_get_chat_completions_request( + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -56,6 +62,10 @@ def build_model_get_chat_completions_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if unknown_parameters is not None: + _headers["unknown-parameters"] = _SERIALIZER.header("unknown_parameters", unknown_parameters, "str") + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", 
model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -63,7 +73,12 @@ def build_model_get_chat_completions_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_model_get_embeddings_request(**kwargs: Any) -> HttpRequest: +def build_model_get_embeddings_request( + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -78,6 +93,10 @@ def build_model_get_embeddings_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if unknown_parameters is not None: + _headers["unknown-parameters"] = _SERIALIZER.header("unknown_parameters", unknown_parameters, "str") + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -89,8 +108,10 @@ class ModelClientOperationsMixin(ModelClientMixinABC): @overload def get_chat_completions( self, - chat_completions_options: _models.ChatCompletionsOptions, + body: JSON, *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -100,8 +121,17 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Required. - :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions + :param body: Required. + :type body: JSON + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -126,7 +156,7 @@ def get_chat_completions( } # JSON input template you can fill out and use as your body input. 
- chat_completions_options = { + body = { "messages": [ chat_request_message ], @@ -215,7 +245,26 @@ def get_chat_completions( @overload def get_chat_completions( - self, chat_completions_options: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + *, + messages: List[_models.ChatRequestMessage], + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: Optional[List[str]] = None, + stream_parameter: Optional[bool] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -223,11 +272,80 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Required. - :type chat_completions_options: JSON + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. + :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. + :paramtype presence_penalty: float + :keyword temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. 
+ Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value + causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype top_p: float + :keyword max_tokens: The maximum number of tokens to generate. Default value is None. + :paramtype max_tokens: int + :keyword response_format: An object specifying the format that the model must output. Used to + enable JSON mode. Default value is None. + :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat + :keyword stop: A collection of textual sequences that will end completions generation. Default + value is None. + :paramtype stop: list[str] + :keyword stream_parameter: A value indicating whether chat completions should be streamed for + this request. Default value is None. + :paramtype stream_parameter: bool + :keyword tools: The available tool definitions that the chat completions request can use, + including caller-defined functions. Default value is None. + :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] + :keyword tool_choice: If specified, the model will configure which of the provided tools it can + use for the chat completions response. Is either a Union[str, + "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. + Default value is None. + :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or + ~azure.ai.inference.models.ChatCompletionsNamedToolSelection + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed, and you + should refer to the + system_fingerprint response parameter to monitor changes in the backend.". Default value is + None. + :paramtype seed: int :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions :raises ~azure.core.exceptions.HttpResponseError: @@ -277,7 +395,13 @@ def get_chat_completions( @overload def get_chat_completions( - self, chat_completions_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, + body: IO[bytes], + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -285,8 +409,17 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Required. - :type chat_completions_options: IO[bytes] + :param body: Required. 
+ :type body: IO[bytes] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -339,7 +472,26 @@ def get_chat_completions( @distributed_trace def get_chat_completions( - self, chat_completions_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: List[_models.ChatRequestMessage] = _Unset, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: Optional[List[str]] = None, + stream_parameter: Optional[bool] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -347,10 +499,79 @@ def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Is one of - the following types: ChatCompletionsOptions, JSON, IO[bytes] Required. - :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or - IO[bytes] + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. 
+ Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. + :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. + :paramtype presence_penalty: float + :keyword temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. + Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value + causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype top_p: float + :keyword max_tokens: The maximum number of tokens to generate. Default value is None. + :paramtype max_tokens: int + :keyword response_format: An object specifying the format that the model must output. Used to + enable JSON mode. Default value is None. + :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat + :keyword stop: A collection of textual sequences that will end completions generation. Default + value is None. + :paramtype stop: list[str] + :keyword stream_parameter: A value indicating whether chat completions should be streamed for + this request. Default value is None. + :paramtype stream_parameter: bool + :keyword tools: The available tool definitions that the chat completions request can use, + including caller-defined functions. Default value is None. + :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] + :keyword tool_choice: If specified, the model will configure which of the provided tools it can + use for the chat completions response. Is either a Union[str, + "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. + Default value is None. + :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or + ~azure.ai.inference.models.ChatCompletionsNamedToolSelection + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed, and you + should refer to the + system_fingerprint response parameter to monitor changes in the backend.". Default value is + None. + :paramtype seed: int :return: ChatCompletions. 
The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions :raises ~azure.core.exceptions.HttpResponseError: @@ -372,7 +593,7 @@ def get_chat_completions( } # JSON input template you can fill out and use as your body input. - chat_completions_options = { + body = { "messages": [ chat_request_message ], @@ -472,14 +693,34 @@ def get_chat_completions( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) + if body is _Unset: + if messages is _Unset: + raise TypeError("missing required argument: messages") + body = { + "frequency_penalty": frequency_penalty, + "max_tokens": max_tokens, + "messages": messages, + "presence_penalty": presence_penalty, + "response_format": response_format, + "seed": seed, + "stop": stop, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(chat_completions_options, (IOBase, bytes)): - _content = chat_completions_options + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(chat_completions_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( + unknown_parameters=unknown_parameters, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -513,13 +754,28 @@ def get_chat_completions( @overload def get_embeddings( - self, embeddings_options: _models.EmbeddingsOptions, *, content_type: str = "application/json", **kwargs: Any + self, + body: JSON, + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for a given prompt. - :param embeddings_options: The JSON payload containing embedding options. Required. - :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions + :param body: Required. + :type body: JSON + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -531,7 +787,7 @@ def get_embeddings( .. code-block:: python # JSON input template you can fill out and use as your body input. - embeddings_options = { + body = { "input": [ "str" # Input texts to get embeddings for, encoded as a an array of strings. Required. 
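
# With the flat arguments introduced by the re-emitted overloads above, a
# minimal call sketch (assuming a configured synchronous client) no longer
# needs to build an options model:
#
#     result = client.get_embeddings(
#         input=["first sentence", "second sentence"],
#         input_type="text",
#     )
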
@@ -570,16 +826,35 @@ def get_embeddings(

     @overload
     def get_embeddings(
- self, embeddings_options: JSON, *, content_type: str = "application/json", **kwargs: Any
+ self,
+ *,
+ input: List[str],
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
+ **kwargs: Any
 ) -> _models.EmbeddingsResult:
 # pylint: disable=line-too-long
 """Return the embeddings for a given prompt.

- :param embeddings_options: The JSON payload containing embedding options. Required.
- :type embeddings_options: JSON
+ :keyword input: Input texts to get embeddings for, encoded as an array of strings. Required.
+ :paramtype input: list[str]
+ :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra
+ properties in the request payload. Known values are: "error", "ignore", and "allow". Default
+ value is None.
+ :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
 :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
 Default value is "application/json".
 :paramtype content_type: str
+ :keyword input_type: Specifies the input type to use for embedding search. Known values are:
+ "text", "query", and "document". Default value is None.
+ :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType
 :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
 :rtype: ~azure.ai.inference.models.EmbeddingsResult
 :raises ~azure.core.exceptions.HttpResponseError:
@@ -617,13 +892,28 @@ def get_embeddings(

     @overload
     def get_embeddings(
- self, embeddings_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ self,
+ body: IO[bytes],
+ *,
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ **kwargs: Any
 ) -> _models.EmbeddingsResult:
 # pylint: disable=line-too-long
 """Return the embeddings for a given prompt.

- :param embeddings_options: The JSON payload containing embedding options. Required.
- :type embeddings_options: IO[bytes]
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra
+ properties in the request payload. Known values are: "error", "ignore", and "allow". Default
+ value is None.
+ :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
 :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
 Default value is "application/json".
:paramtype content_type: str
@@ -664,14 +954,34 @@ def get_embeddings(

     @distributed_trace
     def get_embeddings(
- self, embeddings_options: Union[_models.EmbeddingsOptions, JSON, IO[bytes]], **kwargs: Any
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ input: List[str] = _Unset,
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
+ **kwargs: Any
 ) -> _models.EmbeddingsResult:
 # pylint: disable=line-too-long
 """Return the embeddings for a given prompt.

- :param embeddings_options: The JSON payload containing embedding options. Is one of the
- following types: EmbeddingsOptions, JSON, IO[bytes] Required.
- :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions or JSON or IO[bytes]
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword input: Input texts to get embeddings for, encoded as an array of strings. Required.
+ :paramtype input: list[str]
+ :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra
+ properties in the request payload. Known values are: "error", "ignore", and "allow". Default
+ value is None.
+ :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
+ :keyword input_type: Specifies the input type to use for embedding search. Known values are:
+ "text", "query", and "document". Default value is None.
+ :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType
 :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
 :rtype: ~azure.ai.inference.models.EmbeddingsResult
 :raises ~azure.core.exceptions.HttpResponseError:
@@ -680,7 +990,7 @@ def get_embeddings(
 .. code-block:: python

 # JSON input template you can fill out and use as your body input.
- embeddings_options = {
+ body = {
 "input": [
 "str" # Input texts to get embeddings for, encoded as a an array of strings. Required.
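The same pattern applies to embeddings. A short sketch of the keyword-style and binary-body call shapes described above (illustrative only; `EmbeddingInputType` and `UnknownParameters` are the enums this patch series exposes, and `client` is the synchronous `ModelClient` from the earlier sketch):

```python
import io
import json

from azure.ai.inference.models import EmbeddingInputType, UnknownParameters

# Flattened keywords: `input` is required; unset optionals never reach the wire.
result = client.get_embeddings(
    input=["first sentence", "second sentence"],
    input_type=EmbeddingInputType.QUERY,
    unknown_parameters=UnknownParameters.ALLOW,  # forward unrecognized body fields
)

# Binary body: bytes/IO payloads are used as the request content verbatim,
# without re-serialization.
raw = io.BytesIO(json.dumps({"input": ["first sentence"]}).encode("utf-8"))
result = client.get_embeddings(raw, content_type="application/json")

for item in result.data:
    print(item.index, item.embedding[0])
```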
@@ -730,14 +1040,21 @@ def get_embeddings( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = {"input": input, "input_type": input_type} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(embeddings_options, (IOBase, bytes)): - _content = embeddings_options + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(embeddings_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_embeddings_request( + unknown_parameters=unknown_parameters, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index f9413d110cdd..846a9799957b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload from azure.core.exceptions import ( ClientAuthenticationError, @@ -34,6 +34,7 @@ else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] @@ -42,8 +43,10 @@ class ModelClientOperationsMixin(ModelClientMixinABC): @overload async def get_chat_completions( self, - chat_completions_options: _models.ChatCompletionsOptions, + body: JSON, *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -53,8 +56,17 @@ async def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Required. - :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions + :param body: Required. + :type body: JSON + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -79,7 +91,7 @@ async def get_chat_completions( } # JSON input template you can fill out and use as your body input. - chat_completions_options = { + body = { "messages": [ chat_request_message ], @@ -168,7 +180,26 @@ async def get_chat_completions( @overload async def get_chat_completions( - self, chat_completions_options: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + *, + messages: List[_models.ChatRequestMessage], + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: Optional[List[str]] = None, + stream_parameter: Optional[bool] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -176,11 +207,80 @@ async def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Required. - :type chat_completions_options: JSON + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. + :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. 
+ :paramtype presence_penalty: float
+ :keyword temperature: The sampling temperature to use that controls the apparent creativity of
+ generated completions.
+ Higher values will make output more random while lower values will make results more focused
+ and deterministic.
+ It is not recommended to modify temperature and top_p for the same completions request as the
+ interaction of these two settings is difficult to predict. Default value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value
+ causes the
+ model to consider the results of tokens with the provided probability mass. As an example, a
+ value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
+ considered.
+ It is not recommended to modify temperature and top_p for the same completions request as the
+ interaction of these two settings is difficult to predict. Default value is None.
+ :paramtype top_p: float
+ :keyword max_tokens: The maximum number of tokens to generate. Default value is None.
+ :paramtype max_tokens: int
+ :keyword response_format: An object specifying the format that the model must output. Used to
+ enable JSON mode. Default value is None.
+ :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat
+ :keyword stop: A collection of textual sequences that will end completions generation. Default
+ value is None.
+ :paramtype stop: list[str]
+ :keyword stream_parameter: A value indicating whether chat completions should be streamed for
+ this request. Default value is None.
+ :paramtype stream_parameter: bool
+ :keyword tools: The available tool definitions that the chat completions request can use,
+ including caller-defined functions. Default value is None.
+ :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition]
+ :keyword tool_choice: If specified, the model will configure which of the provided tools it can
+ use for the chat completions response. Is either a Union[str,
+ "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type.
+ Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or
+ ~azure.ai.inference.models.ChatCompletionsNamedToolSelection
+ :keyword seed: If specified, the system will make a best effort to sample deterministically
+ such that repeated requests with the
+ same seed and parameters should return the same result. Determinism is not guaranteed, and you
+ should refer to the
+ system_fingerprint response parameter to monitor changes in the backend. Default value is
+ None.
+ :paramtype seed: int :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions :raises ~azure.core.exceptions.HttpResponseError: @@ -230,7 +330,13 @@ async def get_chat_completions(

     @overload
     async def get_chat_completions(
- self, chat_completions_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ self,
+ body: IO[bytes],
+ *,
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ **kwargs: Any
 ) -> _models.ChatCompletions:
 # pylint: disable=line-too-long
 """Gets chat completions for the provided chat messages.
@@ -238,8 +344,17 @@ async def get_chat_completions(
 "completes"
 provided prompt data.
- :param chat_completions_options: The JSON payload containing chat completion options. Required. - :type chat_completions_options: IO[bytes] + :param body: Required. + :type body: IO[bytes] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -292,7 +407,26 @@ async def get_chat_completions( @distributed_trace_async async def get_chat_completions( - self, chat_completions_options: Union[_models.ChatCompletionsOptions, JSON, IO[bytes]], **kwargs: Any + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: List[_models.ChatRequestMessage] = _Unset, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: Optional[List[str]] = None, + stream_parameter: Optional[bool] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. @@ -300,10 +434,79 @@ async def get_chat_completions( "completes" provided prompt data. - :param chat_completions_options: The JSON payload containing chat completion options. Is one of - the following types: ChatCompletionsOptions, JSON, IO[bytes] Required. - :type chat_completions_options: ~azure.ai.inference.models.ChatCompletionsOptions or JSON or - IO[bytes] + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. 
+ :paramtype model_deployment: str
+ :keyword frequency_penalty: A value that influences the probability of generated tokens
+ appearing based on their cumulative
+ frequency in generated text.
+ Positive values will make tokens less likely to appear as their frequency increases and
+ decrease the likelihood of the model repeating the same statements verbatim. Default value is
+ None.
+ :paramtype frequency_penalty: float
+ :keyword presence_penalty: A value that influences the probability of generated tokens
+ appearing based on their existing
+ presence in generated text.
+ Positive values will make tokens less likely to appear when they already exist and increase
+ the
+ model's likelihood to output new topics. Default value is None.
+ :paramtype presence_penalty: float
+ :keyword temperature: The sampling temperature to use that controls the apparent creativity of
+ generated completions.
+ Higher values will make output more random while lower values will make results more focused
+ and deterministic.
+ It is not recommended to modify temperature and top_p for the same completions request as the
+ interaction of these two settings is difficult to predict. Default value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value
+ causes the
+ model to consider the results of tokens with the provided probability mass. As an example, a
+ value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
+ considered.
+ It is not recommended to modify temperature and top_p for the same completions request as the
+ interaction of these two settings is difficult to predict. Default value is None.
+ :paramtype top_p: float
+ :keyword max_tokens: The maximum number of tokens to generate. Default value is None.
+ :paramtype max_tokens: int
+ :keyword response_format: An object specifying the format that the model must output. Used to
+ enable JSON mode. Default value is None.
+ :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat
+ :keyword stop: A collection of textual sequences that will end completions generation. Default
+ value is None.
+ :paramtype stop: list[str]
+ :keyword stream_parameter: A value indicating whether chat completions should be streamed for
+ this request. Default value is None.
+ :paramtype stream_parameter: bool
+ :keyword tools: The available tool definitions that the chat completions request can use,
+ including caller-defined functions. Default value is None.
+ :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition]
+ :keyword tool_choice: If specified, the model will configure which of the provided tools it can
+ use for the chat completions response. Is either a Union[str,
+ "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type.
+ Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or
+ ~azure.ai.inference.models.ChatCompletionsNamedToolSelection
+ :keyword seed: If specified, the system will make a best effort to sample deterministically
+ such that repeated requests with the
+ same seed and parameters should return the same result. Determinism is not guaranteed, and you
+ should refer to the
+ system_fingerprint response parameter to monitor changes in the backend. Default value is
+ None.
+ :paramtype seed: int :return: ChatCompletions.
The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions :raises ~azure.core.exceptions.HttpResponseError: @@ -325,7 +528,7 @@ async def get_chat_completions( } # JSON input template you can fill out and use as your body input. - chat_completions_options = { + body = { "messages": [ chat_request_message ], @@ -425,14 +628,34 @@ async def get_chat_completions( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) + if body is _Unset: + if messages is _Unset: + raise TypeError("missing required argument: messages") + body = { + "frequency_penalty": frequency_penalty, + "max_tokens": max_tokens, + "messages": messages, + "presence_penalty": presence_penalty, + "response_format": response_format, + "seed": seed, + "stop": stop, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(chat_completions_options, (IOBase, bytes)): - _content = chat_completions_options + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(chat_completions_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( + unknown_parameters=unknown_parameters, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -466,13 +689,28 @@ async def get_chat_completions( @overload async def get_embeddings( - self, embeddings_options: _models.EmbeddingsOptions, *, content_type: str = "application/json", **kwargs: Any + self, + body: JSON, + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for a given prompt. - :param embeddings_options: The JSON payload containing embedding options. Required. - :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions + :param body: Required. + :type body: JSON + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -484,7 +722,7 @@ async def get_embeddings( .. code-block:: python # JSON input template you can fill out and use as your body input. - embeddings_options = { + body = { "input": [ "str" # Input texts to get embeddings for, encoded as a an array of strings. Required. 
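For the async mixin above, the calls are identical apart from `await`. A minimal async sketch, assuming the generated `aio` client supports `async with`, as Azure SDK async clients conventionally do:

```python
import asyncio
import os

from azure.ai.inference.aio import ModelClient
from azure.ai.inference.models import ChatRequestUserMessage
from azure.core.credentials import AzureKeyCredential

async def main() -> None:
    # Using the client as an async context manager closes the underlying
    # transport deterministically on exit.
    async with ModelClient(
        endpoint=os.environ["MODEL_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["MODEL_KEY"]),
    ) as client:
        # Awaiting directly is the simplest form; the Future-based polling in
        # the async samples is an alternative when other work must proceed.
        result = await client.get_chat_completions(
            messages=[ChatRequestUserMessage(content="How many feet are in a mile?")]
        )
        print(result.choices[0].message.content)

asyncio.run(main())
```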
@@ -523,16 +761,35 @@ async def get_embeddings(

     @overload
     async def get_embeddings(
- self, embeddings_options: JSON, *, content_type: str = "application/json", **kwargs: Any
+ self,
+ *,
+ input: List[str],
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
+ **kwargs: Any
 ) -> _models.EmbeddingsResult:
 # pylint: disable=line-too-long
 """Return the embeddings for a given prompt.

- :param embeddings_options: The JSON payload containing embedding options. Required.
- :type embeddings_options: JSON
+ :keyword input: Input texts to get embeddings for, encoded as an array of strings. Required.
+ :paramtype input: list[str]
+ :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra
+ properties in the request payload. Known values are: "error", "ignore", and "allow". Default
+ value is None.
+ :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
 :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
 Default value is "application/json".
 :paramtype content_type: str
+ :keyword input_type: Specifies the input type to use for embedding search. Known values are:
+ "text", "query", and "document". Default value is None.
+ :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType
 :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
 :rtype: ~azure.ai.inference.models.EmbeddingsResult
 :raises ~azure.core.exceptions.HttpResponseError:
@@ -570,13 +827,28 @@ async def get_embeddings(

     @overload
     async def get_embeddings(
- self, embeddings_options: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ self,
+ body: IO[bytes],
+ *,
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ **kwargs: Any
 ) -> _models.EmbeddingsResult:
 # pylint: disable=line-too-long
 """Return the embeddings for a given prompt.

- :param embeddings_options: The JSON payload containing embedding options. Required.
- :type embeddings_options: IO[bytes]
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra
+ properties in the request payload. Known values are: "error", "ignore", and "allow". Default
+ value is None.
+ :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
 :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
 Default value is "application/json".
:paramtype content_type: str
@@ -617,14 +889,34 @@ async def get_embeddings(

     @distributed_trace_async
     async def get_embeddings(
- self, embeddings_options: Union[_models.EmbeddingsOptions, JSON, IO[bytes]], **kwargs: Any
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ input: List[str] = _Unset,
+ unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
+ model_deployment: Optional[str] = None,
+ input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
+ **kwargs: Any
 ) -> _models.EmbeddingsResult:
 # pylint: disable=line-too-long
 """Return the embeddings for a given prompt.

- :param embeddings_options: The JSON payload containing embedding options. Is one of the
- following types: EmbeddingsOptions, JSON, IO[bytes] Required.
- :type embeddings_options: ~azure.ai.inference.models.EmbeddingsOptions or JSON or IO[bytes]
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword input: Input texts to get embeddings for, encoded as an array of strings. Required.
+ :paramtype input: list[str]
+ :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra
+ properties in the request payload. Known values are: "error", "ignore", and "allow". Default
+ value is None.
+ :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
+ :keyword input_type: Specifies the input type to use for embedding search. Known values are:
+ "text", "query", and "document". Default value is None.
+ :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType
 :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
 :rtype: ~azure.ai.inference.models.EmbeddingsResult
 :raises ~azure.core.exceptions.HttpResponseError:
@@ -633,7 +925,7 @@ async def get_embeddings(
 .. code-block:: python

 # JSON input template you can fill out and use as your body input.
- embeddings_options = {
+ body = {
 "input": [
 "str" # Input texts to get embeddings for, encoded as a an array of strings. Required.
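Both traced methods rely on the module-level `_Unset` sentinel declared near the top of each operations file. A self-contained sketch of why a sentinel is used instead of `None`: it lets the method distinguish "argument never supplied" from an explicit `None`, and the dict comprehension then strips unset optionals before serialization.

```python
from typing import Any, List, Optional, Union

_Unset: Any = object()  # unique marker: "caller did not pass this argument"

def build_embeddings_body(
    body: Union[dict, bytes] = _Unset,
    *,
    input: List[str] = _Unset,
    input_type: Optional[str] = None,
) -> Union[dict, bytes]:
    # Mirrors the generated logic: only synthesize a body when none was given.
    if body is _Unset:
        if input is _Unset:
            raise TypeError("missing required argument: input")
        body = {"input": input, "input_type": input_type}
        # Drop unset optionals so they never reach the wire.
        body = {k: v for k, v in body.items() if v is not None}
    return body

assert build_embeddings_body(input=["hi"]) == {"input": ["hi"]}
```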
@@ -683,14 +975,21 @@ async def get_embeddings( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = {"input": input, "input_type": input_type} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(embeddings_options, (IOBase, bytes)): - _content = embeddings_options + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(embeddings_options, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_embeddings_request( + unknown_parameters=unknown_parameters, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index bcb20736c6f1..fdfdb72b38a0 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -12,7 +12,6 @@ from ._models import ChatCompletionsFunctionToolDefinition from ._models import ChatCompletionsJsonResponseFormat from ._models import ChatCompletionsNamedToolSelection -from ._models import ChatCompletionsOptions from ._models import ChatCompletionsResponseFormat from ._models import ChatCompletionsTextResponseFormat from ._models import ChatCompletionsToolCall @@ -25,7 +24,6 @@ from ._models import ChatResponseMessage from ._models import CompletionsUsage from ._models import EmbeddingItem -from ._models import EmbeddingsOptions from ._models import EmbeddingsResult from ._models import EmbeddingsUsage from ._models import FunctionCall @@ -35,6 +33,7 @@ from ._enums import ChatRole from ._enums import CompletionsFinishReason from ._enums import EmbeddingInputType +from ._enums import UnknownParameters from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -46,7 +45,6 @@ "ChatCompletionsFunctionToolDefinition", "ChatCompletionsJsonResponseFormat", "ChatCompletionsNamedToolSelection", - "ChatCompletionsOptions", "ChatCompletionsResponseFormat", "ChatCompletionsTextResponseFormat", "ChatCompletionsToolCall", @@ -59,7 +57,6 @@ "ChatResponseMessage", "CompletionsUsage", "EmbeddingItem", - "EmbeddingsOptions", "EmbeddingsResult", "EmbeddingsUsage", "FunctionCall", @@ -68,6 +65,7 @@ "ChatRole", "CompletionsFinishReason", "EmbeddingInputType", + "UnknownParameters", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 6b9ea49109e9..5c3005503f23 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -58,3 +58,18 @@ class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """to do""" DOCUMENT = "document" """to do""" + + +class UnknownParameters(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Defines the service behavior when unknown parameters are passed as extra properties in the + 
request payload.
+    """
+
+    ERROR = "error"
+    """The service should error when it sees unknown parameters in the request payload. This is the
+    default behavior of the service."""
+    IGNORE = "ignore"
+    """The service should ignore unknown parameters in the request payload. They will not be passed to
+    the back-end AI model."""
+    ALLOW = "allow"
+    """The service should pass unknown parameters to the back-end AI model."""
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
index f449f53d7fe9..6d94bb7efbeb 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
@@ -8,15 +8,22 @@
 # --------------------------------------------------------------------------

 import datetime
+import sys
 from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload

 from .. import _model_base
 from .._model_base import rest_discriminator, rest_field
 from ._enums import ChatRole

+if sys.version_info >= (3, 9):
+ from collections.abc import MutableMapping
+else:
+ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
+
 if TYPE_CHECKING:
 # pylint: disable=unused-import,ungrouped-imports
 from .. import models as _models

+JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object

 class ChatChoice(_model_base.Model):
@@ -354,157 +361,6 @@ class ChatCompletionsNamedToolSelection(_model_base.Model):
 """The object type. Required."""

-class ChatCompletionsOptions(_model_base.Model): # pylint: disable=too-many-instance-attributes
- """The configuration information for a chat completions request.
- Completions support a wide variety of tasks and generate text that continues from or
- "completes"
- provided prompt data.
-
- All required parameters must be populated in order to send to server.
-
- :ivar messages: The collection of context messages associated with this chat completions
- request.
- Typical usage begins with a chat message for the System role that provides instructions for
- the behavior of the assistant, followed by alternating messages between the User and
- Assistant roles. Required.
- :vartype messages: list[~azure.ai.inference.models.ChatRequestMessage]
- :ivar frequency_penalty: A value that influences the probability of generated tokens appearing
- based on their cumulative
- frequency in generated text.
- Positive values will make tokens less likely to appear as their frequency increases and
- decrease the likelihood of the model repeating the same statements verbatim.
- :vartype frequency_penalty: float
- :ivar presence_penalty: A value that influences the probability of generated tokens appearing
- based on their existing
- presence in generated text.
- Positive values will make tokens less likely to appear when they already exist and increase
- the
- model's likelihood to output new topics.
- :vartype presence_penalty: float
- :ivar temperature: The sampling temperature to use that controls the apparent creativity of
- generated completions.
- Higher values will make output more random while lower values will make results more focused
- and deterministic.
- It is not recommended to modify temperature and top_p for the same completions request as the
- interaction of these two settings is difficult to predict.
- :vartype temperature: float
- :ivar top_p: An alternative to sampling with temperature called nucleus sampling.
This value - causes the - model to consider the results of tokens with the provided probability mass. As an example, a - value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - considered. - It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. - :vartype top_p: float - :ivar max_tokens: The maximum number of tokens to generate. - :vartype max_tokens: int - :ivar response_format: An object specifying the format that the model must output. Used to - enable JSON mode. - :vartype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat - :ivar stop: A collection of textual sequences that will end completions generation. - :vartype stop: list[str] - :ivar stream: A value indicating whether chat completions should be streamed for this request. - :vartype stream: bool - :ivar tools: The available tool definitions that the chat completions request can use, - including caller-defined functions. - :vartype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] - :ivar tool_choice: If specified, the model will configure which of the provided tools it can - use for the chat completions response. Is either a Union[str, - "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. - :vartype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or - ~azure.ai.inference.models.ChatCompletionsNamedToolSelection - :ivar seed: If specified, the system will make a best effort to sample deterministically such - that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed, and you - should refer to the - system_fingerprint response parameter to monitor changes in the backend.". - :vartype seed: int - """ - - messages: List["_models.ChatRequestMessage"] = rest_field() - """The collection of context messages associated with this chat completions request. - Typical usage begins with a chat message for the System role that provides instructions for - the behavior of the assistant, followed by alternating messages between the User and - Assistant roles. Required.""" - frequency_penalty: Optional[float] = rest_field() - """A value that influences the probability of generated tokens appearing based on their cumulative - frequency in generated text. - Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim.""" - presence_penalty: Optional[float] = rest_field() - """A value that influences the probability of generated tokens appearing based on their existing - presence in generated text. - Positive values will make tokens less likely to appear when they already exist and increase the - model's likelihood to output new topics.""" - temperature: Optional[float] = rest_field() - """The sampling temperature to use that controls the apparent creativity of generated completions. - Higher values will make output more random while lower values will make results more focused - and deterministic. - It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict.""" - top_p: Optional[float] = rest_field() - """An alternative to sampling with temperature called nucleus sampling. 
This value causes the - model to consider the results of tokens with the provided probability mass. As an example, a - value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - considered. - It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict.""" - max_tokens: Optional[int] = rest_field() - """The maximum number of tokens to generate.""" - response_format: Optional["_models.ChatCompletionsResponseFormat"] = rest_field() - """An object specifying the format that the model must output. Used to enable JSON mode.""" - stop: Optional[List[str]] = rest_field() - """A collection of textual sequences that will end completions generation.""" - stream: Optional[bool] = rest_field() - """A value indicating whether chat completions should be streamed for this request.""" - tools: Optional[List["_models.ChatCompletionsToolDefinition"]] = rest_field() - """The available tool definitions that the chat completions request can use, including - caller-defined functions.""" - tool_choice: Optional[ - Union[str, "_models.ChatCompletionsToolSelectionPreset", "_models.ChatCompletionsNamedToolSelection"] - ] = rest_field() - """If specified, the model will configure which of the provided tools it can use for the chat - completions response. Is either a Union[str, \"_models.ChatCompletionsToolSelectionPreset\"] - type or a ChatCompletionsNamedToolSelection type.""" - seed: Optional[int] = rest_field() - """If specified, the system will make a best effort to sample deterministically such that repeated - requests with the - same seed and parameters should return the same result. Determinism is not guaranteed, and you - should refer to the - system_fingerprint response parameter to monitor changes in the backend.\".""" - - @overload - def __init__( - self, - *, - messages: List["_models.ChatRequestMessage"], - frequency_penalty: Optional[float] = None, - presence_penalty: Optional[float] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_tokens: Optional[int] = None, - response_format: Optional["_models.ChatCompletionsResponseFormat"] = None, - stop: Optional[List[str]] = None, - stream: Optional[bool] = None, - tools: Optional[List["_models.ChatCompletionsToolDefinition"]] = None, - tool_choice: Optional[ - Union[str, "_models.ChatCompletionsToolSelectionPreset", "_models.ChatCompletionsNamedToolSelection"] - ] = None, - seed: Optional[int] = None, - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class ChatCompletionsTextResponseFormat(ChatCompletionsResponseFormat, discriminator="text"): """The standard Chat Completions response format that can freely generate text and is not guaranteed to produce response @@ -856,47 +712,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class EmbeddingsOptions(_model_base.Model): - """The configuration information for an embeddings request. - Embeddings measure the relatedness of text strings and are commonly used for search, - clustering, - recommendations, and other similar scenarios. - - All required parameters must be populated in order to send to server. 
- - :ivar input: Input texts to get embeddings for, encoded as a an array of strings. Required. - :vartype input: list[str] - :ivar input_type: Specifies the input type to use for embedding search. Known values are: - "text", "query", and "document". - :vartype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - """ - - input: List[str] = rest_field() - """Input texts to get embeddings for, encoded as a an array of strings. Required.""" - input_type: Optional[Union[str, "_models.EmbeddingInputType"]] = rest_field() - """Specifies the input type to use for embedding search. Known values are: \"text\", \"query\", - and \"document\".""" - - @overload - def __init__( - self, - *, - input: List[str], - input_type: Optional[Union[str, "_models.EmbeddingInputType"]] = None, - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class EmbeddingsResult(_model_base.Model): """Representation of the response data from an embeddings request. Embeddings measure the relatedness of text strings and are commonly used for search, diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 0b3d9baee130..55818705eb2a 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -21,7 +21,7 @@ async def sample_chat_completions_async(): import os from azure.ai.inference.aio import ModelClient - from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage + from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables @@ -39,12 +39,10 @@ async def sample_chat_completions_async(): # Do a single chat completion operation. Start the operation and get a Future object. 
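        # (Illustrative aside, not in the original sample: with the flattened
        # signature shown earlier in this patch, the call can also simply be
        # awaited, e.g.
        #     result = await client.get_chat_completions(messages=[...])
        # The Future form below demonstrates polling while the call runs.)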
future = asyncio.ensure_future( client.get_chat_completions( - chat_completions_options=ChatCompletionsOptions( - messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="How many feet are in a mile?"), - ] - ) + messages=[ + ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), + ChatRequestUserMessage(content="How many feet are in a mile?"), + ] ) ) diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index ea154623f92b..971631a3a54e 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -17,10 +17,10 @@ """ import asyncio + async def sample_embeddings_async(): import os from azure.ai.inference.aio import ModelClient - from azure.ai.inference.models import EmbeddingsOptions from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables @@ -38,13 +38,11 @@ async def sample_embeddings_async(): # Do a single embeddings operation. Start the operation and get a Future object. future = asyncio.ensure_future( client.get_embeddings( - embeddings_options=EmbeddingsOptions( - input=[ - "first sentence", - "second sentence", - "third sentence" - ] - ) + input=[ + "first sentence", + "second sentence", + "third sentence" + ] ) ) @@ -60,8 +58,8 @@ async def sample_embeddings_async(): # Print results the the console print("Embeddings result:") for index, item in enumerate(result.data): - len=item.embedding.__len__() - print(f"data[{index}].index: {item.index}") + len = item.embedding.__len__() + print(f"data[{index}].index: {item.index}") print(f"data[{index}].embedding[0]: {item.embedding[0]}") print(f"data[{index}].embedding[1]: {item.embedding[1]}") print("...") diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index 4f83744de06d..e749965e5a86 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -21,7 +21,7 @@ def sample_chat_completions(): # [START create_client] import os from azure.ai.inference import ModelClient - from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage + from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, UnknownParameters from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables @@ -43,12 +43,10 @@ def sample_chat_completions(): # [START chat_completions] # Do a single chat completion operation. This will be a synchronously (blocking) call. 
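    # (Illustrative aside, not in the original sample: the flattened signature
    # also accepts the optional controls documented above as plain keywords,
    # e.g. client.get_chat_completions(messages=..., temperature=0.7,
    # max_tokens=256, seed=42); unset keywords are dropped from the JSON body.)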
result = client.get_chat_completions( - chat_completions_options=ChatCompletionsOptions( - messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="How many feet are in a mile?"), - ] - ) + messages=[ + ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), + ChatRequestUserMessage(content="How many feet are in a mile?"), + ] ) # Print results the the console diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 620e416f4d3d..300a7b8ebff0 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -19,11 +19,10 @@ def sample_embeddings(): import os - + from azure.ai.inference import ModelClient - from azure.ai.inference.models import EmbeddingsOptions from azure.core.credentials import AzureKeyCredential - + # [START logging] import sys import logging @@ -45,7 +44,7 @@ def sample_embeddings(): formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") handler.setFormatter(formatter) # [END logging] - + # Read the values of your model endpoint and key from environment variables try: endpoint = os.environ["MODEL_ENDPOINT"] @@ -61,20 +60,18 @@ def sample_embeddings(): # [START embeddings] # Do a single embeddings operation. This will be a synchronously (blocking) call. result = client.get_embeddings( - embeddings_options=EmbeddingsOptions( - input=[ - "first sentence", - "second sentence", - "third sentence" - ] - ) + input=[ + "first sentence", + "second sentence", + "third sentence" + ] ) - + # Print results the the console print("Embeddings result:") for index, item in enumerate(result.data): - len=item.embedding.__len__() - print(f"data[{index}].index: {item.index}") + len = item.embedding.__len__() + print(f"data[{index}].index: {item.index}") print(f"data[{index}].embedding[0]: {item.embedding[0]}") print(f"data[{index}].embedding[1]: {item.embedding[1]}") print("...") diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 00ccc33c41df..ae901860c978 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -84,12 +84,11 @@ def _raw_request_check(self, request: PipelineRequest): def _do_chat_completions( self, - options: sdk.models.ChatCompletionsOptions, query_params: Optional[dict] = None, **kwargs, ): - result = self.client.get_chat_completions(chat_completions_options=options, params=query_params) + result = self.client.get_chat_completions(messages=kwargs.get('messages'), params=query_params) # Optional: console printout of all results if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS: @@ -104,14 +103,13 @@ def _do_chat_completions( async def _do_async_chat_completions( self, - options: sdk.models.ChatCompletionsOptions, query_params: Optional[dict] = None, **kwargs, ): start_time = time.time() # Start the operation and get a Future object - future = asyncio.ensure_future(self.async_client.get_chat_completions(chat_completions_options=options)) + future = asyncio.ensure_future(self.async_client.get_chat_completions(messages=kwargs.get('messages'))) # Loop until the operation is done while not future.done(): @@ -134,14 +132,13 @@ async def _do_async_chat_completions( def _do_chat_completion_with_error( self, - 
options: sdk.models.ChatCompletionsOptions, expected_status_code: int, expected_message_contains: str, **kwargs, ): try: - result = self.client.get_chat_completions(chat_completions_options=options) + result = self.client.get_chat_completions(messages=kwargs.get('messages')) except AzureError as e: print(e) @@ -153,14 +150,13 @@ def _do_chat_completion_with_error( async def _do_async_chat_completion_with_error( self, - options: sdk.models.ChatCompletionsOptions, expected_status_code: int, expected_message_contains: str, **kwargs, ): try: - result = await self.async_client.get_chat_completions(chat_completions_options=options) + result = await self.async_client.get_chat_completions(messages=kwargs.get('messages')) except AzureError as e: print(e) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 93920cba1659..fe6acdff00e9 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -24,11 +24,9 @@ async def test_async_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=False, **kwargs) - options = sdk.models.ChatCompletionsOptions( - messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] - ) + messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] - await self._do_async_chat_completions(options=options, **kwargs) + await self._do_async_chat_completions(messages=messages, **kwargs) await self.async_client.close() diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 2d71e101ae83..8bc18d43cf67 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -25,11 +25,9 @@ def test_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=True, **kwargs) - options = sdk.models.ChatCompletionsOptions( - messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] - ) + messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] - self._do_chat_completions(options=options, **kwargs) + self._do_chat_completions(messages=messages, **kwargs) self.client.close() diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index bfe53bbb261b..fa8e5be0abbd 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 7f2f7cc612c8b5950b600e14a37e99db491ce15d +commit: c26f993ec2271f0fabdd24905a99d2da062686dd repo: Azure/azure-rest-api-specs additionalDirectories: From 7efa8003c8b0ab59e77ec9f9c3b0d6cfd4bd17e9 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 1 Apr 2024 18:20:06 -0700 Subject: [PATCH 013/112] Update README.md code snippets --- sdk/ai/azure-ai-inference/README.md | 33 ++++++++++++++--------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index c202d839ef6c..1e8cac3db029 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -49,7 +49,7 @@ Once you define the environment variables, this Python code will create and auth 
```python import os from azure.ai.inference import ModelClient -from azure.ai.inference.models import ChatCompletionsOptions, ChatRequestSystemMessage, ChatRequestUserMessage +from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, UnknownParameters from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables try: endpoint = os.environ["MODEL_ENDPOINT"] key = os.environ["MODEL_KEY"] except KeyError: print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") print("Set them before running this sample.") exit() # Create Model Client for synchronous operations -client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) +client = ModelClient( + endpoint=endpoint, + credential=AzureKeyCredential(key) +) ``` @@ -115,12 +118,10 @@ This example demonstrates how to generate chat completions. ```python # Do a single chat completion operation. This will be a synchronous (blocking) call. result = client.get_chat_completions( - chat_completions_options=ChatCompletionsOptions( - messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="How many feet are in a mile?"), - ] - ) + messages=[ + ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), + ChatRequestUserMessage(content="How many feet are in a mile?"), + ] ) # Print results to the console @@ -152,20 +153,18 @@ This example demonstrates how to get embeddings. ```python # Do a single embeddings operation. This will be a synchronous (blocking) call. result = client.get_embeddings( - embeddings_options=EmbeddingsOptions( - input=[ - "first sentence", - "second sentence", - "third sentence" - ] - ) + input=[ + "first sentence", + "second sentence", + "third sentence" + ] ) # Print results to the console print("Embeddings result:") for index, item in enumerate(result.data): - len=item.embedding.__len__() - print(f"data[{index}].index: {item.index}") + len = item.embedding.__len__() + print(f"data[{index}].index: {item.index}") print(f"data[{index}].embedding[0]: {item.embedding[0]}") print(f"data[{index}].embedding[1]: {item.embedding[1]}") print("...") From 61b62acb71fb0d4a22d9eaa846a912ab21f8b77c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 4 Apr 2024 09:15:51 -0700 Subject: [PATCH 014/112] Re-emit --- .../ai/inference/_operations/_operations.py | 445 +++++++++++++++++- .../inference/aio/_operations/_operations.py | 420 ++++++++++++++++- .../azure/ai/inference/models/__init__.py | 10 + .../azure/ai/inference/models/_enums.py | 30 ++ .../azure/ai/inference/models/_models.py | 113 ++++- .../async_samples/sample_embeddings_async.py | 10 +- .../samples/sample_chat_completions.py | 5 +- .../samples/sample_embeddings.py | 10 +- .../tests/model_inference_test_base.py | 8 +- .../test_model_inference_async_client.py | 2 +- .../tests/test_model_inference_client.py | 2 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 12 files changed, 990 insertions(+), 67 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 3d0514ad0c6d..3e8666d4de89 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -104,6 +104,37 @@ def build_model_get_embeddings_request( return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) +def
build_model_get_image_generations_request( # pylint: disable=name-too-long + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v1/images/generations" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if unknown_parameters is not None: + _headers["unknown-parameters"] = _SERIALIZER.header("unknown_parameters", unknown_parameters, "str") + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + class ModelClientOperationsMixin(ModelClientMixinABC): @overload def get_chat_completions( @@ -173,9 +204,7 @@ def get_chat_completions( "response_format": chat_completions_response_format, "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the system_fingerprint response parameter to monitor changes in - the backend.". + parameters should return the same result. Determinism is not guaranteed.". "stop": [ "str" # Optional. A collection of textual sequences that will end completions generation. @@ -233,6 +262,9 @@ def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -341,10 +373,8 @@ def get_chat_completions( ~azure.ai.inference.models.ChatCompletionsNamedToolSelection :keyword seed: If specified, the system will make a best effort to sample deterministically such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed, and you - should refer to the - system_fingerprint response parameter to monitor changes in the backend.". Default value is - None. + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. :paramtype seed: int :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions @@ -383,6 +413,9 @@ def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. 
"usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -460,6 +493,9 @@ def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -567,10 +603,8 @@ def get_chat_completions( ~azure.ai.inference.models.ChatCompletionsNamedToolSelection :keyword seed: If specified, the system will make a best effort to sample deterministically such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed, and you - should refer to the - system_fingerprint response parameter to monitor changes in the backend.". Default value is - None. + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. :paramtype seed: int :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions @@ -610,9 +644,7 @@ def get_chat_completions( "response_format": chat_completions_response_format, "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the system_fingerprint response parameter to monitor changes in - the backend.". + parameters should return the same result. Determinism is not guaranteed.". "stop": [ "str" # Optional. A collection of textual sequences that will end completions generation. @@ -670,6 +702,9 @@ def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -816,8 +851,15 @@ def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. 
} @@ -882,8 +924,15 @@ def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -944,8 +993,15 @@ def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -1019,8 +1075,15 @@ def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -1085,3 +1148,349 @@ def get_embeddings( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @overload + def get_image_generations( + self, + body: JSON, + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :param body: Required. + :type body: JSON + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. 
+ Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "prompt": "str", # A description of the desired images. Required. + "size": "str", # The desired dimension in pixels of the generated images, in + the format ":code:``x:code:``". For example: "1024x1024", + "1792x1024". Required. + "quality": "str", # Optional. The desired image generation quality level to + use. Known values are: "standard" and "hd". + "response_format": "str", # Optional. The format in which image generation + response items should be presented. Known values are: "url" and "b64_json". + "seed": 0 # Optional. If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed.". + } + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + + @overload + def get_image_generations( + self, + *, + prompt: str, + size: str, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, + response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :keyword prompt: A description of the desired images. Required. + :paramtype prompt: str + :keyword size: The desired dimension in pixels of the generated images, in the format + ":code:``x:code:``". + For example: "1024x1024", "1792x1024". Required. + :paramtype size: str + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword quality: The desired image generation quality level to use. Known values are: + "standard" and "hd". Default value is None. + :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality + :keyword response_format: The format in which image generation response items should be + presented. Known values are: "url" and "b64_json". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + + @overload + def get_image_generations( + self, + body: IO[bytes], + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :param body: Required. + :type body: IO[bytes] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. 
The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + + @distributed_trace + def get_image_generations( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + prompt: str = _Unset, + size: str = _Unset, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, + response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword prompt: A description of the desired images. Required. + :paramtype prompt: str + :keyword size: The desired dimension in pixels of the generated images, in the format + ":code:``x:code:``". + For example: "1024x1024", "1792x1024". Required. + :paramtype size: str + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword quality: The desired image generation quality level to use. Known values are: + "standard" and "hd". Default value is None. + :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality + :keyword response_format: The format in which image generation response items should be + presented. Known values are: "url" and "b64_json". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "prompt": "str", # A description of the desired images. Required. + "size": "str", # The desired dimension in pixels of the generated images, in + the format ":code:``x:code:``". For example: "1024x1024", + "1792x1024". Required. + "quality": "str", # Optional. The desired image generation quality level to + use. Known values are: "standard" and "hd". + "response_format": "str", # Optional. The format in which image generation + response items should be presented. Known values are: "url" and "b64_json". + "seed": 0 # Optional. 
If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed.". + } + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ImageGenerations] = kwargs.pop("cls", None) + + if body is _Unset: + if prompt is _Unset: + raise TypeError("missing required argument: prompt") + if size is _Unset: + raise TypeError("missing required argument: size") + body = { + "prompt": prompt, + "quality": quality, + "response_format": response_format, + "seed": seed, + "size": size, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_image_generations_request( + unknown_parameters=unknown_parameters, + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ImageGenerations, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 846a9799957b..496cbfb04017 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -26,7 +26,11 @@ from ... 
import models as _models from ..._model_base import SdkJSONEncoder, _deserialize -from ..._operations._operations import build_model_get_chat_completions_request, build_model_get_embeddings_request +from ..._operations._operations import ( + build_model_get_chat_completions_request, + build_model_get_embeddings_request, + build_model_get_image_generations_request, +) from .._vendor import ModelClientMixinABC if sys.version_info >= (3, 9): @@ -108,9 +112,7 @@ async def get_chat_completions( "response_format": chat_completions_response_format, "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the system_fingerprint response parameter to monitor changes in - the backend.". + parameters should return the same result. Determinism is not guaranteed.". "stop": [ "str" # Optional. A collection of textual sequences that will end completions generation. @@ -168,6 +170,9 @@ async def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -276,10 +281,8 @@ async def get_chat_completions( ~azure.ai.inference.models.ChatCompletionsNamedToolSelection :keyword seed: If specified, the system will make a best effort to sample deterministically such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed, and you - should refer to the - system_fingerprint response parameter to monitor changes in the backend.". Default value is - None. + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. :paramtype seed: int :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions @@ -318,6 +321,9 @@ async def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -395,6 +401,9 @@ async def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. 
"prompt_tokens": 0, # The number of tokens in the provided prompts @@ -502,10 +511,8 @@ async def get_chat_completions( ~azure.ai.inference.models.ChatCompletionsNamedToolSelection :keyword seed: If specified, the system will make a best effort to sample deterministically such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed, and you - should refer to the - system_fingerprint response parameter to monitor changes in the backend.". Default value is - None. + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. :paramtype seed: int :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ChatCompletions @@ -545,9 +552,7 @@ async def get_chat_completions( "response_format": chat_completions_response_format, "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the system_fingerprint response parameter to monitor changes in - the backend.". + parameters should return the same result. Determinism is not guaranteed.". "stop": [ "str" # Optional. A collection of textual sequences that will end completions generation. @@ -605,6 +610,9 @@ async def get_chat_completions( "object": "str", # The response object type, which is always ``chat.completion``. Required. "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". "completion_tokens": 0, # The number of tokens generated across all completions emissions. Required. "prompt_tokens": 0, # The number of tokens in the provided prompts @@ -751,8 +759,15 @@ async def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -817,8 +832,15 @@ async def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. 
"total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -879,8 +901,15 @@ async def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -954,8 +983,15 @@ async def get_embeddings( "object": "str", # The object type of the embeddings result. Will always be ``list``. Required. "usage": { - "prompt_tokens": 0, # Number of tokens sent in the original request. + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to"" ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this request/response. Required. } @@ -1020,3 +1056,349 @@ async def get_embeddings( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @overload + async def get_image_generations( + self, + body: JSON, + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :param body: Required. + :type body: JSON + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "prompt": "str", # A description of the desired images. Required. 
+ "size": "str", # The desired dimension in pixels of the generated images, in + the format ":code:``x:code:``". For example: "1024x1024", + "1792x1024". Required. + "quality": "str", # Optional. The desired image generation quality level to + use. Known values are: "standard" and "hd". + "response_format": "str", # Optional. The format in which image generation + response items should be presented. Known values are: "url" and "b64_json". + "seed": 0 # Optional. If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed.". + } + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + + @overload + async def get_image_generations( + self, + *, + prompt: str, + size: str, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, + response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :keyword prompt: A description of the desired images. Required. + :paramtype prompt: str + :keyword size: The desired dimension in pixels of the generated images, in the format + ":code:``x:code:``". + For example: "1024x1024", "1792x1024". Required. + :paramtype size: str + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword quality: The desired image generation quality level to use. Known values are: + "standard" and "hd". Default value is None. + :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality + :keyword response_format: The format in which image generation response items should be + presented. Known values are: "url" and "b64_json". Default value is None. 
+ :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + + @overload + async def get_image_generations( + self, + body: IO[bytes], + *, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :param body: Required. + :type body: IO[bytes] + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. 
+ } + """ + + @distributed_trace_async + async def get_image_generations( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + prompt: str = _Unset, + size: str = _Unset, + unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, + model_deployment: Optional[str] = None, + quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, + response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> _models.ImageGenerations: + # pylint: disable=line-too-long + """Creates images given a prompt. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword prompt: A description of the desired images. Required. + :paramtype prompt: str + :keyword size: The desired dimension in pixels of the generated images, in the format + ":code:``x:code:``". + For example: "1024x1024", "1792x1024". Required. + :paramtype size: str + :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra + properties in the request payload. Known values are: "error", "ignore", and "allow". Default + value is None. + :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword quality: The desired image generation quality level to use. Known values are: + "standard" and "hd". Default value is None. + :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality + :keyword response_format: The format in which image generation response items should be + presented. Known values are: "url" and "b64_json". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ImageGenerations + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "prompt": "str", # A description of the desired images. Required. + "size": "str", # The desired dimension in pixels of the generated images, in + the format ":code:``x:code:``". For example: "1024x1024", + "1792x1024". Required. + "quality": "str", # Optional. The desired image generation quality level to + use. Known values are: "standard" and "hd". + "response_format": "str", # Optional. The format in which image generation + response items should be presented. Known values are: "url" and "b64_json". + "seed": 0 # Optional. If specified, the system will make a best effort to + sample deterministically such that repeated requests with the same seed and + parameters should return the same result. Determinism is not guaranteed.". + } + + # response body for status code(s): 200 + response == { + "created": "2020-02-20 00:00:00", # A timestamp representing when this + operation was started. 
Represented as seconds since the beginning of the Unix + epoch of 00:00 on 1 Jan 1970. Required. + "data": [ + { + "b64_json": "str", # Optional. The complete data for an + image, represented as a base64-encoded string. + "url": "str" # Optional. The URL that provides temporary + access to download the generated image. + } + ], + "id": "str", # A unique identifier associated with this image generation + response. Required. + "model": "str" # The model used for the image generation. Required. + } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ImageGenerations] = kwargs.pop("cls", None) + + if body is _Unset: + if prompt is _Unset: + raise TypeError("missing required argument: prompt") + if size is _Unset: + raise TypeError("missing required argument: size") + body = { + "prompt": prompt, + "quality": quality, + "response_format": response_format, + "seed": seed, + "size": size, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_image_generations_request( + unknown_parameters=unknown_parameters, + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ImageGenerations, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index fdfdb72b38a0..bd1c026db317 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -28,11 +28,16 @@ from ._models import EmbeddingsUsage from ._models import FunctionCall from ._models import FunctionDefinition +from ._models import ImageGenerationData +from ._models import ImageGenerations +from ._enums import CapacityType from ._enums import ChatCompletionsToolSelectionPreset from ._enums import ChatRole from ._enums import CompletionsFinishReason from ._enums import EmbeddingInputType +from ._enums import ImageGenerationQuality +from ._enums import ImageGenerationResponseFormat from ._enums import UnknownParameters from ._patch import __all__ as 
_patch_all from ._patch import * # pylint: disable=unused-wildcard-import @@ -61,10 +66,15 @@ "EmbeddingsUsage", "FunctionCall", "FunctionDefinition", + "ImageGenerationData", + "ImageGenerations", + "CapacityType", "ChatCompletionsToolSelectionPreset", "ChatRole", "CompletionsFinishReason", "EmbeddingInputType", + "ImageGenerationQuality", + "ImageGenerationResponseFormat", "UnknownParameters", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 5c3005503f23..eee6a41df3ac 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -10,6 +10,15 @@ from azure.core import CaseInsensitiveEnumMeta +class CapacityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Whether your capacity has been affected by the usage amount (token count) reported here.""" + + USAGE = "usage" + """Your capacity has been affected by the usage amount (token count) reported here.""" + FIXED = "fixed" + """Your capacity has not been affected by the usage amount (token count) reported here.""" + + class ChatCompletionsToolSelectionPreset(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Represents a generic policy for how a chat completions tool may be selected.""" @@ -60,6 +69,27 @@ class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """to do""" +class ImageGenerationQuality(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """An image generation configuration that specifies how the model should prioritize quality, cost, + and speed. + """ + + STANDARD = "standard" + """Requests image generation with standard, balanced characteristics of quality, cost, and speed.""" + HD = "hd" + """Requests image generation with higher quality, higher cost and lower speed relative to + standard.""" + + +class ImageGenerationResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The format in which the generated images are returned.""" + + URL = "url" + """Image generation response items should provide a URL from which the image may be retrieved.""" + BASE64 = "b64_json" + """Image generation response items should provide image data as a base64-encoded string.""" + + class UnknownParameters(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Defines the service behavior when unknown parameters are passed as extra properties in the request payload. diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 6d94bb7efbeb..1b6a770bb104 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -629,6 +629,9 @@ class CompletionsUsage(_model_base.Model): All required parameters must be populated in order to send to server. + :ivar capacity_type: Indicates whether your capacity has been affected by the usage amount + (token count) reported here. Required. Known values are: "usage" and "fixed". + :vartype capacity_type: str or ~azure.ai.inference.models.CapacityType :ivar completion_tokens: The number of tokens generated across all completions emissions. Required.
:vartype completion_tokens: int @@ -640,6 +643,9 @@ class CompletionsUsage(_model_base.Model): :vartype total_tokens: int """ + capacity_type: Union[str, "_models.CapacityType"] = rest_field() + """Indicates whether your capacity has been affected by the usage amount (token count) reported + here. Required. Known values are: \"usage\" and \"fixed\".""" completion_tokens: int = rest_field() """The number of tokens generated across all completions emissions. Required.""" prompt_tokens: int = rest_field() @@ -651,6 +657,7 @@ class CompletionsUsage(_model_base.Model): def __init__( self, *, + capacity_type: Union[str, "_models.CapacityType"], completion_tokens: int, prompt_tokens: int, total_tokens: int, @@ -771,14 +778,30 @@ class EmbeddingsUsage(_model_base.Model): All required parameters must be populated in order to send to server. - :ivar prompt_tokens: Number of tokens sent in the original request. Required. + :ivar capacity_type: Indicates whether your capacity has been affected by the usage amount + (token count) reported here. Required. Known values are: "usage" and "fixed". + :vartype capacity_type: str or ~azure.ai.inference.models.CapacityType + :ivar input_tokens: Number of tokens in the request prompt. Required. + :vartype input_tokens: int + :ivar prompt_tokens: Number of tokens used for the prompt sent to the AI model. Typically + identical to\ ``input_tokens``. + However, certain AI models may add extra tokens to the input hence the number can be higher. + (for example when input_type="query"). Required. :vartype prompt_tokens: int :ivar total_tokens: Total number of tokens transacted in this request/response. Required. :vartype total_tokens: int """ + capacity_type: Union[str, "_models.CapacityType"] = rest_field() + """Indicates whether your capacity has been affected by the usage amount (token count) reported + here. Required. Known values are: \"usage\" and \"fixed\".""" + input_tokens: int = rest_field() + """Number of tokens in the request prompt. Required.""" prompt_tokens: int = rest_field() - """Number of tokens sent in the original request. Required.""" + """Number of tokens used for the prompt sent to the AI model. Typically identical to\ + ``input_tokens``. + However, certain AI models may add extra tokens to the input hence the number can be higher. + (for example when input_type=\"query\"). Required.""" total_tokens: int = rest_field() """Total number of tokens transacted in this request/response. Required.""" @@ -786,6 +809,8 @@ class EmbeddingsUsage(_model_base.Model): def __init__( self, *, + capacity_type: Union[str, "_models.CapacityType"], + input_tokens: int, prompt_tokens: int, total_tokens: int, ): @@ -889,3 +914,87 @@ def __init__(self, mapping: Mapping[str, Any]): def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation super().__init__(*args, **kwargs) + + +class ImageGenerationData(_model_base.Model): + """A representation of a single generated image, provided as either base64-encoded data or as a + URL from which the image + may be retrieved. + + :ivar url: The URL that provides temporary access to download the generated image. + :vartype url: str + :ivar b64_json: The complete data for an image, represented as a base64-encoded string. 
+    :vartype b64_json: str
+    """
+
+    url: Optional[str] = rest_field()
+    """The URL that provides temporary access to download the generated image."""
+    b64_json: Optional[str] = rest_field()
+    """The complete data for an image, represented as a base64-encoded string."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        url: Optional[str] = None,
+        b64_json: Optional[str] = None,
+    ):
+        ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class ImageGenerations(_model_base.Model):
+    """The result of a successful image generation operation.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar id: A unique identifier associated with this image generation response. Required.
+    :vartype id: str
+    :ivar created: A timestamp representing when this operation was started.
+     Represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.
+    :vartype created: ~datetime.datetime
+    :ivar model: The model used for the image generation. Required.
+    :vartype model: str
+    :ivar data: The images generated by the operation. Required.
+    :vartype data: list[~azure.ai.inference.models.ImageGenerationData]
+    """
+
+    id: str = rest_field()
+    """A unique identifier associated with this image generation response. Required."""
+    created: datetime.datetime = rest_field(format="unix-timestamp")
+    """A timestamp representing when this operation was started.
+     Represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required."""
+    model: str = rest_field()
+    """The model used for the image generation. Required."""
+    data: List["_models.ImageGenerationData"] = rest_field()
+    """The images generated by the operation. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        created: datetime.datetime,
+        model: str,
+        data: List["_models.ImageGenerationData"],
+    ):
+        ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
index 971631a3a54e..6b806a641e96 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
@@ -36,15 +36,7 @@ async def sample_embeddings_async():
     client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # Do a single embeddings operation. Start the operation and get a Future object.
-    future = asyncio.ensure_future(
-        client.get_embeddings(
-            input=[
-                "first sentence",
-                "second sentence",
-                "third sentence"
-            ]
-        )
-    )
+    future = asyncio.ensure_future(client.get_embeddings(input=["first sentence", "second sentence", "third sentence"]))

     # Loop until the operation is done
     while not future.done():
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index e749965e5a86..c1631e863d55 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -34,10 +34,7 @@ def sample_chat_completions():
         exit()

     # Create Model Client for synchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(key)
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
     # [END create_client]

     # [START chat_completions]
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index 300a7b8ebff0..5e8276e324ca 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -59,14 +59,8 @@ def sample_embeddings():

     # [START embeddings]
     # Do a single embeddings operation. This will be a synchronous (blocking) call.
-    result = client.get_embeddings(
-        input=[
-            "first sentence",
-            "second sentence",
-            "third sentence"
-        ]
-    )
-
+    result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])
+
     # Print results to the console
     print("Embeddings result:")
     for index, item in enumerate(result.data):
diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
index ae901860c978..63b468414a65 100644
--- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
+++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
@@ -88,7 +88,7 @@ def _do_chat_completions(
         **kwargs,
     ):

-        result = self.client.get_chat_completions(messages=kwargs.get('messages'), params=query_params)
+        result = self.client.get_chat_completions(messages=kwargs.get("messages"), params=query_params)

         # Optional: console printout of all results
         if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS:
@@ -109,7 +109,7 @@ async def _do_async_chat_completions(
         start_time = time.time()

         # Start the operation and get a Future object
-        future = asyncio.ensure_future(self.async_client.get_chat_completions(messages=kwargs.get('messages')))
+        future = asyncio.ensure_future(self.async_client.get_chat_completions(messages=kwargs.get("messages")))

         # Loop until the operation is done
         while not future.done():
@@ -138,7 +138,7 @@ def _do_chat_completion_with_error(
     ):

         try:
-            result = self.client.get_chat_completions(messages=kwargs.get('messages'))
+            result = self.client.get_chat_completions(messages=kwargs.get("messages"))
         except AzureError as e:
             print(e)

@@ -156,7 +156,7 @@ async def _do_async_chat_completion_with_error(
     ):

         try:
-            result = await self.async_client.get_chat_completions(messages=kwargs.get('messages'))
+            result = await self.async_client.get_chat_completions(messages=kwargs.get("messages"))
         except AzureError as e:
             print(e)

diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
index fe6acdff00e9..eb47da202ccc 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
@@ -24,7 +24,7 @@ async def test_async_chat_completion(self, **kwargs):

         self._create_client_for_standard_test(sync=False, **kwargs)

-        messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")]
+        messages = [sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")]

         await self._do_async_chat_completions(messages=messages, **kwargs)

diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
index 8bc18d43cf67..05fea3f8dd83 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
@@ -25,7 +25,7 @@ def test_chat_completion(self, **kwargs):

         self._create_client_for_standard_test(sync=True, **kwargs)

-        messages=[sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")]
+        messages = [sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")]

         self._do_chat_completions(messages=messages, **kwargs)

diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml
index fa8e5be0abbd..8957c511a391 100644
--- a/sdk/ai/azure-ai-inference/tsp-location.yaml
+++ b/sdk/ai/azure-ai-inference/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/ai/ModelClient
-commit: c26f993ec2271f0fabdd24905a99d2da062686dd
+commit: 319f92bd61ce8acce6e0c5e14025d7d6f34430e3
 repo: Azure/azure-rest-api-specs
 additionalDirectories:

From 675ba6d80bb8ba92342fd23aaace170da1bb60c6 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 4 Apr 2024 13:36:07 -0700
Subject: [PATCH 015/112] Samples for image generation

---
 sdk/ai/azure-ai-inference/README.md           | 43 +++++++--
 sdk/ai/azure-ai-inference/samples/README.md   |  2 +
 .../sample_chat_completions_async.py          |  5 +-
 .../async_samples/sample_embeddings_async.py  | 15 ++-
 .../sample_image_generation_async.py          | 95 +++++++++++++++++++
 .../samples/sample_chat_completions.py        |  5 +-
 .../samples/sample_embeddings.py              |  5 +-
 .../samples/sample_image_generation.py        | 82 ++++++++++++++++
 8 files changed, 239 insertions(+), 13 deletions(-)
 create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
 create mode 100644 sdk/ai/azure-ai-inference/samples/sample_image_generation.py

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 1e8cac3db029..0c46e4bfb808 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -7,6 +7,7 @@ Use the model client library to:
 * Authenticate against the service
 * Get chat completions
 * Get embeddings
+* Generate an image from a text prompt

 Note that for inference of OpenAI models hosted on Azure, you should use the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client.
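Since the image generation snippets below write `b64_json` image data to disk, note that `ImageGenerationData.b64_json` (see the models added earlier in this series) is a base64-encoded string rather than raw bytes. A minimal decoding sketch using only the standard library; the `result` object and file names are illustrative:

```python
import base64

# `result` is assumed to be an ImageGenerations response from get_image_generations();
# each item carries either a download URL or base64-encoded image data.
for index, item in enumerate(result.data):
    if item.b64_json is not None:
        # Decode the base64 string into raw image bytes before writing the file.
        with open(f"image_{index}.png", "wb") as image_file:
            image_file.write(base64.b64decode(item.b64_json))
    elif item.url is not None:
        print(f"Image {index} can be downloaded from: {item.url}")
```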
@@ -98,12 +99,19 @@ TBD

 Target the `/v1/embeddings` route

+### Image Generation
+
+TBD
+
+Target the `/images/generations` route
+
 ## Examples

 The following sections provide code snippets covering these common scenarios:

 * [Chat completions](#chat-completions-example)
 * [Embeddings](#embeddings-example)
+* [Image generation](#image-generation-example)

 These snippets use the synchronous `client` from [Create and authenticate the client](#create-and-authenticate-the-client).

@@ -152,13 +160,7 @@ This example demonstrates how to get embeddings.

 ```python
 # Do a single embeddings operation. This will be a synchronous (blocking) call.
-result = client.get_embeddings(
-    input=[
-        "first sentence",
-        "second sentence",
-        "third sentence"
-    ]
-)
+result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])

 # Print results to the console
 print("Embeddings result:")
@@ -179,11 +181,36 @@ print(f"usage.total_tokens: {result.usage.total_tokens}")



+### Image generation example
+
+This example demonstrates how to generate an image from a text prompt.
+
+
+
+```python
+import base64
+
+# Generate a single image from a text prompt. This will be a synchronous (blocking) call.
+result = client.get_image_generations(
+    prompt="A painting of a beautiful sunset over a mountain lake.",
+    size="1024x768"
+)
+
+# Save generated image to file and print other results to the console
+print("Image generation result:")
+for index, item in enumerate(result.data):
+    with open(f"image_{index}.png", "wb") as image:
+        image.write(base64.b64decode(item.b64_json))
+print(f"id: {result.id}")
+print(f"model: {result.model}")
+print(f"created: {result.created}")
+```
+
+
+
 ## Troubleshooting

 ### Exceptions

-The `get_chat_completions` and `get_embeddings` methods raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue:
+The `get_chat_completions`, `get_embeddings` and `get_image_generations` methods raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue:

 ```python
 from azure.core.exceptions import HttpResponseError
diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md
index c6e723deb6b3..08e6b902dc60 100644
--- a/sdk/ai/azure-ai-inference/samples/README.md
+++ b/sdk/ai/azure-ai-inference/samples/README.md
@@ -20,6 +20,7 @@ The concepts are similar, you can easily modify any of the samples to your needs
 |----------------|-------------|
 |[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. |
 |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. |
+|[sample_image_generation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_generation.py) | Generate an image from a prompt using a synchronous client. |

 ## Asynchronous client samples

@@ -27,6 +28,7 @@ The concepts are similar, you can easily modify any of the samples to your needs
 |----------------|-------------|
 |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. |
 |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. |
+|[sample_image_generation_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py) | Generate an image from a prompt using an asynchronous client. |

 ## Prerequisites
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
index 55818705eb2a..5d3ef71c705f 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
@@ -34,7 +34,10 @@ async def sample_chat_completions_async():
         exit()

     # Create a Model Client for asynchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ModelClient(
+        endpoint=endpoint,
+        credential=AzureKeyCredential(key)
+    )

     # Do a single chat completion operation. Start the operation and get a Future object.
     future = asyncio.ensure_future(
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
index 6b806a641e96..215a81824ee2 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
@@ -33,10 +33,21 @@ async def sample_embeddings_async():
         exit()

     # Create a ModelClient for asynchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ModelClient(
+        endpoint=endpoint,
+        credential=AzureKeyCredential(key)
+    )

     # Do a single embeddings operation. Start the operation and get a Future object.
-    future = asyncio.ensure_future(client.get_embeddings(input=["first sentence", "second sentence", "third sentence"]))
+    future = asyncio.ensure_future(
+        client.get_embeddings(
+            input=[
+                "first sentence",
+                "second sentence",
+                "third sentence"
+            ]
+        )
+    )

     # Loop until the operation is done
     while not future.done():
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
new file mode 100644
index 000000000000..a42a3e744514
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
@@ -0,0 +1,95 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to generate an image from a prompt using an asynchronous client.
+
+USAGE:
+    python sample_image_generation_async.py
+
+    Set these two environment variables before running the sample:
+    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
+       where `deployment-name` is your unique AI Model deployment name, and
+       `azure-region` is the Azure region where your model is deployed.
+    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+import asyncio
+
+async def sample_image_generation_async():
+    import os
+    import base64
+
+    from azure.ai.inference.aio import ModelClient
+    from azure.core.credentials import AzureKeyCredential
+
+    # [START logging]
+    import sys
+    import logging
+
+    # Acquire the logger for this client library. Use 'azure' to affect both
+    # 'azure.core' and 'azure.ai.inference' libraries.
+    logger = logging.getLogger("azure")
+
+    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+    logger.setLevel(logging.DEBUG)
+
+    # Direct logging output to stdout (the default):
+    handler = logging.StreamHandler(stream=sys.stdout)
+    # Or direct logging output to a file:
+    # handler = logging.FileHandler(filename = 'sample.log')
+    logger.addHandler(handler)
+
+    # Optional: change the default logging format. Here we add a timestamp.
+    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+    handler.setFormatter(formatter)
+    # [END logging]
+
+    # Read the values of your model endpoint and key from environment variables
+    try:
+        endpoint = os.environ["MODEL_ENDPOINT"]
+        key = os.environ["MODEL_KEY"]
+    except KeyError:
+        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    # Create a ModelClient for asynchronous operations
+    client = ModelClient(
+        endpoint=endpoint,
+        credential=AzureKeyCredential(key)
+    )
+
+    # Generate an image from a text prompt. This will be an asynchronous (non-blocking) call.
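+    # The Future-based polling below mirrors the other async samples. As a
+    # simpler sketch (assuming you do not need to do other work while waiting),
+    # the async client's method can also be awaited directly:
+    #     result = await client.get_image_generations(prompt="...", size="1024x768")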
+    future = asyncio.ensure_future(
+        client.get_image_generations(
+            prompt="A painting of a beautiful sunset over a mountain lake.",
+            size="1024x768"
+        )
+    )
+
+    # Loop until the operation is done
+    while not future.done():
+        await asyncio.sleep(0.1)
+        print("Waiting...")
+
+    # Get the result
+    result = future.result()
+    await client.close()
+
+    # Save generated image to file and print other results to the console
+    print("Image generation result:")
+    for index, item in enumerate(result.data):
+        with open(f"image_{index}.png", "wb") as image:
+            image.write(base64.b64decode(item.b64_json))
+    print(f"id: {result.id}")
+    print(f"model: {result.model}")
+    print(f"created: {result.created}")
+
+
+async def main():
+    await sample_image_generation_async()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index c1631e863d55..e749965e5a86 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -34,7 +34,10 @@ def sample_chat_completions():
         exit()

     # Create Model Client for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ModelClient(
+        endpoint=endpoint,
+        credential=AzureKeyCredential(key)
+    )
     # [END create_client]

     # [START chat_completions]
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index 5e8276e324ca..ce771efa21a4 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -55,7 +55,10 @@ def sample_embeddings():
         exit()

     # Create a ModelClient for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential("key"))
+    client = ModelClient(
+        endpoint=endpoint,
+        credential=AzureKeyCredential(key)
+    )

     # [START embeddings]
     # Do a single embeddings operation. This will be a synchronous (blocking) call.
diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
new file mode 100644
index 000000000000..bf9703bc155a
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
@@ -0,0 +1,82 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to generate an image from a prompt.
+
+USAGE:
+    python sample_image_generation.py
+
+    Set these two environment variables before running the sample:
+    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
+       where `deployment-name` is your unique AI Model deployment name, and
+       `azure-region` is the Azure region where your model is deployed.
+    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+
+
+def sample_image_generation():
+    import os
+    import base64
+
+    from azure.ai.inference import ModelClient
+    from azure.core.credentials import AzureKeyCredential
+
+    # [START logging]
+    import sys
+    import logging
+
+    # Acquire the logger for this client library. Use 'azure' to affect both
+    # 'azure.core' and 'azure.ai.inference' libraries.
+    logger = logging.getLogger("azure")
+
+    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
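+    # Tip: logging.DEBUG is verbose. Clients built on azure-core generally also
+    # accept a `logging_enable=True` constructor keyword to include redacted HTTP
+    # request/response details in the DEBUG output (assumed, not shown here).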
+    logger.setLevel(logging.DEBUG)
+
+    # Direct logging output to stdout (the default):
+    handler = logging.StreamHandler(stream=sys.stdout)
+    # Or direct logging output to a file:
+    # handler = logging.FileHandler(filename = 'sample.log')
+    logger.addHandler(handler)
+
+    # Optional: change the default logging format. Here we add a timestamp.
+    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+    handler.setFormatter(formatter)
+    # [END logging]
+
+    # Read the values of your model endpoint and key from environment variables
+    try:
+        endpoint = os.environ["MODEL_ENDPOINT"]
+        key = os.environ["MODEL_KEY"]
+    except KeyError:
+        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    # Create a ModelClient for synchronous operations
+    client = ModelClient(
+        endpoint=endpoint,
+        credential=AzureKeyCredential(key)
+    )
+
+    # [START image_generation]
+    # Generate a single image from a text prompt. This will be a synchronous (blocking) call.
+    result = client.get_image_generations(
+        prompt="A painting of a beautiful sunset over a mountain lake.",
+        size="1024x768"
+    )
+
+    # Save generated image to file and print other results to the console
+    print("Image generation result:")
+    for index, item in enumerate(result.data):
+        with open(f"image_{index}.png", "wb") as image:
+            image.write(base64.b64decode(item.b64_json))
+    print(f"id: {result.id}")
+    print(f"model: {result.model}")
+    print(f"created: {result.created}")
+    # [END image_generation]
+
+
+if __name__ == "__main__":
+    sample_image_generation()

From c5ea2fce49f6bcc7aed00ae354c6956f5e2b56e5 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 4 Apr 2024 14:54:27 -0700
Subject: [PATCH 016/112] Add dictionary of extra parameters

---
 sdk/ai/azure-ai-inference/README.md           |   9 +-
 .../ai/inference/_operations/_operations.py   | 324 +++++++++++-------
 .../inference/aio/_operations/_operations.py  | 286 ++++++++++------
 .../azure/ai/inference/models/__init__.py     |   4 +-
 .../azure/ai/inference/models/_enums.py       |  28 +-
 .../async_samples/sample_embeddings_async.py  |   3 +-
 .../sample_image_generation_async.py          |   7 +-
 .../samples/sample_embeddings.py              |   7 +-
 .../samples/sample_image_generation.py        |   2 +-
 sdk/ai/azure-ai-inference/tsp-location.yaml   |   2 +-
 10 files changed, 420 insertions(+), 252 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 0c46e4bfb808..01bfbebb70bc 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -160,7 +160,12 @@ This example demonstrates how to get embeddings.

 ```python
 # Do a single embeddings operation. This will be a synchronous (blocking) call.
-result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])
+result = client.get_embeddings(
+    input=[
+        "first sentence",
+        "second sentence",
+        "third sentence"
+    ]
+)

 # Print results to the console
 print("Embeddings result:")
@@ -198,7 +203,7 @@ result = client.get_image_generations(
 print("Image generation result:")
 for index, item in enumerate(result.data):
     with open(f"image_{index}.png", "wb") as image:
         image.write(base64.b64decode(item.b64_json))
 print(f"id: {result.id}")
 print(f"model: {result.model}")
 print(f"created: {result.created}")
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
index 3e8666d4de89..43de0120f5cb 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
@@ -44,8 +44,8 @@

 def build_model_get_chat_completions_request(
     *,
-    unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
-    model_deployment: Optional[str] = None,
+    extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None,
+    model_deployemnt: Optional[str] = None,
     **kwargs: Any
 ) -> HttpRequest:
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
@@ -62,10 +62,10 @@ def build_model_get_chat_completions_request(
     _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

     # Construct headers
-    if unknown_parameters is not None:
-        _headers["unknown-parameters"] = _SERIALIZER.header("unknown_parameters", unknown_parameters, "str")
-    if model_deployment is not None:
-        _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str")
+    if extra_parameters is not None:
+        _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str")
+    if model_deployemnt is not None:
+        _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployemnt", model_deployemnt, "str")
     _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
     if content_type is not None:
         _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
@@ -75,8 +75,8 @@

 def build_model_get_embeddings_request(
     *,
-    unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None,
-    model_deployment: Optional[str] = None,
+    extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None,
+    model_deployemnt: Optional[str] = None,
     **kwargs: Any
 ) -> HttpRequest:
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
@@ -93,10 +93,10 @@ def build_model_get_embeddings_request(
     _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

     # Construct headers
-    if unknown_parameters is not None:
-        _headers["unknown-parameters"] = _SERIALIZER.header("unknown_parameters", unknown_parameters, "str")
-    if model_deployment is not None:
-        _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str")
+    if extra_parameters is not None:
+        _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str")
+    if model_deployemnt is not None:
+        _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployemnt", model_deployemnt, "str")
     _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
     if content_type is not None:
         _headers["Content-Type"] =
_SERIALIZER.header("content_type", content_type, "str") @@ -106,8 +106,8 @@ def build_model_get_embeddings_request( def build_model_get_image_generations_request( # pylint: disable=name-too-long *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -118,16 +118,16 @@ def build_model_get_image_generations_request( # pylint: disable=name-too-long accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/v1/images/generations" + _url = "/images/generations" # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if unknown_parameters is not None: - _headers["unknown-parameters"] = _SERIALIZER.header("unknown_parameters", unknown_parameters, "str") - if model_deployment is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") + if extra_parameters is not None: + _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") + if model_deployemnt is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployemnt", model_deployemnt, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -141,8 +141,8 @@ def get_chat_completions( self, body: JSON, *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -154,15 +154,14 @@ def get_chat_completions( :param body: Required. :type body: JSON - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -191,6 +190,13 @@ def get_chat_completions( "messages": [ chat_request_message ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. 
How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "frequency_penalty": 0.0, # Optional. A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their @@ -280,9 +286,10 @@ def get_chat_completions( self, *, messages: List[_models.ChatRequestMessage], - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -310,18 +317,24 @@ def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -431,8 +444,8 @@ def get_chat_completions( self, body: IO[bytes], *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -444,15 +457,14 @@ def get_chat_completions( :param body: Required. :type body: IO[bytes] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. 
- :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -512,8 +524,9 @@ def get_chat_completions( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -543,15 +556,21 @@ def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -631,6 +650,13 @@ def get_chat_completions( "messages": [ chat_request_message ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. 
They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "frequency_penalty": 0.0, # Optional. A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their @@ -732,6 +758,7 @@ def get_chat_completions( if messages is _Unset: raise TypeError("missing required argument: messages") body = { + "extras": extras, "frequency_penalty": frequency_penalty, "max_tokens": max_tokens, "messages": messages, @@ -754,8 +781,8 @@ def get_chat_completions( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( - unknown_parameters=unknown_parameters, - model_deployment=model_deployment, + extra_parameters=extra_parameters, + model_deployemnt=model_deployemnt, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -792,8 +819,8 @@ def get_embeddings( self, body: JSON, *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -802,15 +829,14 @@ def get_embeddings( :param body: Required. :type body: JSON - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -827,6 +853,13 @@ def get_embeddings( "str" # Input texts to get embeddings for, encoded as a an array of strings. Required. ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "input_type": "str" # Optional. Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". 
} @@ -871,9 +904,10 @@ def get_embeddings( self, *, input: List[str], - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: @@ -882,18 +916,24 @@ def get_embeddings( :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword input_type: Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType @@ -944,8 +984,8 @@ def get_embeddings( self, body: IO[bytes], *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -954,15 +994,14 @@ def get_embeddings( :param body: Required. :type body: IO[bytes] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. 
+ :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -1014,8 +1053,9 @@ def get_embeddings( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: @@ -1026,15 +1066,21 @@ def get_embeddings( :type body: JSON or IO[bytes] :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword input_type: Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType @@ -1051,6 +1097,13 @@ def get_embeddings( "str" # Input texts to get embeddings for, encoded as a an array of strings. Required. ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "input_type": "str" # Optional. Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". 
} @@ -1106,7 +1159,7 @@ def get_embeddings( if body is _Unset: if input is _Unset: raise TypeError("missing required argument: input") - body = {"input": input, "input_type": input_type} + body = {"extras": extras, "input": input, "input_type": input_type} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -1116,8 +1169,8 @@ def get_embeddings( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_embeddings_request( - unknown_parameters=unknown_parameters, - model_deployment=model_deployment, + extra_parameters=extra_parameters, + model_deployemnt=model_deployemnt, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1154,8 +1207,8 @@ def get_image_generations( self, body: JSON, *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1164,15 +1217,14 @@ def get_image_generations( :param body: Required. :type body: JSON - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1189,6 +1241,13 @@ def get_image_generations( "size": "str", # The desired dimension in pixels of the generated images, in the format ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "quality": "str", # Optional. The desired image generation quality level to use. Known values are: "standard" and "hd". "response_format": "str", # Optional. 
The format in which image generation @@ -1223,9 +1282,10 @@ def get_image_generations( *, prompt: str, size: str, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, seed: Optional[int] = None, @@ -1240,18 +1300,24 @@ def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword quality: The desired image generation quality level to use. Known values are: "standard" and "hd". Default value is None. :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality @@ -1294,8 +1360,8 @@ def get_image_generations( self, body: IO[bytes], *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1304,15 +1370,14 @@ def get_image_generations( :param body: Required. :type body: IO[bytes] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". 
Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -1349,8 +1414,9 @@ def get_image_generations( *, prompt: str = _Unset, size: str = _Unset, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, seed: Optional[int] = None, @@ -1367,15 +1433,21 @@ def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword quality: The desired image generation quality level to use. Known values are: "standard" and "hd". Default value is None. :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality @@ -1400,6 +1472,13 @@ def get_image_generations( "size": "str", # The desired dimension in pixels of the generated images, in the format ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "quality": "str", # Optional. The desired image generation quality level to use. 
Known values are: "standard" and "hd". "response_format": "str", # Optional. The format in which image generation @@ -1447,6 +1526,7 @@ def get_image_generations( if size is _Unset: raise TypeError("missing required argument: size") body = { + "extras": extras, "prompt": prompt, "quality": quality, "response_format": response_format, @@ -1462,8 +1542,8 @@ def get_image_generations( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_image_generations_request( - unknown_parameters=unknown_parameters, - model_deployment=model_deployment, + extra_parameters=extra_parameters, + model_deployemnt=model_deployemnt, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 496cbfb04017..3aa4b45b09e6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -49,8 +49,8 @@ async def get_chat_completions( self, body: JSON, *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -62,15 +62,14 @@ async def get_chat_completions( :param body: Required. :type body: JSON - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -99,6 +98,13 @@ async def get_chat_completions( "messages": [ chat_request_message ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "frequency_penalty": 0.0, # Optional. A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. 
Positive values will make tokens less likely to appear as their @@ -188,9 +194,10 @@ async def get_chat_completions( self, *, messages: List[_models.ChatRequestMessage], - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -218,18 +225,24 @@ async def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -339,8 +352,8 @@ async def get_chat_completions( self, body: IO[bytes], *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -352,15 +365,14 @@ async def get_chat_completions( :param body: Required. :type body: IO[bytes] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. 
Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -420,8 +432,9 @@ async def get_chat_completions( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -451,15 +464,21 @@ async def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -539,6 +558,13 @@ async def get_chat_completions( "messages": [ chat_request_message ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "frequency_penalty": 0.0, # Optional. 
A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their @@ -640,6 +666,7 @@ async def get_chat_completions( if messages is _Unset: raise TypeError("missing required argument: messages") body = { + "extras": extras, "frequency_penalty": frequency_penalty, "max_tokens": max_tokens, "messages": messages, @@ -662,8 +689,8 @@ async def get_chat_completions( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( - unknown_parameters=unknown_parameters, - model_deployment=model_deployment, + extra_parameters=extra_parameters, + model_deployemnt=model_deployemnt, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -700,8 +727,8 @@ async def get_embeddings( self, body: JSON, *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -710,15 +737,14 @@ async def get_embeddings( :param body: Required. :type body: JSON - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -735,6 +761,13 @@ async def get_embeddings( "str" # Input texts to get embeddings for, encoded as a an array of strings. Required. ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "input_type": "str" # Optional. Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". 
} @@ -779,9 +812,10 @@ async def get_embeddings( self, *, input: List[str], - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: @@ -790,18 +824,24 @@ async def get_embeddings( :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword input_type: Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType @@ -852,8 +892,8 @@ async def get_embeddings( self, body: IO[bytes], *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -862,15 +902,14 @@ async def get_embeddings( :param body: Required. :type body: IO[bytes] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. 
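Taken together, the `extras` body field and the `extra-parameters` request header documented above form a single mechanism: the dictionary is serialized as-is into the root of the JSON payload, and the header tells the service what to do with keys it does not recognize. A minimal sketch under stated assumptions (the `ModelClient` import path, the environment variable names, and the custom parameter name are placeholders, not part of this patch):

```python
import os

from azure.ai.inference import ModelClient  # assumed import path
from azure.ai.inference.models import ExtraParameters
from azure.core.credentials import AzureKeyCredential

client = ModelClient(
    endpoint=os.environ["MODEL_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["MODEL_KEY"]),
)

# "my_custom_setting" is a hypothetical model-specific option. It travels in
# the root of the JSON body; the extra-parameters header (set here via the
# ExtraParameters enum) asks the service to forward it to the back-end model
# instead of rejecting the request ("error") or dropping it ("ignore").
result = client.get_embeddings(
    input=["first sentence", "second sentence"],
    extras={"my_custom_setting": "value"},
    extra_parameters=ExtraParameters.ALLOW,
)
```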
+ :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -922,8 +961,9 @@ async def get_embeddings( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: @@ -934,15 +974,21 @@ async def get_embeddings( :type body: JSON or IO[bytes] :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword input_type: Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType @@ -959,6 +1005,13 @@ async def get_embeddings( "str" # Input texts to get embeddings for, encoded as a an array of strings. Required. ], + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "input_type": "str" # Optional. Specifies the input type to use for embedding search. Known values are: "text", "query", and "document". 
} @@ -1014,7 +1067,7 @@ async def get_embeddings( if body is _Unset: if input is _Unset: raise TypeError("missing required argument: input") - body = {"input": input, "input_type": input_type} + body = {"extras": extras, "input": input, "input_type": input_type} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -1024,8 +1077,8 @@ async def get_embeddings( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_embeddings_request( - unknown_parameters=unknown_parameters, - model_deployment=model_deployment, + extra_parameters=extra_parameters, + model_deployemnt=model_deployemnt, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1062,8 +1115,8 @@ async def get_image_generations( self, body: JSON, *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1072,15 +1125,14 @@ async def get_image_generations( :param body: Required. :type body: JSON - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1097,6 +1149,13 @@ async def get_image_generations( "size": "str", # The desired dimension in pixels of the generated images, in the format ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "quality": "str", # Optional. The desired image generation quality level to use. Known values are: "standard" and "hd". "response_format": "str", # Optional. 
The format in which image generation @@ -1131,9 +1190,10 @@ async def get_image_generations( *, prompt: str, size: str, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, seed: Optional[int] = None, @@ -1148,18 +1208,24 @@ async def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword quality: The desired image generation quality level to use. Known values are: "standard" and "hd". Default value is None. :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality @@ -1202,8 +1268,8 @@ async def get_image_generations( self, body: IO[bytes], *, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1212,15 +1278,14 @@ async def get_image_generations( :param body: Required. :type body: IO[bytes] - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. 
Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -1257,8 +1322,9 @@ async def get_image_generations( *, prompt: str = _Unset, size: str = _Unset, - unknown_parameters: Optional[Union[str, _models.UnknownParameters]] = None, - model_deployment: Optional[str] = None, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployemnt: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, seed: Optional[int] = None, @@ -1275,15 +1341,21 @@ async def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword unknown_parameters: Controls what happens if unknown parameters are passed as extra - properties in the request payload. Known values are: "error", "ignore", and "allow". Default - value is None. - :paramtype unknown_parameters: str or ~azure.ai.inference.models.UnknownParameters - :keyword model_deployment: Name of the deployment to which you would like to route the request. + :keyword extra_parameters: Controls what happens if extra parameters are passed in the request + payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters + :keyword model_deployemnt: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployment: str + :paramtype model_deployemnt: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] :keyword quality: The desired image generation quality level to use. Known values are: "standard" and "hd". Default value is None. :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality @@ -1308,6 +1380,13 @@ async def get_image_generations( "size": "str", # The desired dimension in pixels of the generated images, in the format ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, "quality": "str", # Optional. 
The desired image generation quality level to use. Known values are: "standard" and "hd". "response_format": "str", # Optional. The format in which image generation @@ -1355,6 +1434,7 @@ async def get_image_generations( if size is _Unset: raise TypeError("missing required argument: size") body = { + "extras": extras, "prompt": prompt, "quality": quality, "response_format": response_format, @@ -1370,8 +1450,8 @@ async def get_image_generations( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_image_generations_request( - unknown_parameters=unknown_parameters, - model_deployment=model_deployment, + extra_parameters=extra_parameters, + model_deployemnt=model_deployemnt, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index bd1c026db317..ebe181771061 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -36,9 +36,9 @@ from ._enums import ChatRole from ._enums import CompletionsFinishReason from ._enums import EmbeddingInputType +from ._enums import ExtraParameters from ._enums import ImageGenerationQuality from ._enums import ImageGenerationResponseFormat -from ._enums import UnknownParameters from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -73,9 +73,9 @@ "ChatRole", "CompletionsFinishReason", "EmbeddingInputType", + "ExtraParameters", "ImageGenerationQuality", "ImageGenerationResponseFormat", - "UnknownParameters", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index eee6a41df3ac..879fdb8015fc 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -69,6 +69,19 @@ class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """to do""" +class ExtraParameters(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Defines the service behavior when extra parameters are passed in the request payload.""" + + ERROR = "error" + """The service should error when it sees extra parameters in the request payload. This is the + default behavior if the service.""" + IGNORE = "ignore" + """The service should ignore extra parameters in the request payload. They will not be passed to + the back-end AI model.""" + ALLOW = "allow" + """The service should pass extra parameters to the back-end AI model.""" + + class ImageGenerationQuality(str, Enum, metaclass=CaseInsensitiveEnumMeta): """An image generation configuration that specifies how the model should prioritize quality, cost, and speed. @@ -88,18 +101,3 @@ class ImageGenerationResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta """Image generation response items should provide a URL from which the image may be retrieved.""" BASE64 = "b64_json" """Image generation response items should provide image data as a base64-encoded string.""" - - -class UnknownParameters(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines the service behavior when unknown parameters are passed as extra properties in the - request payload. 
- """ - - ERROR = "error" - """The service should error when it sees unknown parameters in the request payload. This is the - default behavior if the service.""" - IGNORE = "ignore" - """The service should ignore unknown parameters in the request payload. They will not be passed to - the back-end AI model.""" - ALLOW = "allow" - """The service should pass unknown parameters to the back-end AI model.""" diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 215a81824ee2..5c965b4a8e76 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -43,8 +43,7 @@ async def sample_embeddings_async(): client.get_embeddings( input=[ "first sentence", - "second sentence", - "third sentence" + "second sentence","third sentence" ] ) ) diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index a42a3e744514..09a6d7dea1fe 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -17,6 +17,7 @@ """ import asyncio + async def sample_image_generation_async(): import os @@ -66,7 +67,7 @@ async def sample_image_generation_async(): prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768" ) - ) + ) # Loop until the operation is done while not future.done(): @@ -81,7 +82,7 @@ async def sample_image_generation_async(): print("Image generation result:") for index, item in enumerate(result.data): with open(f"image_{index}.png", "wb") as image: - image.write(item.b64_json.decode('base64')) + image.write(item.b64_json.decode("base64")) print(f"id: {result.id}") print(f"model: {result.model}") print(f"created: {result.created}") @@ -92,4 +93,4 @@ async def main(): if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index ce771efa21a4..472a3b181459 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -62,7 +62,12 @@ def sample_embeddings(): # [START embeddings] # Do a single embeddings operation. This will be a synchronously (blocking) call. 
-    result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])
+    result = client.get_embeddings(
+        input=[
+            "first sentence",
+            "second sentence","third sentence"
+        ]
+    )
 
     # Print results to the console
     print("Embeddings result:")
diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
index bf9703bc155a..28d2561cbb29 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
@@ -71,7 +71,7 @@ def sample_image_generation():
     print("Image generation result:")
     for index, item in enumerate(result.data):
         with open(f"image_{index}.png", "wb") as image:
-            image.write(item.b64_json.decode('base64'))
+            image.write(item.b64_json.decode("base64"))
     print(f"id: {result.id}")
     print(f"model: {result.model}")
     print(f"created: {result.created}")
diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml
index 8957c511a391..4eaa34eac41f 100644
--- a/sdk/ai/azure-ai-inference/tsp-location.yaml
+++ b/sdk/ai/azure-ai-inference/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/ai/ModelClient
-commit: 319f92bd61ce8acce6e0c5e14025d7d6f34430e3
+commit: 227172eb4c80e86d103825ccbb37e4583167c4e5
 repo: Azure/azure-rest-api-specs
 additionalDirectories:

From 45c7ca12b2968823496ba9113ddfdda7e9b5e2f4 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 4 Apr 2024 15:37:21 -0700
Subject: [PATCH 017/112] Re-emit

---
 sdk/ai/azure-ai-inference/README.md           | 15 +--
 .../ai/inference/_operations/_operations.py   | 96 +++++++++----------
 .../inference/aio/_operations/_operations.py  | 78 +++++++--------
 .../sample_chat_completions_async.py          |  5 +-
 .../async_samples/sample_embeddings_async.py  | 14 +--
 .../sample_image_generation_async.py          | 10 +-
 .../samples/sample_chat_completions.py        |  5 +-
 .../samples/sample_embeddings.py              | 12 +--
 .../samples/sample_image_generation.py        |  8 +-
 sdk/ai/azure-ai-inference/tsp-location.yaml   |  2 +-
 10 files changed, 101 insertions(+), 144 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 01bfbebb70bc..40f25f287164 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -63,10 +63,7 @@ except KeyError:
     exit()
 
 # Create Model Client for synchronous operations
-client = ModelClient(
-    endpoint=endpoint,
-    credential=AzureKeyCredential(key)
-)
+client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 ```
 
@@ -160,12 +157,7 @@ This example demonstrates how to get embeddings.
 ```python
 # Do a single embeddings operation. This will be a synchronously (blocking) call.
-result = client.get_embeddings(
-    input=[
-        "first sentence",
-        "second sentence","third sentence"
-    ]
-)
+result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])
 
 # Print results to the console
 print("Embeddings result:")
@@ -195,8 +187,7 @@ This example demonstrates how to generate an image from a text prompt
 
 ```python
 # Generate a single image from a text prompt. This will be a synchronously (blocking) call.
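One caveat on the image-generation samples above: `item.b64_json.decode("base64")` is Python 2-era code. Python 3 has no `"base64"` codec, and `str` has no `decode` method at all, so the loop would raise before writing any file. Assuming `b64_json` holds a base64-encoded string (the `b64_json` choice of `ImageGenerationResponseFormat`, as opposed to `"url"`), a Python 3 version of the save loop might look like this:

```python
import base64

# result is an ImageGenerations response whose items carry base64-encoded
# image bytes in b64_json (i.e. response_format="b64_json", not "url").
for index, item in enumerate(result.data):
    with open(f"image_{index}.png", "wb") as image:
        image.write(base64.b64decode(item.b64_json))
```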
result = client.get_image_generations( - prompt="A painting of a beautiful sunset over a mountain lake.", - size="1024x768" + prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768" ) # Save generated image to file and print other results the the console diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 43de0120f5cb..76a434a68084 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -45,7 +45,7 @@ def build_model_get_chat_completions_request( *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -64,8 +64,8 @@ def build_model_get_chat_completions_request( # Construct headers if extra_parameters is not None: _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") - if model_deployemnt is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployemnt", model_deployemnt, "str") + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -76,7 +76,7 @@ def build_model_get_chat_completions_request( def build_model_get_embeddings_request( *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -95,8 +95,8 @@ def build_model_get_embeddings_request( # Construct headers if extra_parameters is not None: _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") - if model_deployemnt is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployemnt", model_deployemnt, "str") + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -107,7 +107,7 @@ def build_model_get_embeddings_request( def build_model_get_image_generations_request( # pylint: disable=name-too-long *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -126,8 +126,8 @@ def build_model_get_image_generations_request( # pylint: disable=name-too-long # Construct headers if extra_parameters is not None: _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") - if model_deployemnt is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployemnt", model_deployemnt, "str") + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", 
model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -142,7 +142,7 @@ def get_chat_completions( body: JSON, *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -157,11 +157,11 @@ def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -287,7 +287,7 @@ def get_chat_completions( *, messages: List[_models.ChatRequestMessage], extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -320,11 +320,11 @@ def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -445,7 +445,7 @@ def get_chat_completions( body: IO[bytes], *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -460,11 +460,11 @@ def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. 
Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -525,7 +525,7 @@ def get_chat_completions( *, messages: List[_models.ChatRequestMessage] = _Unset, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, @@ -559,11 +559,11 @@ def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -782,7 +782,7 @@ def get_chat_completions( _request = build_model_get_chat_completions_request( extra_parameters=extra_parameters, - model_deployemnt=model_deployemnt, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -820,7 +820,7 @@ def get_embeddings( body: JSON, *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -832,11 +832,11 @@ def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -905,7 +905,7 @@ def get_embeddings( *, input: List[str], extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -919,11 +919,11 @@ def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -985,7 +985,7 @@ def get_embeddings( body: IO[bytes], *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -997,11 +997,11 @@ def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -1054,7 +1054,7 @@ def get_embeddings( *, input: List[str] = _Unset, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any @@ -1069,11 +1069,11 @@ def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. 
- :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -1170,7 +1170,7 @@ def get_embeddings( _request = build_model_get_embeddings_request( extra_parameters=extra_parameters, - model_deployemnt=model_deployemnt, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1208,7 +1208,7 @@ def get_image_generations( body: JSON, *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1220,11 +1220,11 @@ def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1283,7 +1283,7 @@ def get_image_generations( prompt: str, size: str, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, @@ -1303,11 +1303,11 @@ def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1361,7 +1361,7 @@ def get_image_generations( body: IO[bytes], *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1373,11 +1373,11 @@ def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. 
Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -1415,7 +1415,7 @@ def get_image_generations( prompt: str = _Unset, size: str = _Unset, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, @@ -1436,11 +1436,11 @@ def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -1543,7 +1543,7 @@ def get_image_generations( _request = build_model_get_image_generations_request( extra_parameters=extra_parameters, - model_deployemnt=model_deployemnt, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 3aa4b45b09e6..f7633a29906b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -50,7 +50,7 @@ async def get_chat_completions( body: JSON, *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -65,11 +65,11 @@ async def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. 
+ :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -195,7 +195,7 @@ async def get_chat_completions( *, messages: List[_models.ChatRequestMessage], extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -228,11 +228,11 @@ async def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -353,7 +353,7 @@ async def get_chat_completions( body: IO[bytes], *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: @@ -368,11 +368,11 @@ async def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -433,7 +433,7 @@ async def get_chat_completions( *, messages: List[_models.ChatRequestMessage] = _Unset, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, @@ -467,11 +467,11 @@ async def get_chat_completions( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. 
Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -690,7 +690,7 @@ async def get_chat_completions( _request = build_model_get_chat_completions_request( extra_parameters=extra_parameters, - model_deployemnt=model_deployemnt, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -728,7 +728,7 @@ async def get_embeddings( body: JSON, *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -740,11 +740,11 @@ async def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -813,7 +813,7 @@ async def get_embeddings( *, input: List[str], extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -827,11 +827,11 @@ async def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -893,7 +893,7 @@ async def get_embeddings( body: IO[bytes], *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: @@ -905,11 +905,11 @@ async def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -962,7 +962,7 @@ async def get_embeddings( *, input: List[str] = _Unset, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any @@ -977,11 +977,11 @@ async def get_embeddings( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -1078,7 +1078,7 @@ async def get_embeddings( _request = build_model_get_embeddings_request( extra_parameters=extra_parameters, - model_deployemnt=model_deployemnt, + model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1116,7 +1116,7 @@ async def get_image_generations( body: JSON, *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1128,11 +1128,11 @@ async def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. 
+ :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1191,7 +1191,7 @@ async def get_image_generations( prompt: str, size: str, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, @@ -1211,11 +1211,11 @@ async def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1269,7 +1269,7 @@ async def get_image_generations( body: IO[bytes], *, extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployemnt: Optional[str] = None, + model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ImageGenerations: @@ -1281,11 +1281,11 @@ async def get_image_generations( :keyword extra_parameters: Controls what happens if extra parameters are passed in the request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters - :keyword model_deployemnt: Name of the deployment to which you would like to route the request. + :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. Default value is None. - :paramtype model_deployemnt: str + :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str
@@ -1323,7 +1323,7 @@ async def get_image_generations(
         prompt: str = _Unset,
         size: str = _Unset,
         extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None,
-        model_deployemnt: Optional[str] = None,
+        model_deployment: Optional[str] = None,
         extras: Optional[Dict[str, str]] = None,
         quality: Optional[Union[str, _models.ImageGenerationQuality]] = None,
         response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None,
@@ -1344,11 +1344,11 @@ async def get_image_generations(
         :keyword extra_parameters: Controls what happens if extra parameters are passed in the request
          payload. Known values are: "error", "ignore", and "allow". Default value is None.
         :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters
-        :keyword model_deployemnt: Name of the deployment to which you would like to route the request.
+        :keyword model_deployment: Name of the deployment to which you would like to route the request.
          Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target
          a test environment instead of production environment. Default value is None.
-        :paramtype model_deployemnt: str
+        :paramtype model_deployment: str
         :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the
          standard request payload.
          They will be passed to the service as-is in the root of the JSON request payload.
@@ -1451,7 +1451,7 @@ async def get_image_generations(

         _request = build_model_get_image_generations_request(
             extra_parameters=extra_parameters,
-            model_deployemnt=model_deployemnt,
+            model_deployment=model_deployment,
             content_type=content_type,
             api_version=self._config.api_version,
             content=_content,
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
index 5d3ef71c705f..55818705eb2a 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
@@ -34,10 +34,7 @@ async def sample_chat_completions_async():
         exit()

     # Create a Model Client for asynchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(key)
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # Do a single chat completion operation. Start the operation and get a Future object.
     future = asyncio.ensure_future(
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
index 5c965b4a8e76..6b806a641e96 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
@@ -33,20 +33,10 @@ async def sample_embeddings_async():
         exit()

     # Create a Model Client for asynchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(key)
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # Do a single embeddings operation. Start the operation and get a Future object.
-    future = asyncio.ensure_future(
-        client.get_embeddings(
-            input=[
-                "first sentence",
-                "second sentence", "third sentence"
-            ]
-        )
-    )
+    future = asyncio.ensure_future(client.get_embeddings(input=["first sentence", "second sentence", "third sentence"]))

     # Loop until the operation is done
     while not future.done():
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
index 09a6d7dea1fe..36477ecfe194 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
@@ -56,17 +56,11 @@ async def sample_image_generation_async():
         exit()

     # Create a Model Client for asynchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential("key")
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # Generate an image from a text prompt. This will be an asynchronous (non-blocking) call.
     future = asyncio.ensure_future(
-        client.get_image_generations(
-            prompt="A painting of a beautiful sunset over a mountain lake.",
-            size="1024x768"
-        )
+        client.get_image_generations(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768")
     )

     # Loop until the operation is done
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index e749965e5a86..c1631e863d55 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -34,10 +34,7 @@ def sample_chat_completions():
         exit()

     # Create Model Client for synchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential(key)
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
     # [END create_client]

     # [START chat_completions]
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index 472a3b181459..5e8276e324ca 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -55,19 +55,11 @@ def sample_embeddings():
         exit()

     # Create a Model Client for synchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential("key")
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # [START embeddings]
     # Do a single embeddings operation. This will be a synchronous (blocking) call.
-    result = client.get_embeddings(
-        input=[
-            "first sentence",
-            "second sentence", "third sentence"
-        ]
-    )
+    result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])

     # Print results to the console
     print("Embeddings result:")
diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
index 28d2561cbb29..ae7303673c87 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py
@@ -55,16 +55,12 @@ def sample_image_generation():
         exit()

     # Create a Model Client for synchronous operations
-    client = ModelClient(
-        endpoint=endpoint,
-        credential=AzureKeyCredential("key")
-    )
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # [START image_generation]
     # Generate a single image from a text prompt. This will be a synchronous (blocking) call.
     result = client.get_image_generations(
-        prompt="A painting of a beautiful sunset over a mountain lake.",
-        size="1024x768"
+        prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768"
     )

     # Save generated image to file and print other results to the console
diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml
index 4eaa34eac41f..5113363f073c 100644
--- a/sdk/ai/azure-ai-inference/tsp-location.yaml
+++ b/sdk/ai/azure-ai-inference/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/ai/ModelClient
-commit: 227172eb4c80e86d103825ccbb37e4583167c4e5
+commit: 4269199cc639dcec3c0f8158078ff3a381a4022f
 repo: Azure/azure-rest-api-specs
 additionalDirectories:

From 8ee88aafd5b3c55817c063edb2b60baea60cb9bc Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 4 Apr 2024 16:07:58 -0700
Subject: [PATCH 018/112] Example of setting extra parameters

---
 sdk/ai/azure-ai-inference/README.md           | 38 ++++++++++++-------
 .../samples/sample_chat_completions.py        | 31 +++++++++++++--
 2 files changed, 53 insertions(+), 16 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 40f25f287164..ece6c1960c98 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -50,20 +50,29 @@ Once you define the environment variables, this Python code will create and auth
 ```python
 import os
 from azure.ai.inference import ModelClient
-from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, UnknownParameters
+from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ExtraParameters
 from azure.core.credentials import AzureKeyCredential

-# Read the values of your model endpoint and key from environment variables
-try:
-    endpoint = os.environ["MODEL_ENDPOINT"]
-    key = os.environ["MODEL_KEY"]
-except KeyError:
-    print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
-    print("Set them before running this sample.")
-    exit()
-
-# Create Model Client for synchronous operations
-client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+# [START logging]
+import sys
+import logging
+
+# Acquire the logger for this client library. Use 'azure' to affect both
+# the `azure.core` and `azure.ai.inference` libraries.
+logger = logging.getLogger("azure")
+
+# Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+logger.setLevel(logging.DEBUG)
+
+# Direct logging output to stdout (the default):
+handler = logging.StreamHandler(stream=sys.stdout)
+# Or direct logging output to a file:
+# handler = logging.FileHandler(filename = 'sample.log')
+logger.addHandler(handler)
+
+# Optional: change the default logging format. Here we add a timestamp.
+formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+handler.setFormatter(formatter)
 ```

@@ -126,7 +135,10 @@ result = client.get_chat_completions(
     messages=[
         ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
         ChatRequestUserMessage(content="How many feet are in a mile?"),
-    ]
+    ],
+    # Examples of setting extra parameters (TODO: move this to advanced sample)
+    extras=dict(key1="value1", key2="value2"),
+    extra_parameters=ExtraParameters.ALLOW
 )

 # Print results to the console
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index c1631e863d55..413f19c805f5 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -21,9 +21,31 @@ def sample_chat_completions():
     # [START create_client]
     import os
     from azure.ai.inference import ModelClient
-    from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, UnknownParameters
+    from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ExtraParameters
     from azure.core.credentials import AzureKeyCredential

+    # [START logging]
+    import sys
+    import logging
+
+    # Acquire the logger for this client library. Use 'azure' to affect both
+    # the `azure.core` and `azure.ai.inference` libraries.
+    logger = logging.getLogger("azure")
+
+    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+    logger.setLevel(logging.DEBUG)
+
+    # Direct logging output to stdout (the default):
+    handler = logging.StreamHandler(stream=sys.stdout)
+    # Or direct logging output to a file:
+    # handler = logging.FileHandler(filename = 'sample.log')
+    logger.addHandler(handler)
+
+    # Optional: change the default logging format. Here we add a timestamp.
+    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+    handler.setFormatter(formatter)
+    # [END logging]
+
     # Read the values of your model endpoint and key from environment variables
     try:
         endpoint = os.environ["MODEL_ENDPOINT"]
@@ -34,7 +56,7 @@ def sample_chat_completions():
         exit()

     # Create Model Client for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
     # [END create_client]

     # [START chat_completions]
@@ -43,7 +65,10 @@ def sample_chat_completions():
         messages=[
             ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
             ChatRequestUserMessage(content="How many feet are in a mile?"),
-        ]
+        ],
+        # Examples of setting extra parameters (TODO: move this to advanced sample)
+        extras=dict(key1="value1", key2="value2"),
+        extra_parameters=ExtraParameters.ALLOW
     )

     # Print results to the console

From bad14c952990e83b7077b0abf0b4701515ab06c8 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 4 Apr 2024 20:54:15 -0700
Subject: [PATCH 019/112] Fix README.md title

---
 sdk/ai/azure-ai-inference/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index ece6c1960c98..055566899c36 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -1,6 +1,6 @@
-# Azure AI Model Client Library for Python
+# Azure model client library for Python

-The Model Client library allows you to do inference against any of AI models in you deployed to Azure. It supports both "model as a service" and "models with hosted managed infrastructure". For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+The Azure AI Model Client Library allows you to do inference against any of the AI models you deployed to Azure. It supports both "model as a service" and "models with hosted managed infrastructure". For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
Use the model client library to: From b49acb6ddeb1c24790c9e5652251b8311e2a85be Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 5 Apr 2024 11:04:22 -0700 Subject: [PATCH 020/112] Placeholder patch for streaming chat method --- .../azure/ai/inference/_patch.py | 46 +++++++++++- .../azure/ai/inference/models/_models.py | 24 +++---- .../samples/sample_chat_completions.py | 2 +- .../sample_streaming_chat_completions.py | 72 +++++++++++++++++++ sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 5 files changed, 130 insertions(+), 16 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index f7dd32510333..c76f7082d26a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -6,9 +6,51 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +import json +import sys +from typing import List, Any, Union, IO, Optional, Dict +from azure.core.tracing.decorator import distributed_trace +from ._client import ModelClient as ModelClientGenerated +from . import models as _models -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() + +class ModelClient(ModelClientGenerated): + + @distributed_trace + def get_streaming_chat_completions( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: List[_models.ChatRequestMessage] = _Unset, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployment: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: Optional[List[str]] = None, + stream_parameter: Optional[bool] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> None: + print("This is a placeholder for the actual implementation") + +__all__: List[str] = [ + "ModelClient" +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 1b6a770bb104..c5542b86a18f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -36,28 +36,28 @@ class ChatChoice(_model_base.Model): :ivar index: The ordered index associated with this chat completions choice. Required. :vartype index: int - :ivar message: The chat message for a given chat completions prompt. Required. 
- :vartype message: ~azure.ai.inference.models.ChatResponseMessage :ivar finish_reason: The reason that this chat completions choice completed its generated. Required. Known values are: "stop", "length", and "content_filter". :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason + :ivar message: The chat message for a given chat completions prompt. Required. + :vartype message: ~azure.ai.inference.models.ChatResponseMessage """ index: int = rest_field() """The ordered index associated with this chat completions choice. Required.""" - message: "_models.ChatResponseMessage" = rest_field() - """The chat message for a given chat completions prompt. Required.""" finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generated. Required. Known values are: \"stop\", \"length\", and \"content_filter\".""" + message: "_models.ChatResponseMessage" = rest_field() + """The chat message for a given chat completions prompt. Required.""" @overload def __init__( self, *, index: int, - message: "_models.ChatResponseMessage", finish_reason: Union[str, "_models.CompletionsFinishReason"], + message: "_models.ChatResponseMessage", ): ... @@ -90,13 +90,13 @@ class ChatCompletions(_model_base.Model): :vartype created: ~datetime.datetime :ivar model: The model used for the chat completion. Required. :vartype model: str + :ivar usage: Usage information for tokens processed and generated as part of this completions + operation. Required. + :vartype usage: ~azure.ai.inference.models.CompletionsUsage :ivar choices: The collection of completions choices associated with this completions response. Generally, ``n`` choices are generated per provided prompt with a default value of 1. Token limits and other settings may limit the number of choices generated. Required. :vartype choices: list[~azure.ai.inference.models.ChatChoice] - :ivar usage: Usage information for tokens processed and generated as part of this completions - operation. Required. - :vartype usage: ~azure.ai.inference.models.CompletionsUsage """ id: str = rest_field() @@ -108,13 +108,13 @@ class ChatCompletions(_model_base.Model): represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" model: str = rest_field() """The model used for the chat completion. Required.""" + usage: "_models.CompletionsUsage" = rest_field() + """Usage information for tokens processed and generated as part of this completions operation. + Required.""" choices: List["_models.ChatChoice"] = rest_field() """The collection of completions choices associated with this completions response. Generally, ``n`` choices are generated per provided prompt with a default value of 1. Token limits and other settings may limit the number of choices generated. Required.""" - usage: "_models.CompletionsUsage" = rest_field() - """Usage information for tokens processed and generated as part of this completions operation. - Required.""" @overload def __init__( @@ -124,8 +124,8 @@ def __init__( object: str, created: datetime.datetime, model: str, - choices: List["_models.ChatChoice"], usage: "_models.CompletionsUsage", + choices: List["_models.ChatChoice"], ): ... 
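For orientation (not part of the patch): with the `ChatChoice` and `ChatCompletions` field order settled above, reading a chat completions result looks roughly like the sketch below. The client construction mirrors the samples in this series; the `content` attribute on the response message is an assumption, since `ChatResponseMessage` itself is not shown in this diff.

```python
import os

from azure.ai.inference import ModelClient
from azure.ai.inference.models import ChatRequestUserMessage
from azure.core.credentials import AzureKeyCredential

# Construct the client the same way the samples in this series do.
client = ModelClient(
    endpoint=os.environ["MODEL_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["MODEL_KEY"]),
)

result = client.get_chat_completions(
    messages=[ChatRequestUserMessage(content="How many feet are in a mile?")]
)

# Field names follow the ChatCompletions / ChatChoice models above.
print(result.id)                  # unique response identifier
print(result.model)               # model used for the completion
print(result.usage.total_tokens)  # CompletionsUsage totals
for choice in result.choices:     # one ChatChoice per generated answer
    print(choice.index, choice.finish_reason)
    print(choice.message.content)  # assumed attribute on ChatResponseMessage
```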
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index 413f19c805f5..a0bd65bc54dc 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -68,7 +68,7 @@ def sample_chat_completions():
         ],
         # Examples of setting extra parameters (TODO: move this to advanced sample)
         extras=dict(key1="value1", key2="value2"),
-        extra_parameters=ExtraParameters.ALLOW
+        extra_parameters=ExtraParameters.ALLOW,
     )

     # Print results to the console
diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
new file mode 100644
index 000000000000..31e625f77f59
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
@@ -0,0 +1,72 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get a chat completions streaming response from the service using a synchronous client.
+
+USAGE:
+    python sample_streaming_chat_completions.py
+
+    Set these two environment variables before running the sample:
+    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
+       where `deployment-name` is your unique AI Model deployment name, and
+       `azure-region` is the Azure region where your model is deployed.
+    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+
+
+def sample_chat_completions():
+    import os
+    from azure.ai.inference import ModelClient
+    from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage
+    from azure.core.credentials import AzureKeyCredential
+
+    # [START logging]
+    import sys
+    import logging
+
+    # Acquire the logger for this client library. Use 'azure' to affect both
+    # the `azure.core` and `azure.ai.inference` libraries.
+    logger = logging.getLogger("azure")
+
+    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+    logger.setLevel(logging.DEBUG)
+
+    # Direct logging output to stdout (the default):
+    handler = logging.StreamHandler(stream=sys.stdout)
+    # Or direct logging output to a file:
+    # handler = logging.FileHandler(filename = 'sample.log')
+    logger.addHandler(handler)
+
+    # Optional: change the default logging format. Here we add a timestamp.
+    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+    handler.setFormatter(formatter)
+    # [END logging]
+
+    # Read the values of your model endpoint and key from environment variables
+    try:
+        endpoint = os.environ["MODEL_ENDPOINT"]
+        key = os.environ["MODEL_KEY"]
+    except KeyError:
+        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    # Create Model Client for synchronous operations
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
+
+    # [START streaming_chat_completions]
+    # Do a single chat completion operation. This will be a synchronous (blocking) call.
+ result = client.get_streaming_chat_completions( + messages=[ + ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), + ChatRequestUserMessage(content="How many feet are in a mile?"), + ] + ) + # [END streaming_chat_completions] + + +if __name__ == "__main__": + sample_chat_completions() diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 5113363f073c..3d51e053d3d5 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 4269199cc639dcec3c0f8158078ff3a381a4022f +commit: 3f67808667ee3e853c50ab7388aaec009ae91240 repo: Azure/azure-rest-api-specs additionalDirectories: From 708e4c4f1e01760e2a6dae685126d79d6e2ece3a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 5 Apr 2024 13:30:41 -0700 Subject: [PATCH 021/112] Re-emit, to get two new streaming 'Delta' classes --- .../azure/ai/inference/_patch.py | 59 +++++---- .../azure/ai/inference/models/__init__.py | 4 + .../azure/ai/inference/models/_models.py | 117 ++++++++++++++++++ sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 4 files changed, 151 insertions(+), 31 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index c76f7082d26a..2600b6c5fdd9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -20,37 +20,36 @@ JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() -class ModelClient(ModelClientGenerated): - @distributed_trace - def get_streaming_chat_completions( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - messages: List[_models.ChatRequestMessage] = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, - frequency_penalty: Optional[float] = None, - presence_penalty: Optional[float] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_tokens: Optional[int] = None, - response_format: Optional[_models.ChatCompletionsResponseFormat] = None, - stop: Optional[List[str]] = None, - stream_parameter: Optional[bool] = None, - tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, - tool_choice: Optional[ - Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] - ] = None, - seed: Optional[int] = None, - **kwargs: Any - ) -> None: - print("This is a placeholder for the actual implementation") - -__all__: List[str] = [ - "ModelClient" -] # Add all objects you want publicly available to users at this package level +class ModelClient(ModelClientGenerated): + @distributed_trace + def get_streaming_chat_completions( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: List[_models.ChatRequestMessage] = _Unset, + extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, + model_deployment: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: 
Optional[List[str]] = None, + stream_parameter: Optional[bool] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> None: + print("This is a placeholder for the actual implementation") + + +__all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index ebe181771061..dc8849a49b15 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -7,7 +7,9 @@ # -------------------------------------------------------------------------- from ._models import ChatChoice +from ._models import ChatChoiceDelta from ._models import ChatCompletions +from ._models import ChatCompletionsDelta from ._models import ChatCompletionsFunctionToolCall from ._models import ChatCompletionsFunctionToolDefinition from ._models import ChatCompletionsJsonResponseFormat @@ -45,7 +47,9 @@ __all__ = [ "ChatChoice", + "ChatChoiceDelta", "ChatCompletions", + "ChatCompletionsDelta", "ChatCompletionsFunctionToolCall", "ChatCompletionsFunctionToolDefinition", "ChatCompletionsJsonResponseFormat", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index c5542b86a18f..aa8ae67910a8 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -72,6 +72,52 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class ChatChoiceDelta(_model_base.Model): + """Represents an update to a single prompt completion when the service is streaming updates + using Server Sent Events (SSE). + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. + + All required parameters must be populated in order to send to server. + + :ivar index: The ordered index associated with this chat completions choice. Required. + :vartype index: int + :ivar finish_reason: The reason that this chat completions choice completed its generated. + Required. Known values are: "stop", "length", and "content_filter". + :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason + :ivar delta: An update to the chat message for a given chat completions prompt. Required. + :vartype delta: ~azure.ai.inference.models.ChatResponseMessage + """ + + index: int = rest_field() + """The ordered index associated with this chat completions choice. Required.""" + finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() + """The reason that this chat completions choice completed its generated. Required. Known values + are: \"stop\", \"length\", and \"content_filter\".""" + delta: "_models.ChatResponseMessage" = rest_field() + """An update to the chat message for a given chat completions prompt. Required.""" + + @overload + def __init__( + self, + *, + index: int, + finish_reason: Union[str, "_models.CompletionsFinishReason"], + delta: "_models.ChatResponseMessage", + ): + ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class ChatCompletions(_model_base.Model): """Representation of the response data from a chat completions request. Completions support a wide variety of tasks and generate text that continues from or @@ -140,6 +186,77 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class ChatCompletionsDelta(_model_base.Model): + """Represents a response update to a chat completions request, when the service is streaming + updates + using Server Sent Events (SSE). + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + All required parameters must be populated in order to send to server. + + :ivar id: A unique identifier associated with this chat completions response. Required. + :vartype id: str + :ivar object: The response object type, which is always ``chat.completion``. Required. + :vartype object: str + :ivar created: The first timestamp associated with generation activity for this completions + response, + represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + :vartype created: ~datetime.datetime + :ivar model: The model used for the chat completion. Required. + :vartype model: str + :ivar usage: Usage information for tokens processed and generated as part of this completions + operation. Required. + :vartype usage: ~azure.ai.inference.models.CompletionsUsage + :ivar choices: An update to the collection of completion choices associated with this + completions response. + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. Required. + :vartype choices: list[~azure.ai.inference.models.ChatChoiceDelta] + """ + + id: str = rest_field() + """A unique identifier associated with this chat completions response. Required.""" + object: str = rest_field() + """The response object type, which is always ``chat.completion``. Required.""" + created: datetime.datetime = rest_field(format="unix-timestamp") + """The first timestamp associated with generation activity for this completions response, + represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" + model: str = rest_field() + """The model used for the chat completion. Required.""" + usage: "_models.CompletionsUsage" = rest_field() + """Usage information for tokens processed and generated as part of this completions operation. + Required.""" + choices: List["_models.ChatChoiceDelta"] = rest_field() + """An update to the collection of completion choices associated with this completions response. + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + object: str, + created: datetime.datetime, + model: str, + usage: "_models.CompletionsUsage", + choices: List["_models.ChatChoiceDelta"], + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class ChatCompletionsToolCall(_model_base.Model): """An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 3d51e053d3d5..9bc95ef76e29 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 3f67808667ee3e853c50ab7388aaec009ae91240 +commit: 0053febfe9c1a4951e32ba8cd9d23d2c4d4f3c94 repo: Azure/azure-rest-api-specs additionalDirectories: From da8a678e2e4cae502a0a3dc615587c5c6c6065d2 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 9 Apr 2024 21:43:24 -0700 Subject: [PATCH 022/112] first go at streaming --- .../azure/ai/inference/_patch.py | 99 +++++++++++++++- .../azure/ai/inference/models/_patch.py | 112 +++++++++++++++++- .../samples/sample_chat_completions.py | 1 + .../sample_streaming_chat_completions.py | 40 ++++++- 4 files changed, 239 insertions(+), 13 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 2600b6c5fdd9..ea092116f7c9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -6,12 +6,30 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import json import sys -from typing import List, Any, Union, IO, Optional, Dict +import json + +from io import IOBase +from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse from azure.core.tracing.decorator import distributed_trace -from ._client import ModelClient as ModelClientGenerated +from azure.core.utils import case_insensitive_dict from . 
import models as _models
+from ._model_base import SdkJSONEncoder, _deserialize
+from ._serialization import Serializer
+from ._vendor import ModelClientMixinABC
+from ._client import ModelClient as ModelClientGenerated
+from ._operations._operations import build_model_get_chat_completions_request
+
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)

 if sys.version_info >= (3, 9):
     from collections.abc import MutableMapping
@@ -19,9 +37,14 @@
     from typing import MutableMapping  # type: ignore  # pylint: disable=ungrouped-imports

 JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
 _Unset: Any = object()
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False


 class ModelClient(ModelClientGenerated):
+
     @distributed_trace
     def get_streaming_chat_completions(
         self,
         body: Union[JSON, IO[bytes]] = _Unset,
         *,
         messages: List[_models.ChatRequestMessage] = _Unset,
         extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None,
         model_deployment: Optional[str] = None,
         extras: Optional[Dict[str, str]] = None,
         frequency_penalty: Optional[float] = None,
         presence_penalty: Optional[float] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         max_tokens: Optional[int] = None,
         response_format: Optional[_models.ChatCompletionsResponseFormat] = None,
         stop: Optional[List[str]] = None,
-        stream_parameter: Optional[bool] = None,
         tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None,
         tool_choice: Optional[
             Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection]
         ] = None,
         seed: Optional[int] = None,
         **kwargs: Any
-    ) -> None:
-        print("This is a placeholder for the actual implementation")
+    ) -> _models.ChatCompletionsDeltaIterator:
+        error_map = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if messages is _Unset:
+                raise TypeError("missing required argument: messages")
+            body = {
+                "extras": extras,
+                "frequency_penalty": frequency_penalty,
+                "max_tokens": max_tokens,
+                "messages": messages,
+                "presence_penalty": presence_penalty,
+                "response_format": response_format,
+                "seed": seed,
+                "stop": stop,
+                "stream": True,
+                "temperature": temperature,
+                "tool_choice": tool_choice,
+                "tools": tools,
+                "top_p": top_p,
+            }
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_model_get_chat_completions_request(
+            extra_parameters=extra_parameters,
+            model_deployment=model_deployment,
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+
+        _request.url = self._client.format_url(_request.url)
+
+        kwargs.pop("stream", True)
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=True, **kwargs
+        )
+
+        response: HttpResponse = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            response.read()  # Load the body in memory and close the socket
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + result = _models.ChatCompletionsDeltaInterator(response) + return result __all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index f7dd32510333..3918d2a27c5f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -6,9 +6,119 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import queue +import time +import re +import json + from typing import List +from .. import models as _models +from azure.core.rest import HttpResponse + +class ChatCompletionsDeltaInterator: + """Representation of the streaming response to a chat completions request. + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + """ + + + # Enable console logs for debugging + ENABLE_CLASS_LOGS = False + + # The prefix of each line in the SSE stream that contains a JSON string + # to deserialize into a ChatCompletionsDelta object + SSE_DATA_EVENT_PREFIX = "data: " + + # The line indicating the end of the SSE stream + SSE_DATA_EVENT_DONE = "data: [DONE]" + + def __init__(self, response: HttpResponse): + self._response = response + self._bytes_iterator = response.iter_bytes() + self._queue = queue.Queue() + self._incomplete_json = "" + self._done = False + + def __iter__(self): + return self + + def __next__(self): + if self._queue.empty(): + self._read_next_block() + if self._queue.empty(): + raise StopIteration + return self._queue.get() + + def __enter__(self): + return self + + def __exit__(self, *args): + self._response.close() + + def close(self): + self._response.close() + + def __del__(self): + self._response.close() + + def _read_next_block(self): + + if self.ENABLE_CLASS_LOGS: + start_time = time.time() + + try: + element = next(self._bytes_iterator) + except StopIteration: + self._done = True + return + + if self.ENABLE_CLASS_LOGS: + print(f"Elapsed time: {int(1000*(time.time()- start_time))}ms") + print(f"Size: {len(element)} bytes") + + # Clear the queue of ChatCompletionsDelta before processing the next block + self._queue.queue.clear() + + # Convert `bytes` to string and split the string by newline, while keeping the new line char. + # The last element may be a partial "line" that does not contain a newline char at the end. + line_list = re.split(r'(?<=\n)', element.decode('utf-8')) + for index, element in enumerate(line_list): + + if self.ENABLE_CLASS_LOGS: + print(f"[original] {repr(element)}") + + if index == 0: + element = self._incomplete_json + element + self._incomplete_json = "" + + if index == len(line_list) - 1 and not element.endswith("\n"): + self._incomplete_json = element + return + + if self.ENABLE_CLASS_LOGS: + print(f"[modified] {repr(element)}") + + if element == "\n": # Empty line, indicating flush output to client + continue + + if not element.startswith(self.SSE_DATA_EVENT_PREFIX): + raise ValueError(f"SSE event not supported (line `{element}`)") + + if element.startswith(self.SSE_DATA_EVENT_DONE): + self._done = True + return + + # If you reached here, the line should contain `data: {...}\n` + # where the curly braces contain a valid JSON object.
Deserialize it into a ChatCompletionsDelta object + # and add it to the queue. + self._queue.put(_models.ChatCompletionsDelta._deserialize(json.loads(element[len(self.SSE_DATA_EVENT_PREFIX):-1]), [])) + + if self.ENABLE_CLASS_LOGS: + print("[added]") + -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +__all__: List[str] = ["ChatCompletionsDeltaInterator"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index a0bd65bc54dc..fefba41b5441 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -82,6 +82,7 @@ def sample_chat_completions(): print(f"created: {result.created}") print(f"model: {result.model}") print(f"object: {result.object}") + print(f"usage.capacity_type: {result.usage.capacity_type}") print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") print(f"usage.completion_tokens: {result.usage.completion_tokens}") print(f"usage.total_tokens: {result.usage.total_tokens}") diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py index 31e625f77f59..60296eed6ca9 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py @@ -57,14 +57,42 @@ def sample_chat_completions(): # Create Model Client for synchronous operations client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True) + messages=[ + ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), + ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day"), + ] + # [START streaming_chat_completions] # Do a single chat completion operation. This will be a synchronous (blocking) call.
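The streaming class introduced in this patch follows a simple queue-backed iterator pattern: `__next__` refills an internal queue from the network one block at a time. The following minimal, self-contained sketch (not part of the patch; the class name and the list standing in for network blocks are invented) illustrates that pattern, using a `while` guard as one way to ensure a block that yields no complete events does not end iteration early:

```python
# A minimal sketch of the queue-backed iterator pattern used by the
# streaming class above; plain lists stand in for network blocks.
import queue


class BlockIterator:
    def __init__(self, blocks):
        self._source = iter(blocks)
        self._queue = queue.Queue()

    def __iter__(self):
        return self

    def __next__(self):
        # Keep reading blocks until at least one item is queued; an empty
        # block (e.g. a chunk that ended mid-line) must not end iteration.
        while self._queue.empty():
            try:
                block = next(self._source)
            except StopIteration:
                raise StopIteration from None
            for item in block:
                self._queue.put(item)
        return self._queue.get()


print(list(BlockIterator([[1, 2], [], [3]])))  # -> [1, 2, 3]
```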
- result = client.get_streaming_chat_completions( - messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="How many feet are in a mile?"), - ] - ) + result = client.get_streaming_chat_completions(messages=messages) + + accumulated_content = "" + + # Iterate on the result to get chat completion updates, as they arrive from the service + for delta in result: + print("ChatCompletionsDelta:") + for index, choice in enumerate(delta.choices): + print(f"choices[{index}].delta.content: `{choice.delta.content}`") + if (choice.delta.content is not None): + accumulated_content += choice.delta.content + print(f"choices[{index}].delta.role: {choice.delta.role}") + print(f"choices[{index}].finish_reason: {choice.finish_reason}") + print(f"choices[{index}].index: {choice.index}") + print(f"id: {delta.id}") + print(f"created: {delta.created}") + print(f"model: {delta.model}") + print(f"object: {delta.object}") + if delta.usage is not None: + print(f"usage.capacity_type: {delta.usage.capacity_type}") + print(f"usage.prompt_tokens: {delta.usage.prompt_tokens}") + print(f"usage.completion_tokens: {delta.usage.completion_tokens}") + print(f"usage.total_tokens: {delta.usage.total_tokens}") + + # Remember to always close the result object when you are done with it + result.close() + + print(f"Accumulated content: {accumulated_content}") + # [END streaming_chat_completions] From ed2b2278879e6f8290c27f83f64963523638fba9 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 10 Apr 2024 08:19:12 -0700 Subject: [PATCH 023/112] Latest re-emit, removing 'extra_parameters' --- sdk/ai/azure-ai-inference/README.md | 4 +- .../ai/inference/_operations/_operations.py | 171 +++++++++--------- .../azure/ai/inference/_patch.py | 4 +- .../inference/aio/_operations/_operations.py | 128 ++++++------- .../azure/ai/inference/models/__init__.py | 6 +- .../azure/ai/inference/models/_enums.py | 28 +-- .../azure/ai/inference/models/_models.py | 43 +++++ .../azure/ai/inference/models/_patch.py | 20 +- .../samples/sample_chat_completions.py | 9 +- .../sample_streaming_chat_completions.py | 4 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 11 files changed, 235 insertions(+), 184 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 055566899c36..0a9477f8cbd3 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -50,7 +50,7 @@ Once you define the environment variables, this Python code will create and auth ```python import os from azure.ai.inference import ModelClient -from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ExtraParameters +from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage from azure.core.credentials import AzureKeyCredential # [START logging] @@ -138,7 +138,6 @@ result = client.get_chat_completions( ], # Examples of setting extra parameters (TODO: move this to advanced sample) extras=dict(key1="value1", key2="value2"), - extra_parameters=ExtraParameters.ALLOW ) # Print results to the console @@ -152,6 +151,7 @@ print(f"id: {result.id}") print(f"created: {result.created}") print(f"model: {result.model}") print(f"object: {result.object}") +print(f"usage.capacity_type: {result.usage.capacity_type}") print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") print(f"usage.completion_tokens:
{result.usage.completion_tokens}") print(f"usage.total_tokens: {result.usage.total_tokens}") diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 76a434a68084..cb91032f5f56 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -42,12 +42,7 @@ _SERIALIZER.client_side_validation = False -def build_model_get_chat_completions_request( - *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployment: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: +def build_model_get_chat_completions_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -62,8 +57,6 @@ def build_model_get_chat_completions_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if extra_parameters is not None: - _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -73,12 +66,7 @@ def build_model_get_chat_completions_request( return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_model_get_embeddings_request( - *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployment: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: +def build_model_get_embeddings_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -93,8 +81,6 @@ def build_model_get_embeddings_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if extra_parameters is not None: - _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -105,10 +91,7 @@ def build_model_get_embeddings_request( def build_model_get_image_generations_request( # pylint: disable=name-too-long - *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, - model_deployment: Optional[str] = None, - **kwargs: Any + *, model_deployment: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -124,8 +107,6 @@ def build_model_get_image_generations_request( # pylint: disable=name-too-long _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if extra_parameters is not None: - _headers["extra-parameters"] = _SERIALIZER.header("extra_parameters", extra_parameters, "str") if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") 
@@ -135,13 +116,31 @@ def build_model_get_image_generations_request( # pylint: disable=name-too-long return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) +def build_model_get_model_information_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/info" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + class ModelClientOperationsMixin(ModelClientMixinABC): @overload def get_chat_completions( self, body: JSON, *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -154,9 +153,6 @@ def get_chat_completions( :param body: Required. :type body: JSON - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -215,8 +211,6 @@ def get_chat_completions( "str" # Optional. A collection of textual sequences that will end completions generation. ], - "stream": bool, # Optional. A value indicating whether chat completions - should be streamed for this request. "temperature": 0.0, # Optional. The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and @@ -286,7 +280,6 @@ def get_chat_completions( self, *, messages: List[_models.ChatRequestMessage], - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, @@ -297,7 +290,6 @@ def get_chat_completions( max_tokens: Optional[int] = None, response_format: Optional[_models.ChatCompletionsResponseFormat] = None, stop: Optional[List[str]] = None, - stream_parameter: Optional[bool] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] @@ -317,9 +309,6 @@ def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. 
- :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -372,9 +361,6 @@ def get_chat_completions( :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] - :keyword stream_parameter: A value indicating whether chat completions should be streamed for - this request. Default value is None. - :paramtype stream_parameter: bool :keyword tools: The available tool definitions that the chat completions request can use, including caller-defined functions. Default value is None. :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] @@ -444,7 +430,6 @@ def get_chat_completions( self, body: IO[bytes], *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -457,9 +442,6 @@ def get_chat_completions( :param body: Required. :type body: IO[bytes] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -524,7 +506,6 @@ def get_chat_completions( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -534,7 +515,6 @@ def get_chat_completions( max_tokens: Optional[int] = None, response_format: Optional[_models.ChatCompletionsResponseFormat] = None, stop: Optional[List[str]] = None, - stream_parameter: Optional[bool] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] @@ -556,9 +536,6 @@ def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -608,9 +585,6 @@ def get_chat_completions( :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] - :keyword stream_parameter: A value indicating whether chat completions should be streamed for - this request. 
Default value is None. - :paramtype stream_parameter: bool :keyword tools: The available tool definitions that the chat completions request can use, including caller-defined functions. Default value is None. :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] @@ -675,8 +649,6 @@ def get_chat_completions( "str" # Optional. A collection of textual sequences that will end completions generation. ], - "stream": bool, # Optional. A value indicating whether chat completions - should be streamed for this request. "temperature": 0.0, # Optional. The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and @@ -766,7 +738,6 @@ def get_chat_completions( "response_format": response_format, "seed": seed, "stop": stop, - "stream": stream_parameter, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, @@ -781,7 +752,6 @@ def get_chat_completions( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -819,7 +789,6 @@ def get_embeddings( self, body: JSON, *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -829,9 +798,6 @@ def get_embeddings( :param body: Required. :type body: JSON - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -904,7 +870,6 @@ def get_embeddings( self, *, input: List[str], - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, @@ -916,9 +881,6 @@ def get_embeddings( :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -984,7 +946,6 @@ def get_embeddings( self, body: IO[bytes], *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -994,9 +955,6 @@ def get_embeddings( :param body: Required. :type body: IO[bytes] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. 
- :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1053,7 +1011,6 @@ def get_embeddings( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -1066,9 +1023,6 @@ def get_embeddings( :type body: JSON or IO[bytes] :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1169,7 +1123,6 @@ def get_embeddings( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_embeddings_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1207,7 +1160,6 @@ def get_image_generations( self, body: JSON, *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -1217,9 +1169,6 @@ def get_image_generations( :param body: Required. :type body: JSON - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1282,7 +1231,6 @@ def get_image_generations( *, prompt: str, size: str, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, @@ -1300,9 +1248,6 @@ def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. 
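Taken together, the overloads above surface a keyword-style call for image generation with `prompt` and `size` required. A hypothetical usage sketch follows; the environment variable names are assumptions for illustration and not part of the patch:

```python
# Hypothetical usage of get_image_generations after this change; the
# environment variable names are assumed for illustration.
import os

from azure.ai.inference import ModelClient
from azure.core.credentials import AzureKeyCredential

client = ModelClient(
    endpoint=os.environ["MODEL_ENDPOINT"],  # assumed variable name
    credential=AzureKeyCredential(os.environ["MODEL_KEY"]),  # assumed variable name
)
result = client.get_image_generations(
    prompt="A watercolor painting of a lighthouse at dawn",
    size="1024x1024",  # "<width>x<height>", per the docstring above
)
print(result)
```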
@@ -1360,7 +1305,6 @@ def get_image_generations( self, body: IO[bytes], *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -1370,9 +1314,6 @@ def get_image_generations( :param body: Required. :type body: IO[bytes] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1414,7 +1355,6 @@ def get_image_generations( *, prompt: str = _Unset, size: str = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, @@ -1433,9 +1373,6 @@ def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1542,7 +1479,6 @@ def get_image_generations( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_image_generations_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1574,3 +1510,66 @@ def get_image_generations( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace + def get_model_information(self, **kwargs: Any) -> _models.ModelInformation: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInformation. The ModelInformation is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInformation + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. Required. + "model_provider": "str", # The model provider. Required. + "model_type": "str" # The type of the AI model. Required. Known values are: + "embeddings", "custom", "chat", "text_generation", and "image_generation". 
+ } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + + _request = build_model_get_model_information_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInformation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index ea092116f7c9..ea2dca1fe8ec 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -43,15 +43,14 @@ _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False -class ModelClient(ModelClientGenerated): +class ModelClient(ModelClientGenerated): @distributed_trace def get_streaming_chat_completions( self, body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -109,7 +108,6 @@ def get_streaming_chat_completions( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index f7633a29906b..27f6bb1b0037 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -30,6 +30,7 @@ build_model_get_chat_completions_request, build_model_get_embeddings_request, build_model_get_image_generations_request, + build_model_get_model_information_request, ) from .._vendor import ModelClientMixinABC @@ -49,7 +50,6 @@ async def get_chat_completions( self, body: JSON, *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -62,9 +62,6 @@ async def get_chat_completions( :param body: Required. :type body: JSON - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". 
Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -123,8 +120,6 @@ async def get_chat_completions( "str" # Optional. A collection of textual sequences that will end completions generation. ], - "stream": bool, # Optional. A value indicating whether chat completions - should be streamed for this request. "temperature": 0.0, # Optional. The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and @@ -194,7 +189,6 @@ async def get_chat_completions( self, *, messages: List[_models.ChatRequestMessage], - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, @@ -205,7 +199,6 @@ async def get_chat_completions( max_tokens: Optional[int] = None, response_format: Optional[_models.ChatCompletionsResponseFormat] = None, stop: Optional[List[str]] = None, - stream_parameter: Optional[bool] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] @@ -225,9 +218,6 @@ async def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -280,9 +270,6 @@ async def get_chat_completions( :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] - :keyword stream_parameter: A value indicating whether chat completions should be streamed for - this request. Default value is None. - :paramtype stream_parameter: bool :keyword tools: The available tool definitions that the chat completions request can use, including caller-defined functions. Default value is None. :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] @@ -352,7 +339,6 @@ async def get_chat_completions( self, body: IO[bytes], *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -365,9 +351,6 @@ async def get_chat_completions( :param body: Required. :type body: IO[bytes] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. 
- :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -432,7 +415,6 @@ async def get_chat_completions( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -442,7 +424,6 @@ async def get_chat_completions( max_tokens: Optional[int] = None, response_format: Optional[_models.ChatCompletionsResponseFormat] = None, stop: Optional[List[str]] = None, - stream_parameter: Optional[bool] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] @@ -464,9 +445,6 @@ async def get_chat_completions( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -516,9 +494,6 @@ async def get_chat_completions( :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] - :keyword stream_parameter: A value indicating whether chat completions should be streamed for - this request. Default value is None. - :paramtype stream_parameter: bool :keyword tools: The available tool definitions that the chat completions request can use, including caller-defined functions. Default value is None. :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] @@ -583,8 +558,6 @@ async def get_chat_completions( "str" # Optional. A collection of textual sequences that will end completions generation. ], - "stream": bool, # Optional. A value indicating whether chat completions - should be streamed for this request. "temperature": 0.0, # Optional. The sampling temperature to use that controls the apparent creativity of generated completions. 
Higher values will make output more random while lower values will make results more focused and @@ -674,7 +647,6 @@ async def get_chat_completions( "response_format": response_format, "seed": seed, "stop": stop, - "stream": stream_parameter, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, @@ -689,7 +661,6 @@ async def get_chat_completions( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_chat_completions_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -727,7 +698,6 @@ async def get_embeddings( self, body: JSON, *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -737,9 +707,6 @@ async def get_embeddings( :param body: Required. :type body: JSON - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -812,7 +779,6 @@ async def get_embeddings( self, *, input: List[str], - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, @@ -824,9 +790,6 @@ async def get_embeddings( :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -892,7 +855,6 @@ async def get_embeddings( self, body: IO[bytes], *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -902,9 +864,6 @@ async def get_embeddings( :param body: Required. :type body: IO[bytes] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. 
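For the async client, the call shape mirrors the synchronous one behind `await`. A hypothetical sketch of the embeddings operation shown above follows; the environment variable names are assumptions, and the client comes from the `azure.ai.inference.aio` package added in this series:

```python
# Hypothetical async usage of get_embeddings; environment variable names
# are assumed for illustration.
import asyncio
import os

from azure.ai.inference.aio import ModelClient
from azure.core.credentials import AzureKeyCredential


async def main() -> None:
    client = ModelClient(
        endpoint=os.environ["MODEL_ENDPOINT"],  # assumed variable name
        credential=AzureKeyCredential(os.environ["MODEL_KEY"]),  # assumed variable name
    )
    result = await client.get_embeddings(input=["first phrase", "second phrase"])
    print(result)
    await client.close()


asyncio.run(main())
```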
@@ -961,7 +920,6 @@ async def get_embeddings( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -974,9 +932,6 @@ async def get_embeddings( :type body: JSON or IO[bytes] :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1077,7 +1032,6 @@ async def get_embeddings( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_embeddings_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1115,7 +1069,6 @@ async def get_image_generations( self, body: JSON, *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -1125,9 +1078,6 @@ async def get_image_generations( :param body: Required. :type body: JSON - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1190,7 +1140,6 @@ async def get_image_generations( *, prompt: str, size: str, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, @@ -1208,9 +1157,6 @@ async def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1268,7 +1214,6 @@ async def get_image_generations( self, body: IO[bytes], *, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any @@ -1278,9 +1223,6 @@ async def get_image_generations( :param body: Required. 
:type body: IO[bytes] - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1322,7 +1264,6 @@ async def get_image_generations( *, prompt: str = _Unset, size: str = _Unset, - extra_parameters: Optional[Union[str, _models.ExtraParameters]] = None, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, @@ -1341,9 +1282,6 @@ async def get_image_generations( ":code:``x:code:``". For example: "1024x1024", "1792x1024". Required. :paramtype size: str - :keyword extra_parameters: Controls what happens if extra parameters are passed in the request - payload. Known values are: "error", "ignore", and "allow". Default value is None. - :paramtype extra_parameters: str or ~azure.ai.inference.models.ExtraParameters :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. Typically used when you want to target a test environment instead of production environment. @@ -1450,7 +1388,6 @@ async def get_image_generations( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_model_get_image_generations_request( - extra_parameters=extra_parameters, model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1482,3 +1419,66 @@ async def get_image_generations( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace_async + async def get_model_information(self, **kwargs: Any) -> _models.ModelInformation: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInformation. The ModelInformation is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInformation + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. Required. + "model_provider": "str", # The model provider. Required. + "model_type": "str" # The type of the AI model. Required. Known values are: + "embeddings", "custom", "chat", "text_generation", and "image_generation". 
+ } + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + + _request = build_model_get_model_information_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + _request.url = self._client.format_url(_request.url) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInformation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index dc8849a49b15..d1a319f8888f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -32,15 +32,16 @@ from ._models import FunctionDefinition from ._models import ImageGenerationData from ._models import ImageGenerations +from ._models import ModelInformation from ._enums import CapacityType from ._enums import ChatCompletionsToolSelectionPreset from ._enums import ChatRole from ._enums import CompletionsFinishReason from ._enums import EmbeddingInputType -from ._enums import ExtraParameters from ._enums import ImageGenerationQuality from ._enums import ImageGenerationResponseFormat +from ._enums import ModelType from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -72,14 +73,15 @@ "FunctionDefinition", "ImageGenerationData", "ImageGenerations", + "ModelInformation", "CapacityType", "ChatCompletionsToolSelectionPreset", "ChatRole", "CompletionsFinishReason", "EmbeddingInputType", - "ExtraParameters", "ImageGenerationQuality", "ImageGenerationResponseFormat", + "ModelType", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 879fdb8015fc..4df78cbcb461 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -69,19 +69,6 @@ class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """to do""" -class ExtraParameters(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines the service behavior when extra parameters are passed in the request payload.""" - - ERROR = "error" - """The service should error when it sees extra parameters in the request payload. 
This is the - default behavior if the service.""" - IGNORE = "ignore" - """The service should ignore extra parameters in the request payload. They will not be passed to - the back-end AI model.""" - ALLOW = "allow" - """The service should pass extra parameters to the back-end AI model.""" - - class ImageGenerationQuality(str, Enum, metaclass=CaseInsensitiveEnumMeta): """An image generation configuration that specifies how the model should prioritize quality, cost, and speed. @@ -101,3 +88,18 @@ class ImageGenerationResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta """Image generation response items should provide a URL from which the image may be retrieved.""" BASE64 = "b64_json" """Image generation response items should provide image data as a base64-encoded string.""" + + +class ModelType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of AI model.""" + + EMBEDDINGS = "embeddings" + """Embeddings.""" + CUSTOM = "custom" + """Custom model.""" + CHAT = "chat" + """Chat completions""" + TEXT_GENERATION = "text_generation" + """Text generation""" + IMAGE_GENERATION = "image_generation" + """Image generation""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index aa8ae67910a8..fdf5f64dfe4b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -1115,3 +1115,46 @@ def __init__(self, mapping: Mapping[str, Any]): def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation super().__init__(*args, **kwargs) + + +class ModelInformation(_model_base.Model): + """Represents some basic information about the AI model. + + All required parameters must be populated in order to send to server. + + :ivar model_type: The type of the AI model. Required. Known values are: "embeddings", "custom", + "chat", "text_generation", and "image_generation". + :vartype model_type: str or ~azure.ai.inference.models.ModelType + :ivar model_provider: The model provider. Required. + :vartype model_provider: str + :ivar model_name: The name of the AI model. Required. + :vartype model_name: str + """ + + model_type: Union[str, "_models.ModelType"] = rest_field() + """The type of the AI model. Required. Known values are: \"embeddings\", \"custom\", \"chat\", + \"text_generation\", and \"image_generation\".""" + model_provider: str = rest_field() + """The model provider. Required.""" + model_name: str = rest_field() + """The name of the AI model. Required.""" + + @overload + def __init__( + self, + *, + model_type: Union[str, "_models.ModelType"], + model_provider: str, + model_name: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 3918d2a27c5f..0c81a51aa4b0 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -15,6 +15,7 @@ from .. 
import models as _models from azure.core.rest import HttpResponse + class ChatCompletionsDeltaInterator: """Representation of the streaming response to a chat completions request. Completions support a wide variety of tasks and generate text that continues from or @@ -22,11 +23,10 @@ class ChatCompletionsDeltaInterator: provided prompt data. """ - # Enable console logs for debugging ENABLE_CLASS_LOGS = False - # The prefix of each line in the SSE stream that contains a JSON string + # The prefix of each line in the SSE stream that contains a JSON string # to deserialize into a ChatCompletionsDelta object SSE_DATA_EVENT_PREFIX = "data: " @@ -82,7 +82,7 @@ def _read_next_block(self): # Convert `bytes` to string and split the string by newline, while keeping the new line char. # The last element may be a partial "line" that does not contain a newline char at the end. - line_list = re.split(r'(?<=\n)', element.decode('utf-8')) + line_list = re.split(r"(?<=\n)", element.decode("utf-8")) for index, element in enumerate(line_list): @@ -93,17 +93,17 @@ def _read_next_block(self): self._incomplete_json = "" if index == len(line_list) - 1 and not element.endswith("\n"): - self._incomplete_json = element + self._incomplete_json = element return if self.ENABLE_CLASS_LOGS: print(f"[modified] {repr(element)}") - if element == "\n": # Empty line, indicating flush output to client + if element == "\n": # Empty line, indicating flush output to client continue if not element.startswith(self.SSE_DATA_EVENT_PREFIX): - raise ValueError(f"SSE event not supported (line `{element}`)") + raise ValueError(f"SSE event not supported (line `{element}`)") if element.startswith(self.SSE_DATA_EVENT_DONE): self._done = True @@ -112,13 +112,17 @@ def _read_next_block(self): # If you reached here, the line should contain `data: {...}\n` # where the curly braces contain a valid JSON object.
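The slicing in the reformatted call below isolates the JSON payload of a single SSE event by cutting off the `data: ` prefix and the trailing newline. A tiny standalone illustration (the SSE payload here is invented for demonstration):

```python
# Standalone illustration of the prefix/newline slicing used in the call
# below; the SSE payload is invented for demonstration.
import json

SSE_DATA_EVENT_PREFIX = "data: "
line = 'data: {"id": "abc123", "choices": []}\n'

# Drop the leading "data: " and the trailing newline, leaving pure JSON
payload = line[len(SSE_DATA_EVENT_PREFIX) : -1]
print(json.loads(payload)["id"])  # -> abc123
```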
- self._queue.put(_models.ChatCompletionsDelta._deserialize(json.loads(element[len(self.SSE_DATA_EVENT_PREFIX):-1]), [])) + self._queue.put( + _models.ChatCompletionsDelta._deserialize(json.loads(element[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []) + ) if self.ENABLE_CLASS_LOGS: print("[added]") -__all__: List[str] = ["ChatCompletionsDeltaInterator"] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [ + "ChatCompletionsDeltaInterator" +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index fefba41b5441..98b46caf064c 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -21,7 +21,7 @@ def sample_chat_completions(): # [START create_client] import os from azure.ai.inference import ModelClient - from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ExtraParameters + from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage from azure.core.credentials import AzureKeyCredential # [START logging] @@ -56,7 +56,11 @@ def sample_chat_completions(): exit() # Create Model Client for synchronous operations - client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True) + client = ModelClient( + endpoint=endpoint, + credential=AzureKeyCredential(key), + logging_enable=True, + ) # [END create_client] # [START chat_completions] @@ -68,7 +72,6 @@ def sample_chat_completions(): ], # Examples of setting extra parameters (TODO: move this to advanced sample) extras=dict(key1="value1", key2="value2"), - extra_parameters=ExtraParameters.ALLOW, ) # Print results the the console diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py index 60296eed6ca9..37b34bc1e889 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py @@ -57,7 +57,7 @@ def sample_chat_completions(): # Create Model Client for synchronous operations client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True) - messages=[ + messages = [ ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day"), ] @@ -73,7 +73,7 @@ def sample_chat_completions(): print("ChatCompletionsDelta:") for index, choice in enumerate(delta.choices): print(f"choices[{index}].delta.content: `{choice.delta.content}`") - if (choice.delta.content is not None): + if choice.delta.content is not None: accumulated_content += choice.delta.content print(f"choices[{index}].delta.role: {choice.delta.role}") print(f"choices[{index}].finish_reason: {choice.finish_reason}") diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 9bc95ef76e29..460742949ee4 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 0053febfe9c1a4951e32ba8cd9d23d2c4d4f3c94 +commit: 178c216eabb49f96cf6eabbd8e34f3d026757208 repo: Azure/azure-rest-api-specs 
additionalDirectories: From 0428d954a006b7f49613f184041ece7ac1944187 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 10 Apr 2024 17:02:30 -0700 Subject: [PATCH 024/112] async streaming support --- .github/CODEOWNERS | 4 + eng/.docsettings.yml | 1 + sdk/ai/azure-ai-inference/README.md | 2 +- .../azure/ai/inference/_patch.py | 11 +- .../azure/ai/inference/aio/_patch.py | 121 +++++++++++++++++- .../azure/ai/inference/models/_patch.py | 90 +++++++++---- sdk/ai/azure-ai-inference/samples/README.md | 2 + .../sample_chat_completions_async.py | 1 - .../async_samples/sample_embeddings_async.py | 1 - .../sample_image_generation_async.py | 1 - ...sample_streaming_chat_completions_async.py | 88 +++++++++++++ .../sample_streaming_chat_completions.py | 82 ++++-------- sdk/ai/azure-ai-inference/tests/README.md | 14 +- 13 files changed, 323 insertions(+), 95 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0a7748bfde1b..9402276d5622 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -210,6 +210,10 @@ # ServiceLabel: %Image Analysis %Service Attention /sdk/vision/azure-ai-vision-imageanalysis/ @dargilco @rhurey +# PRLabel: %AI Model Inference +# ServiceLabel: %AI Model Inference %Service Attention +/sdk/ai/azure-ai-inference/ @dargilco + # PRLabel: %HDInsight /sdk/hdinsight/ @idear1203 diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index fdca190818e5..202d26e7640a 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -14,6 +14,7 @@ omitted_paths: - sdk/**/swagger/* - sdk/ml/azure-ai-ml/tests/* - sdk/vision/azure-ai-vision-imageanalysis/tests/* + - sdk/ai/azure-ai-inference/tests/* - sdk/storage/azure-storage-extensions/* language: python diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 0a9477f8cbd3..f15af8638911 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -289,7 +289,7 @@ None redacted logs are generated for log level `logging.DEBUG` only. Be sure to ## Next steps -* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/vision/azure-ai-vision-imageanalysis/samples) folder, containing fully runnable Python code for Image Analysis (all visual features, synchronous and asynchronous clients, from image file or URL). +* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder, containing fully runnable Python code for Image Analysis (all visual features, synchronous and asynchronous clients, from image file or URL). 
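The SSE handling that `ChatCompletionsDeltaInterator` implements in `models/_patch.py` above is easier to follow in isolation. Here is a minimal, self-contained sketch of the same line-framing idea; every name in it is illustrative and not part of the package API:

```python
import json
import re

SSE_DATA_PREFIX = "data: "
SSE_DONE = "data: [DONE]"


def parse_sse_block(block: bytes, pending: str = ""):
    """Split one raw SSE byte block into JSON payloads.

    Returns (payloads, leftover), where leftover is a trailing partial line
    to prepend to the next block, mirroring the _incomplete_json buffering
    in the iterator above.
    """
    payloads = []
    # Keep the newline on each piece so complete lines are detectable.
    lines = re.split(r"(?<=\n)", block.decode("utf-8"))
    for index, line in enumerate(lines):
        line = pending + line
        pending = ""
        if index == len(lines) - 1 and not line.endswith("\n"):
            return payloads, line  # partial line: wait for more bytes
        if line == "\n":
            continue  # blank line separating SSE events
        if line.startswith(SSE_DONE):
            break  # end-of-stream marker
        if line.startswith(SSE_DATA_PREFIX):
            # Drop the "data: " prefix and the trailing newline.
            payloads.append(json.loads(line[len(SSE_DATA_PREFIX):-1]))
    return payloads, pending


# Two complete events followed by a partial third line:
chunk = b'data: {"id": 1}\n\ndata: {"id": 2}\n\ndata: {"id'
events, leftover = parse_sse_block(chunk)
print(events)    # [{'id': 1}, {'id': 2}]
print(leftover)  # 'data: {"id'
```

In the SDK class the parsed payloads feed `ChatCompletionsDelta._deserialize` and a queue instead of a plain list, but the framing logic is the same.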
## Contributing diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index ea2dca1fe8ec..c551978dc9b7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -19,8 +19,8 @@ from ._model_base import SdkJSONEncoder, _deserialize from ._serialization import Serializer from ._vendor import ModelClientMixinABC -from ._client import ModelClient as ModelClientGenerated from ._operations._operations import build_model_get_chat_completions_request +from ._client import ModelClient as ModelClientGenerated from azure.core.exceptions import ( ClientAuthenticationError, @@ -45,6 +45,7 @@ class ModelClient(ModelClientGenerated): + @distributed_trace def get_streaming_chat_completions( self, @@ -66,7 +67,8 @@ def get_streaming_chat_completions( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.ChatCompletionsDeltaInterator: + ) -> _models.ChatCompletionsDeltaIterator: + error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -118,7 +120,7 @@ def get_streaming_chat_completions( _request.url = self._client.format_url(_request.url) - kwargs.pop("stream", True) + kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=True, **kwargs ) @@ -130,8 +132,7 @@ def get_streaming_chat_completions( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - result = _models.ChatCompletionsDeltaInterator(response) - return result + return _models.ChatCompletionsDeltaIterator(response.iter_bytes()) __all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index f7dd32510333..704a570c024c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -6,9 +6,128 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from io import IOBase +import json +import sys from typing import List +from .. 
import models as _models +from ._client import ModelClient as ModelClientGenerated +from typing import Callable, Any, Union, IO, Optional, Dict, TypeVar +from azure.core.utils import case_insensitive_dict +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from .._model_base import SdkJSONEncoder, _deserialize +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from .._operations._operations import build_model_get_chat_completions_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + +class ModelClient(ModelClientGenerated): + @distributed_trace_async + async def get_streaming_chat_completions( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: List[_models.ChatRequestMessage] = _Unset, + model_deployment: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + stop: Optional[List[str]] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> bool: # _models.ChatCompletionsDeltaIterator: + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) + + if body is _Unset: + if messages is _Unset: + raise TypeError("missing required argument: messages") + body = { + "extras": extras, + "frequency_penalty": frequency_penalty, + "max_tokens": max_tokens, + "messages": messages, + "presence_penalty": presence_penalty, + "response_format": response_format, + "seed": seed, + "stop": stop, + "stream": True, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_model_get_chat_completions_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + 
params=_params, + ) + _request.url = self._client.format_url(_request.url) + + kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return _models.ChatCompletionsDeltaIterator(response.iter_bytes()) + + __all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 0c81a51aa4b0..0efdb0c7375a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -10,20 +10,20 @@ import time import re import json +import types -from typing import List +from typing import List, Union from .. import models as _models from azure.core.rest import HttpResponse -class ChatCompletionsDeltaInterator: - """Representation of the streaming response chat completions request. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. +class ChatCompletionsDeltaIterator: + """Represents an iterator over ChatCompletionsDelta objects. It can be used for either synchronous or + asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream + into chat completions updates, each one represented by a ChatCompletionsDelta object. """ - # Enable console logs for debugging + # Enable console logs for debugging. For development only, will be removed before release.
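    # (When these logs are enabled, the class prints each raw and modified SSE
    # line, the elapsed read time per block, and a marker for every deserialized
    # update, which helps when debugging the partial-line buffering below.)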
ENABLE_CLASS_LOGS = False # The prefix of each line in the SSE stream that contains a JSON string @@ -33,45 +33,76 @@ class ChatCompletionsDeltaInterator: # The line indicating the end of the SSE stream SSE_DATA_EVENT_DONE = "data: [DONE]" - def __init__(self, response: HttpResponse): - self._response = response - self._bytes_iterator = response.iter_bytes() + + def __init__(self, bytes_iterator: Union[types.AsyncGeneratorType, types.GeneratorType]): + self._bytes_iterator = bytes_iterator + self._is_async_iterator = isinstance(self._bytes_iterator, types.AsyncGeneratorType) self._queue = queue.Queue() self._incomplete_json = "" self._done = False + + def __aiter__(self): + if (not self._is_async_iterator): + raise ValueError("This method is only supported for async iterators") + return self + + def __iter__(self): + if (self._is_async_iterator): + raise ValueError("This method is not supported for async iterators") return self + + async def __anext__(self): + if (not self._is_async_iterator): + raise ValueError("This method is only supported for async iterators") + if self._queue.empty(): + await self._read_next_block_async() + if self._queue.empty(): + await self.close() + raise StopAsyncIteration + return self._queue.get() + + def __next__(self): + if (self._is_async_iterator): + raise ValueError("This method is not supported for async iterators") if self._queue.empty(): self._read_next_block() if self._queue.empty(): + self.close() raise StopIteration return self._queue.get() - def __enter__(self): - return self - - def __exit__(self, *args): - self._response.close() - def close(self): - self._response.close() + async def _read_next_block_async(self): + start_time = 0.0 + if self.ENABLE_CLASS_LOGS: + start_time = time.time() + try: + element = await self._bytes_iterator.__anext__() + except StopAsyncIteration: + await self.close() + self._done = True + return + self._deserialize_and_add_to_queue(element, start_time) - def __del__(self): - self._response.close() def _read_next_block(self): - + start_time = 0.0 if self.ENABLE_CLASS_LOGS: start_time = time.time() - try: element = next(self._bytes_iterator) except StopIteration: + self.close() self._done = True return + self._deserialize_and_add_to_queue(element, start_time) + + + def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0): if self.ENABLE_CLASS_LOGS: print(f"Elapsed time: {int(1000*(time.time()- start_time))}ms") @@ -120,8 +151,23 @@ def _read_next_block(self): print("[added]") + def __enter__(self): + return self + + + def __exit__(self) -> None: + self.close() + + + def close(self) -> None: + self._bytes_iterator.close() + + async def close(self): + await self._bytes_iterator.aclose() + + __all__: List[str] = [ - "ChatCompletionsDeltaInterator" + "ChatCompletionsDeltaIterator" ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 08e6b902dc60..08d010b6c8ba 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -18,6 +18,7 @@ The concepts are similar, you can easily modify any of the samples to your needs |**File Name**|**Description**| |----------------|-------------| +|[sample_streaming_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py) | One chat completion operation using a synchronous client and 
streaming response. | |[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. | |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | |[sample_image_generation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_generation.py) | Generate an image from a prompt using a synchronous client. | @@ -26,6 +27,7 @@ The concepts are similar, you can easily modify any of the samples to your needs |**File Name**|**Description**| |----------------|-------------| +|[sample_streaming_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py) | One chat completion operation using an asynchronous client and streaming response. | |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | |[sample_image_generation_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py) | Generate an image from a prompt using an asynchronous client. 
| diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 55818705eb2a..7f6b8a0ed9e4 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -17,7 +17,6 @@ """ import asyncio - async def sample_chat_completions_async(): import os from azure.ai.inference.aio import ModelClient diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 6b806a641e96..4d370a4e8c6f 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -17,7 +17,6 @@ """ import asyncio - async def sample_embeddings_async(): import os from azure.ai.inference.aio import ModelClient diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index 36477ecfe194..f54e8971238c 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -17,7 +17,6 @@ """ import asyncio - async def sample_image_generation_async(): import os diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py new file mode 100644 index 000000000000..85fe4bb5bf73 --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py @@ -0,0 +1,88 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completion streaming response + from the service using an asynchronous client. + +USAGE: + python sample_streaming_chat_completion_async.py + + Set these two environment variables before running the sample: + 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com + where `deployment-name` is your unique AI Model deployment name, and + `azure-region` is the Azure region where your model is deployed. + 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. +""" +import asyncio + +import os +from azure.ai.inference.aio import ModelClient +from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ChatCompletionsDelta +from azure.core.credentials import AzureKeyCredential + +async def sample_streaming_chat_completions_async(): + + # Read the values of your model endpoint and key from environment variables + try: + endpoint = os.environ["MODEL_ENDPOINT"] + key = os.environ["MODEL_KEY"] + except KeyError: + print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Set them before running this sample.") + exit() + + # Create Model Client for asynchronous operations + client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + # Do a single streaming chat completion operation. Start the operation and get a Future object.
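+    # (asyncio.ensure_future wraps the coroutine in a Task and schedules it on
+    # the running event loop right away; the loop below then polls future.done()
+    # instead of awaiting, to show that other work can run while the request is
+    # in flight.)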
+ future = asyncio.ensure_future( + client.get_streaming_chat_completions( + messages=[ + ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), + ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day."), + ] + ) + ) + + # Loop until you get the HTTP response headers from the service + while not future.done(): + await asyncio.sleep(0.1) + print("Waiting...") + + # Get the result + result = future.result() + + # Iterate on the result to get chat completion updates, as they arrive from the service + accumulated_content = "" + async for element in result: + accumulated_content += element.choices[0].delta.content if element.choices[0].delta.content is not None else "" + print_chat_completions_delta(element) + + print(f"Accumulated content: {accumulated_content}") + + # Remember to always close the asynchronous client when you are done with it + await client.close() + + +def print_chat_completions_delta(element: ChatCompletionsDelta): + print(f"content: {repr(element.choices[0].delta.content)}, "\ + f"role: {element.choices[0].delta.role}, "\ + f"finish_reason: {element.choices[0].finish_reason}, "\ + f"index: {element.choices[0].index}") + print(f"id: {element.id}, created: {element.created}, model: {element.model}, object: {element.object}") + if element.usage is not None: + print(f"usage: capacity_type: {element.usage.capacity_type}, "\ + f"prompt_tokens: {element.usage.prompt_tokens}, "\ + f"completion_tokens: {element.usage.completion_tokens}, "\ + f"usage.total_tokens: {element.usage.total_tokens}") + + +async def main(): + await sample_streaming_chat_completions_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py index 37b34bc1e889..3f096158f4eb 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py @@ -4,7 +4,8 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to get a chat completion streaming response from the service using a synchronous. + This sample demonstrates how to get a chat completion streaming response + from the service using a synchronous client. USAGE: python sample_streaming_chat_completion.py @@ -15,35 +16,12 @@ `azure-region` is the Azure region where your model is deployed. 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. """ +import os +from azure.ai.inference import ModelClient +from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ChatCompletionsDelta +from azure.core.credentials import AzureKeyCredential - -def sample_chat_completions(): - import os - from azure.ai.inference import ModelClient - from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage - from azure.core.credentials import AzureKeyCredential - - # [START logging] - import sys - import logging - - # Acquire the logger for this client library. Use 'azure' to affect both - # 'azure.core` and `azure.ai.vision.imageanalysis' libraries. - logger = logging.getLogger("azure") - - # Set the desired logging level. logging.INFO or logging.DEBUG are good options. 
- logger.setLevel(logging.DEBUG) - - # Direct logging output to stdout (the default): - handler = logging.StreamHandler(stream=sys.stdout) - # Or direct logging output to a file: - # handler = logging.FileHandler(filename = 'sample.log') - logger.addHandler(handler) - - # Optional: change the default logging format. Here we add a timestamp. - formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") - handler.setFormatter(formatter) - # [END logging] +def sample_streaming_chat_completions(): # Read the values of your model endpoint and key from environment variables try: @@ -55,46 +33,38 @@ def sample_chat_completions(): exit() # Create Model Client for synchronous operations - client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True) + client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) messages = [ ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day"), + ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day."), ] # [START streaming_chat_completions] # Do a single chat completion operation. This will be a synchronously (blocking) call. result = client.get_streaming_chat_completions(messages=messages) - accumulated_content = "" - # Iterate on the result to get chat completion updates, as they arrive from the service - for delta in result: - print("ChatCompletionsDelta:") - for index, choice in enumerate(delta.choices): - print(f"choices[{index}].delta.content: `{choice.delta.content}`") - if choice.delta.content is not None: - accumulated_content += choice.delta.content - print(f"choices[{index}].delta.role: {choice.delta.role}") - print(f"choices[{index}].finish_reason: {choice.finish_reason}") - print(f"choices[{index}].index: {choice.index}") - print(f"id: {delta.id}") - print(f"created: {delta.created}") - print(f"model: {delta.model}") - print(f"object: {delta.object}") - if delta.usage is not None: - print(f"usage.capacity_type: {delta.usage.capacity_type}") - print(f"usage.prompt_tokens: {delta.usage.prompt_tokens}") - print(f"usage.completion_tokens: {delta.usage.completion_tokens}") - print(f"usage.total_tokens: {delta.usage.total_tokens}") - - # Remember to always close the result object when you are done with it - result.close() + accumulated_content = "" + for element in result: + accumulated_content += element.choices[0].delta.content if element.choices[0].delta.content is not None else "" + print_chat_completions_delta(element) print(f"Accumulated content: {accumulated_content}") - # [END streaming_chat_completions] +def print_chat_completions_delta(element: ChatCompletionsDelta): + print(f"content: {repr(element.choices[0].delta.content)}, "\ + f"role: {element.choices[0].delta.role}, "\ + f"finish_reason: {element.choices[0].finish_reason}, "\ + f"index: {element.choices[0].index}") + print(f"id: {element.id}, created: {element.created}, model: {element.model}, object: {element.object}") + if element.usage is not None: + print(f"usage: capacity_type: {element.usage.capacity_type}, "\ + f"prompt_tokens: {element.usage.prompt_tokens}, "\ + f"completion_tokens: {element.usage.completion_tokens}, "\ + f"usage.total_tokens: {element.usage.total_tokens}") + if __name__ == "__main__": - sample_chat_completions() + sample_streaming_chat_completions() diff --git a/sdk/ai/azure-ai-inference/tests/README.md 
b/sdk/ai/azure-ai-inference/tests/README.md index a83ac986a9d7..5062f95e27be 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -1,4 +1,4 @@ -# Azure Image Analysis client library tests for Python +# Azure AI Model Inference client library tests for Python ## Running tests locally, on a Windows PC, against the live service @@ -9,10 +9,10 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ ### Setup * Clone or download this sample repository. -* Open a command prompt window in the folder `sdk\vision\azure-ai-vision-imageanalysis`. +* Open a command prompt window in the folder `sdk\ai\azure-ai-inference`. * If you want to run tests against the latest public AI Model Inference client library, install it by running: ```bash - pip install azure-ai-vision-imageanalysis + pip install azure-ai-inference ``` * If you want to run tests against a locally built AI Model Inference client library: * First build the wheel: @@ -23,7 +23,7 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ ``` * Then install the resulting local wheel (update version `1.0.0b1` to the current one): ```bash - pip install dist\azure_ai_vision_imageanalysis-1.0.0b1-py3-none-any.whl --user --force-reinstall + pip install dist\azure_ai_inference-1.0.0b1-py3-none-any.whl --user --force-reinstall ``` @@ -33,9 +33,9 @@ See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/bl In addition, the following environment values **must be** defined, although not used. Assign any value to them: ``` -set VISION_TENANT_ID=not-used -set VISION_CLIENT_ID=not-used -set VISION_CLIENT_SECRET=not-used +set AI_TENANT_ID=not-used +set AI_CLIENT_ID=not-used +set AI_CLIENT_SECRET=not-used ``` ### Configure test proxy From 9d989582eacc091789e5283cc8d315112f42de6b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 10 Apr 2024 23:57:22 -0700 Subject: [PATCH 025/112] A few quality gates fixes --- sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py | 2 +- .../azure-ai-inference/azure/ai/inference/models/_patch.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 704a570c024c..e694fb6d7772 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -60,7 +60,7 @@ async def get_streaming_chat_completions( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> bool: # _models.ChatCompletionsDeltaIterator: + ) -> _models.ChatCompletionsDeltaIterator: error_map = { 401: ClientAuthenticationError, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 0efdb0c7375a..e81a43c3ed0e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -54,7 +54,7 @@ def __iter__(self): return self - async def __anext__(self): + async def __anext__(self) -> _models.ChatCompletionsDelta: if (not self._is_async_iterator): raise ValueError("This method is only supported for async iterators") if self._queue.empty(): @@ -65,7 +65,7 @@ async def __anext__(self): return self._queue.get() - def __next__(self) ->
_models.ChatCompletionsDelta: if (self._is_async_iterator): raise ValueError("This method is not supported for async iterators") if self._queue.empty(): @@ -162,7 +162,7 @@ def __exit__(self) -> None: def close(self) -> None: self._bytes_iterator.close() - async def close(self): + async def close(self) -> None: await self._bytes_iterator.aclose() From 37c359965e86283ca545f261efab8699128037eb Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 11 Apr 2024 00:10:34 -0700 Subject: [PATCH 026/112] Use aclose() for async iterator --- .../azure/ai/inference/models/_patch.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index e81a43c3ed0e..493da12809fc 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -60,7 +60,7 @@ async def __anext__(self) -> _models.ChatCompletionsDelta: if self._queue.empty(): await self._read_next_block_async() if self._queue.empty(): - await self.close() + await self.aclose() raise StopAsyncIteration return self._queue.get() @@ -83,7 +83,7 @@ async def _read_next_block_async(self): try: element = await self._bytes_iterator.__anext__() except StopAsyncIteration: - await self.close() + await self.aclose() self._done = True return self._deserialize_and_add_to_queue(element, start_time) @@ -161,8 +161,8 @@ def __exit__(self) -> None: def close(self) -> None: self._bytes_iterator.close() - - async def close(self) -> None: + + async def aclose(self) -> None: await self._bytes_iterator.aclose() From 58b3669d9db6fb1412cd3db6de33f5684f271eb4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 11 Apr 2024 13:45:49 -0700 Subject: [PATCH 027/112] Update env-variable names --- sdk/ai/azure-ai-inference/README.md | 112 +++++++----- sdk/ai/azure-ai-inference/samples/README.md | 34 ++++-- .../sample_chat_completions_async.py | 26 ++-- .../async_samples/sample_embeddings_async.py | 29 ++--- .../sample_image_generation_async.py | 40 ++----- ...sample_streaming_chat_completions_async.py | 18 +-- .../samples/sample_chat_completions.py | 63 ++++------ .../samples/sample_embeddings.py | 54 +++------ .../samples/sample_image_generation.py | 41 ++----- .../sample_streaming_chat_completions.py | 19 +-- .../tests/model_inference_test_base.py | 12 +- 11 files changed, 172 insertions(+), 276 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index f15af8638911..771ba08fd9ab 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,15 +1,16 @@ # Azure model client library for Python -The Azure AI Model Client Library allows you to do inference against any of AI models in you deployed to Azure. It supports both "model as a service" and "models with hosted managed infrastructure". For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). +The ModelClient Library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and self-hosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints").
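Taken together, patches 025 and 026 leave the iterator with `close()` on the synchronous path and `aclose()` on the asynchronous path. A hedged end-to-end sketch of the async usage pattern this enables (method, model, and environment-variable names all come from this patch series; this is not a shipped sample):

```python
import asyncio
import os

from azure.ai.inference.aio import ModelClient
from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage
from azure.core.credentials import AzureKeyCredential


async def main():
    client = ModelClient(
        endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
    )

    result = await client.get_streaming_chat_completions(
        messages=[
            ChatRequestSystemMessage(content="You are a helpful assistant."),
            ChatRequestUserMessage(content="Say hello."),
        ]
    )

    # Updates arrive as ChatCompletionsDelta objects while the service streams.
    async for update in result:
        content = update.choices[0].delta.content
        if content is not None:
            print(content, end="")

    await result.aclose()  # the async path uses aclose(), per patch 026
    await client.close()


asyncio.run(main())
```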
The ModelClient library makes service calls using REST API version `2024-04-01-preview` specified here (TODO: insert link). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). -Use the model client library to: +Use the ModelClient library to: * Authenticate against the service +* Get information about the model * Get chat completions * Get embeddings * Generate an image from a text prompt -Note that for inference of OpenAI models hosted on azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client. +Note that for inference using OpenAI models hosted on Azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client. [Product documentation](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview) | [Samples](https://aka.ms/azsdk/model-client/samples/python) @@ -23,7 +24,11 @@ Note that for inference of OpenAI models hosted on azure you should be using the * [Python 3.8](https://www.python.org/) or later installed, including [pip](https://pip.pypa.io/en/stable/). * An [Azure subscription](https://azure.microsoft.com/free). -* A [TBD resource](https://azure.microsoft.com/) in your Azure subscription. You will need the key and endpoint from this resource to authenticate against the service. +* An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. To construct the `ModelClient`, you will need to pass in the endpoint URL and key associated with your deployed AI model. + + * The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). + + * The key is a 32-character string. ### Install the Model Client package @@ -31,85 +36,59 @@ Note that for inference of OpenAI models hosted on azure you should be using the pip install azure-ai-inference ``` -### Set environment variables - -To authenticate the `ModelClient`, you will need the endpoint and key from your TBD resource in the [Azure Portal](https://portal.azure.com). The code snippet below assumes these values are stored in environment variables: - -* Set the environment variable `MODEL_ENDPOINT` to the endpoint URL. It has the form `https://your-model-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-model-deployment-name` is your unique TBD resource name. - -* Set the environment variable `MODEL_KEY` to the key. The key is a 32-character string. - -Note that the client library does not directly read these environment variable at run time. The endpoint and key must be provided to the constructor of `ModelClient` in your code. The code snippet below reads environment variables to promote the practice of not hard-coding secrets in your source code. - ### Create and authenticate the client -Once you define the environment variables, this Python code will create and authenticate a synchronous `ModelClient`: +Assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ModelClient`: ```python -import os from azure.ai.inference import ModelClient -from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage from azure.core.credentials import AzureKeyCredential -# [START logging] -import sys -import logging - -# Acquire the logger for this client library. Use 'azure' to affect both -# 'azure.core` and `azure.ai.vision.imageanalysis' libraries. -logger = logging.getLogger("azure") - -# Set the desired logging level. logging.INFO or logging.DEBUG are good options. -logger.setLevel(logging.DEBUG) - -# Direct logging output to stdout (the default): -handler = logging.StreamHandler(stream=sys.stdout) -# Or direct logging output to a file: -# handler = logging.FileHandler(filename = 'sample.log') -logger.addHandler(handler) - -# Optional: change the default logging format. Here we add a timestamp. -formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") -handler.setFormatter(formatter) +# Create Model Client for synchronous operations +client = ModelClient( + endpoint=endpoint, + credential=AzureKeyCredential(key) +) ``` -A synchronous client supports synchronous inference methods, meaning they will block until the service responds with inference results. The code snippets below all use synchronous methods because it's easier for a getting-started guide. The SDK offers equivalent asynchronous APIs which are often preferred. To create an asynchronous client, do the following: +A synchronous client supports synchronous inference methods, meaning they will block until the service responds with inference results. For simplicity, the code snippets below all use synchronous methods. The client offers equivalent asynchronous methods which are more commonly used in production. -* Update the above code to import `ModelClient` from the `aio` namespace: - ```python - from azure.ai.inference.aio import ModelClient - ``` +To create an asynchronous client, install the additional package [aiohttp](https://pypi.org/project/aiohttp/): -* Install the additional package [aiohttp](https://pypi.org/project/aiohttp/): - ```bash - pip install aiohttp - ``` +```bash + pip install aiohttp +``` +and update the code above to import `ModelClient` from the `aio` namespace: +```python + import asyncio + from azure.ai.inference.aio import ModelClient +``` ## Key concepts ### Chat Completions -TBD +TODO: Add overview and link to explain chat completions. -Target the `/v1/chat/completions` route +Chat completion operations target the URL route `/v1/chat/completions` on the provided endpoint. ### Embeddings -TBD +TODO: Add overview and link to explain embeddings. -Target the `/v1/embeddings` route +Embeddings operations target the URL route `/v1/embeddings` on the provided endpoint. ### Image Generation -TBD +TODO: Add overview and link to explain image generation. -Target the `/images/generations` route +Image generation operations target the URL route `/images/generations` on the provided endpoint. ## Examples @@ -125,7 +104,7 @@ See the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai ### Chat completions example -This example demonstrates how to generate chat completions.
+This example demonstrates how to generate a single chat completion. @@ -142,11 +121,10 @@ result = client.get_chat_completions( # Print results to the console print("Chat Completions:") -for index, choice in enumerate(result.choices): - print(f"choices[{index}].message.content: {choice.message.content}") - print(f"choices[{index}].message.role: {choice.message.role}") - print(f"choices[{index}].finish_reason: {choice.finish_reason}") - print(f"choices[{index}].index: {choice.index}") +print(f"choices[0].message.content: {result.choices[0].message.content}") +print(f"choices[0].message.role: {result.choices[0].message.role}") +print(f"choices[0].finish_reason: {result.choices[0].finish_reason}") +print(f"choices[0].index: {result.choices[0].index}") print(f"id: {result.id}") print(f"created: {result.created}") print(f"model: {result.model}") @@ -159,7 +137,7 @@ print(f"usage.total_tokens: {result.usage.total_tokens}") -To generate completions for additional messages, simply call `get_chat_completions` multiple times using the same `ModelClient`. +To generate completions for additional messages, simply call `get_chat_completions` multiple times using the same `client`. ### Embeddings example @@ -169,21 +147,17 @@ This example demonstrates how to get embeddings. ```python # Do a single embeddings operation. This will be a synchronously (blocking) call. -result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"]) +result = client.get_embeddings(input=["first phrase", "second phrase", "third phrase"]) # Print results to the console print("Embeddings result:") -for index, item in enumerate(result.data): - len = item.embedding.__len__() - print(f"data[{index}].index: {item.index}") - print(f"data[{index}].embedding[0]: {item.embedding[0]}") - print(f"data[{index}].embedding[1]: {item.embedding[1]}") - print("...") - print(f"data[{index}].embedding[{len-2}]: {item.embedding[len-2]}") - print(f"data[{index}].embedding[{len-1}]: {item.embedding[len-1]}") +for item in result.data: + length = len(item.embedding) + print(f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]") print(f"id: {result.id}") print(f"model: {result.model}") print(f"object: {result.object}") +print(f"usage.input_tokens: {result.usage.input_tokens}") print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") print(f"usage.total_tokens: {result.usage.total_tokens}") ``` @@ -289,7 +263,7 @@ None redacted logs are generated for log level `logging.DEBUG` only. Be sure to ## Next steps -* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder, containing fully runnable Python code for Image Analysis (all visual features, synchronous and asynchronous clients, from image file or URL). +* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder, containing fully runnable Python code for doing inference using synchronous and asynchronous clients.
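The README examples above stop at non-streaming calls. For completeness, a condensed sketch of the synchronous streaming variant, assuming the same `client` and `messages` objects as in the samples of this patch series (the method name comes from `_patch.py` above):

```python
# Request a streamed response; iterating yields ChatCompletionsDelta updates.
result = client.get_streaming_chat_completions(messages=messages)

accumulated = ""
for update in result:
    delta = update.choices[0].delta
    if delta.content is not None:
        accumulated += delta.content

result.close()  # release the underlying HTTP stream when done
print(accumulated)
```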
## Contributing diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 08d010b6c8ba..ebd238a8fed7 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -51,19 +51,34 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ ## Set environment variables -See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#set-environment-variables) here. +To construct the `ModelClient`, you will need to pass in the endpoint URL and key associated with your deployed AI model. + +* The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). + +* The key is a 32-character string. + +For convenience, and to promote the practice of not hard-coding secrets in your source code, all samples here assume the endpoint URL and key are stored in environment variables. You will need to set these environment variables before running the samples as-is. These are the environment variables used: + +| Sample type | Endpoint environment variable name | Key environment variable name | +|----------|----------|----------| +| Chat completions | `CHAT_COMPLETIONS_ENDPOINT` | `CHAT_COMPLETIONS_KEY` | +| Embeddings | `EMBEDDINGS_ENDPOINT` | `EMBEDDINGS_KEY` | +| Image generation | `IMAGE_GENERATION_ENDPOINT` | `IMAGE_GENERATION_KEY` | +Note that the client library does not directly read these environment variables at run time. The sample code reads the environment variables and constructs the `ModelClient` with these values. + ## Running the samples To run the first sample, type: ```bash -python sample_chat_completion_async.py +python sample_chat_completions.py ``` similarly for the other samples. ## Example console output -The sample `sample_chat_completion_async.py` sends the following system and user messages in a single call: +The sample `sample_chat_completions.py` sends the following system and user messages in a single call: - System: "You are an AI assistant that helps people find information." - User: "How many feet are in a mile?" @@ -72,17 +87,18 @@ And prints out the service response. It should look similar to the following: ```text Chat Completions: -choices[0].message.content: There are 5,280 feet in a mile. +choices[0].message.content: Hello! I'd be happy to help you find the answer to your question. There are 5,280 feet in a mile.
choices[0].message.role: assistant choices[0].finish_reason: stop choices[0].index: 0 -id: 93f5bea2-11ec-4b31-af73-cb663196ebd5 -created: 1970-01-14 01:11:54+00:00 -model: Llama-2-70b-chat +id: 77f08d7e-8127-431d-bed5-a814b78ddd80 +created: 1970-01-08 23:28:48+00:00 +model: Llama-2-13b-chat object: chat.completion +usage.capacity_type: None usage.prompt_tokens: 41 -usage.completion_tokens: 15 -usage.total_tokens: 56 +usage.completion_tokens: 32 +usage.total_tokens: 73 ``` ## Troubleshooting diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 7f6b8a0ed9e4..82909fceb465 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -10,10 +10,11 @@ python sample_chat_completion_async.py Set these two environment variables before running the sample: - 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com - where `deployment-name` is your unique AI Model deployment name, and - `azure-region` is the Azure region where your model is deployed. - 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ import asyncio @@ -25,10 +26,10 @@ async def sample_chat_completions_async(): # Read the values of your model endpoint and key from environment variables try: - endpoint = os.environ["MODEL_ENDPOINT"] - key = os.environ["MODEL_KEY"] + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] except KeyError: - print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") print("Set them before running this sample.") exit() @@ -56,20 +57,19 @@ async def sample_chat_completions_async(): # Print results to the console print("Chat Completions:") - for index, choice in enumerate(result.choices): - print(f"choices[{index}].message.content: {choice.message.content}") - print(f"choices[{index}].message.role: {choice.message.role}") - print(f"choices[{index}].finish_reason: {choice.finish_reason}") - print(f"choices[{index}].index: {choice.index}") + print(f"choices[0].message.content: {result.choices[0].message.content}") + print(f"choices[0].message.role: {result.choices[0].message.role}") + print(f"choices[0].finish_reason: {result.choices[0].finish_reason}") + print(f"choices[0].index: {result.choices[0].index}") print(f"id: {result.id}") print(f"created: {result.created}") print(f"model: {result.model}") print(f"object: {result.object}") + print(f"usage.capacity_type: {result.usage.capacity_type}") print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") print(f"usage.completion_tokens: {result.usage.completion_tokens}") print(f"usage.total_tokens: {result.usage.total_tokens}") - async def main(): await sample_chat_completions_async() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 6b806a641e96..4c4a4f551f2d 100644 ---
a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -10,10 +10,11 @@ python sample_embeddings_async.py Set these two environment variables before running the sample: - 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com - where `deployment-name` is your unique AI Model deployment name, and - `azure-region` is the Azure region where your model is deployed. - 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. + 1) EMBEDDINGS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ import asyncio @@ -24,10 +25,10 @@ async def sample_embeddings_async(): # Read the values of your model endpoint and key from environment variables try: - endpoint = os.environ["MODEL_ENDPOINT"] - key = os.environ["MODEL_KEY"] + endpoint = os.environ["EMBEDDINGS_ENDPOINT"] + key = os.environ["EMBEDDINGS_KEY"] except KeyError: - print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Missing environment variable 'EMBEDDINGS_ENDPOINT' or 'EMBEDDINGS_KEY'") print("Set them before running this sample.") exit() @@ -35,7 +36,7 @@ async def sample_embeddings_async(): client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single embeddings operation. Start the operation and get a Future object. - future = asyncio.ensure_future(client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])) + future = asyncio.ensure_future(client.get_embeddings(input=["first phrase", "second phrase", "third phrase"])) # Loop until the operation is done while not future.done(): @@ -48,17 +49,13 @@ async def sample_embeddings_async(): # Print results to the console print("Embeddings result:") - for index, item in enumerate(result.data): - len = item.embedding.__len__() - print(f"data[{index}].index: {item.index}") - print(f"data[{index}].embedding[0]: {item.embedding[0]}") - print(f"data[{index}].embedding[1]: {item.embedding[1]}") - print("...") - print(f"data[{index}].embedding[{len-2}]: {item.embedding[len-2]}") - print(f"data[{index}].embedding[{len-1}]: {item.embedding[len-1]}") + for item in result.data: + length = len(item.embedding) + print(f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]") print(f"id: {result.id}") print(f"model: {result.model}") print(f"object: {result.object}") + print(f"usage.input_tokens: {result.usage.input_tokens}") print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") print(f"usage.total_tokens: {result.usage.total_tokens}") diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index f54e8971238c..91892c580c0a 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -10,47 +10,25 @@ python sample_image_generation_async.py Set these two environment variables before running the sample: - 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com - where
`deployment-name` is your unique AI Model deployment name, and - `azure-region` is the Azure region where your model is deployed. - 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. + 1) IMAGE_GENERATION_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) IMAGE_GENERATION_KEY - Your model key (a 32-character string). Keep it secret. """ import asyncio async def sample_image_generation_async(): import os - from azure.ai.inference.aio import ModelClient from azure.core.credentials import AzureKeyCredential - - # [START logging] - import sys - import logging - - # Acquire the logger for this client library. Use 'azure' to affect both - # 'azure.core` and `azure.ai.vision.imageanalysis' libraries. - logger = logging.getLogger("azure") - - # Set the desired logging level. logging.INFO or logging.DEBUG are good options. - logger.setLevel(logging.DEBUG) - - # Direct logging output to stdout (the default): - handler = logging.StreamHandler(stream=sys.stdout) - # Or direct logging output to a file: - # handler = logging.FileHandler(filename = 'sample.log') - logger.addHandler(handler) - - # Optional: change the default logging format. Here we add a timestamp. - formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") - handler.setFormatter(formatter) - # [END logging] - + # Read the values of your model endpoint and key from environment variables try: - endpoint = os.environ["MODEL_ENDPOINT"] - key = os.environ["MODEL_KEY"] + endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"] + key = os.environ["IMAGE_GENERATION_KEY"] except KeyError: - print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Missing environment variable 'IMAGE_GENERATION_ENDPOINT' or 'IMAGE_GENERATION_KEY'") print("Set them before running this sample.") exit() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py index 85fe4bb5bf73..0e6d3d57f15f 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py @@ -8,16 +8,16 @@ from the service using an asynchronous client. USAGE: - python sample_streaming_chat_completion_async.py + python sample_streaming_chat_completions_async.py Set these two environment variables before running the sample: - 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com - where `deployment-name` is your unique AI Model deployment name, and - `azure-region` is the Azure region where your model is deployed. - 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ import asyncio - import os from azure.ai.inference.aio import ModelClient from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ChatCompletionsDelta @@ -27,10 +27,10 @@ async def sample_streaming_chat_completions_async(): # Read the values of your model endpoint and key from environment variables try: - endpoint = os.environ["MODEL_ENDPOINT"] - key = os.environ["MODEL_KEY"] + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] except KeyError: - print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") print("Set them before running this sample.") exit() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index 98b46caf064c..6a35783cc9b2 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -4,62 +4,40 @@ DESCRIPTION: - This sample demonstrates how to get a chat completion response from the service using a synchronous client. + This sample demonstrates how to get a chat completions response from the service using a synchronous client. USAGE: - python sample_chat_completion.py + python sample_chat_completions.py Set these two environment variables before running the sample: - 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com - where `deployment-name` is your unique AI Model deployment name, and - `azure-region` is the Azure region where your model is deployed. - 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions(): - # [START create_client] import os - from azure.ai.inference import ModelClient from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage - from azure.core.credentials import AzureKeyCredential - - # [START logging] - import sys - import logging - - # Acquire the logger for this client library. Use 'azure' to affect both - # 'azure.core` and `azure.ai.vision.imageanalysis' libraries. - logger = logging.getLogger("azure") - - # Set the desired logging level. logging.INFO or logging.DEBUG are good options. - logger.setLevel(logging.DEBUG) - - # Direct logging output to stdout (the default): - handler = logging.StreamHandler(stream=sys.stdout) - # Or direct logging output to a file: - # handler = logging.FileHandler(filename = 'sample.log') - logger.addHandler(handler) - - # Optional: change the default logging format. Here we add a timestamp. - formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") - handler.setFormatter(formatter) - # [END logging] - + # Read the values of your model endpoint and key from environment variables try: - endpoint = os.environ["MODEL_ENDPOINT"] - key = os.environ["MODEL_KEY"] + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] except KeyError: - print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") print("Set them before running this sample.") exit() + # [START create_client] + from azure.ai.inference import ModelClient + from azure.core.credentials import AzureKeyCredential + # Create Model Client for synchronous operations client = ModelClient( endpoint=endpoint, - credential=AzureKeyCredential(key), - logging_enable=True, + credential=AzureKeyCredential(key) ) # [END create_client] @@ -76,11 +54,10 @@ def sample_chat_completions(): # Print results to the console print("Chat Completions:") - for index, choice in enumerate(result.choices): - print(f"choices[{index}].message.content: {choice.message.content}") - print(f"choices[{index}].message.role: {choice.message.role}") - print(f"choices[{index}].finish_reason: {choice.finish_reason}") - print(f"choices[{index}].index: {choice.index}") + print(f"choices[0].message.content: {result.choices[0].message.content}") + print(f"choices[0].message.role: {result.choices[0].message.role}") + print(f"choices[0].finish_reason: {result.choices[0].finish_reason}") + print(f"choices[0].index: {result.choices[0].index}") print(f"id: {result.id}") print(f"created: {result.created}") print(f"model: {result.model}") diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 5e8276e324ca..10a4295c6941 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -10,70 +10,44 @@ python sample_embeddings.py Set these two environment variables before running the sample: - 1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com - where `deployment-name` is your unique AI Model deployment name, and - `azure-region` is the Azure region where your model is deployed. - 2) MODEL_KEY - Your model key (a 32-character string). Keep it secret. + 1) EMBEDDINGS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """
-    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
-    handler.setFormatter(formatter)
-    # [END logging]
-
+    # Read the values of your model endpoint and key from environment variables
     try:
-        endpoint = os.environ["MODEL_ENDPOINT"]
-        key = os.environ["MODEL_KEY"]
+        endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
+        key = os.environ["CHAT_COMPLETIONS_KEY"]
     except KeyError:
-        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'")
         print("Set them before running this sample.")
         exit()

+    # [START create_client]
+    from azure.ai.inference import ModelClient
+    from azure.core.credentials import AzureKeyCredential
+
     # Create Model Client for synchronous operations
     client = ModelClient(
         endpoint=endpoint,
-        credential=AzureKeyCredential(key),
-        logging_enable=True,
+        credential=AzureKeyCredential(key)
     )
     # [END create_client]
@@ -76,11 +54,10 @@ def sample_chat_completions():
     # Print results to the console
     print("Chat Completions:")
-    for index, choice in enumerate(result.choices):
-        print(f"choices[{index}].message.content: {choice.message.content}")
-        print(f"choices[{index}].message.role: {choice.message.role}")
-        print(f"choices[{index}].finish_reason: {choice.finish_reason}")
-        print(f"choices[{index}].index: {choice.index}")
+    print(f"choices[0].message.content: {result.choices[0].message.content}")
+    print(f"choices[0].message.role: {result.choices[0].message.role}")
+    print(f"choices[0].finish_reason: {result.choices[0].finish_reason}")
+    print(f"choices[0].index: {result.choices[0].index}")
     print(f"id: {result.id}")
     print(f"created: {result.created}")
     print(f"model: {result.model}")
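For reference, the reworked synchronous chat sample boils down to the following minimal sketch. This is assembled from the diff above, assuming `CHAT_COMPLETIONS_ENDPOINT` and `CHAT_COMPLETIONS_KEY` are set and using the message class names as they stand at this point in the series:

```python
import os

from azure.ai.inference import ModelClient
from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage
from azure.core.credentials import AzureKeyCredential

# Create the client with key-based authentication, as in the sample above.
client = ModelClient(
    endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
)

# A single, blocking chat completions call; only the first choice is printed,
# matching the simplified print logic introduced by this patch.
result = client.get_chat_completions(
    messages=[
        ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
        ChatRequestUserMessage(content="How many feet are in a mile?"),
    ]
)
print(f"choices[0].message.content: {result.choices[0].message.content}")
```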
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index 5e8276e324ca..10a4295c6941 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -10,70 +10,44 @@
     python sample_embeddings.py

     Set these two environment variables before running the sample:
-    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
-       where `deployment-name` is your unique AI Model deployment name, and
-       `azure-region` is the Azure region where your model is deployed.
-    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+    1) EMBEDDINGS_ENDPOINT - Your endpoint URL, in the form
+       https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
+       where `your-deployment-name` is your unique AI Model deployment name, and
+       `your-azure-region` is the Azure region where your model is deployed.
+    2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret.
 """

 def sample_embeddings():
     import os
-
     from azure.ai.inference import ModelClient
     from azure.core.credentials import AzureKeyCredential

-    # [START logging]
-    import sys
-    import logging
-
-    # Acquire the logger for this client library. Use 'azure' to affect both
-    # 'azure.core' and 'azure.ai.inference' libraries.
-
-    logger = logging.getLogger("azure")
-
-    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
-    logger.setLevel(logging.DEBUG)
-
-    # Direct logging output to stdout (the default):
-    handler = logging.StreamHandler(stream=sys.stdout)
-    # Or direct logging output to a file:
-    # handler = logging.FileHandler(filename = 'sample.log')
-    logger.addHandler(handler)
-
-    # Optional: change the default logging format. Here we add a timestamp.
-    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
-    handler.setFormatter(formatter)
-    # [END logging]
-
     # Read the values of your model endpoint and key from environment variables
     try:
-        endpoint = os.environ["MODEL_ENDPOINT"]
-        key = os.environ["MODEL_KEY"]
+        endpoint = os.environ["EMBEDDINGS_ENDPOINT"]
+        key = os.environ["EMBEDDINGS_KEY"]
     except KeyError:
-        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Missing environment variable 'EMBEDDINGS_ENDPOINT' or 'EMBEDDINGS_KEY'")
         print("Set them before running this sample.")
         exit()

     # Create a ModelClient for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential("key"))
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)

     # [START embeddings]
     # Do a single embeddings operation. This will be a synchronous (blocking) call.
-    result = client.get_embeddings(input=["first sentence", "second sentence", "third sentence"])
+    result = client.get_embeddings(input=["first phrase", "second phrase", "third phrase"])

     # Print results to the console
     print("Embeddings result:")
-    for index, item in enumerate(result.data):
-        len = item.embedding.__len__()
-        print(f"data[{index}].index: {item.index}")
-        print(f"data[{index}].embedding[0]: {item.embedding[0]}")
-        print(f"data[{index}].embedding[1]: {item.embedding[1]}")
-        print("...")
-        print(f"data[{index}].embedding[{len-2}]: {item.embedding[len-2]}")
-        print(f"data[{index}].embedding[{len-1}]: {item.embedding[len-1]}")
+    for item in result.data:
+        length = len(item.embedding)
+        print(f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]")
     print(f"id: {result.id}")
     print(f"model: {result.model}")
     print(f"object: {result.object}")
+    print(f"usage.input_tokens: {result.usage.input_tokens}")
     print(f"usage.prompt_tokens: {result.usage.prompt_tokens}")
     print(f"usage.total_tokens: {result.usage.total_tokens}")
     # [END embeddings]
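For reference, a minimal sketch of the reworked synchronous embeddings flow, assuming `EMBEDDINGS_ENDPOINT` and `EMBEDDINGS_KEY` are set:

```python
import os

from azure.ai.inference import ModelClient
from azure.core.credentials import AzureKeyCredential

client = ModelClient(
    endpoint=os.environ["EMBEDDINGS_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["EMBEDDINGS_KEY"]),
)

# One blocking call; the service returns one embedding vector per input string.
result = client.get_embeddings(input=["first phrase", "second phrase", "third phrase"])

for item in result.data:
    # `item.index` ties each vector back to its input; print a compact summary
    # of each vector instead of the full list, as the revised sample does.
    print(f"data[{item.index}]: {len(item.embedding)} values, first={item.embedding[0]}")
```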
-    logger = logging.getLogger("azure")
-
-    # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
-    logger.setLevel(logging.DEBUG)
-
-    # Direct logging output to stdout (the default):
-    handler = logging.StreamHandler(stream=sys.stdout)
-    # Or direct logging output to a file:
-    # handler = logging.FileHandler(filename = 'sample.log')
-    logger.addHandler(handler)
-
-    # Optional: change the default logging format. Here we add a timestamp.
-    formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
-    handler.setFormatter(formatter)
-    # [END logging]
-
     # Read the values of your model endpoint and key from environment variables
     try:
-        endpoint = os.environ["MODEL_ENDPOINT"]
-        key = os.environ["MODEL_KEY"]
+        endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"]
+        key = os.environ["IMAGE_GENERATION_KEY"]
     except KeyError:
-        print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'")
+        print("Missing environment variable 'IMAGE_GENERATION_ENDPOINT' or 'IMAGE_GENERATION_KEY'")
         print("Set them before running this sample.")
         exit()

     # Create a ModelClient for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential("key"))
+    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))

     # [START image_generation]
     # Generate a single image from a text prompt. This will be a synchronous (blocking) call.
diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
index 3f096158f4eb..f2c4d0f970ef 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
@@ -8,27 +8,30 @@
     from the service using a synchronous client.

 USAGE:
-    python sample_streaming_chat_completion.py
+    python sample_streaming_chat_completions.py

     Set these two environment variables before running the sample:
-    1) MODEL_ENDPOINT - Your endpoint URL, in the form https://<deployment-name>.<azure-region>.inference.ai.azure.com
-       where `deployment-name` is your unique AI Model deployment name, and
-       `azure-region` is the Azure region where your model is deployed.
-    2) MODEL_KEY - Your model key (a 32-character string). Keep it secret.
+    1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
+       https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
+       where `your-deployment-name` is your unique AI Model deployment name, and
+       `your-azure-region` is the Azure region where your model is deployed.
+    2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
""" + import os from azure.ai.inference import ModelClient from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ChatCompletionsDelta from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline.transport import RequestsTransport def sample_streaming_chat_completions(): # Read the values of your model endpoint and key from environment variables try: - endpoint = os.environ["MODEL_ENDPOINT"] - key = os.environ["MODEL_KEY"] + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] except KeyError: - print("Missing environment variable 'MODEL_ENDPOINT' or 'MODEL_KEY'") + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") print("Set them before running this sample.") exit() diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 63b468414a65..ee06b6d6aa0f 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -33,9 +33,9 @@ ServicePreparer = functools.partial( EnvironmentVariableLoader, - "model", - model_endpoint="https://your-azure-resource-name.your-azure-region.inference.ai.azure.com", - model_key="00000000000000000000000000000000", + "chat_completions", + chat_completions_endpoint="https://your-deployment-name.your-azure-region.inference.ai.azure.com", + chat_completions_key="00000000000000000000000000000000", ) @@ -50,12 +50,12 @@ class ModelClientTestBase(AzureRecordedTestCase): PRINT_CHAT_COMPLETION_RESULTS = True def _create_client_for_standard_test(self, sync: bool, get_connection_url: bool = False, **kwargs): - endpoint = kwargs.pop("model_endpoint") - key = kwargs.pop("model_key") + endpoint = kwargs.pop("chat_completions_endpoint") + key = kwargs.pop("chat_completions_key") self._create_client(endpoint, key, sync, get_connection_url) def _create_client_for_authentication_failure(self, sync: bool, **kwargs): - endpoint = kwargs.pop("model_endpoint") + endpoint = kwargs.pop("chat_completions_endpoint") key = "00000000000000000000000000000000" self._create_client(endpoint, key, sync, False) From 0f79e700df72fab33bcad7bc3b3a078a925982ec Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 11 Apr 2024 23:30:53 -0700 Subject: [PATCH 028/112] First set of updates following SDK review meeting --- sdk/ai/azure-ai-inference/README.md | 6 +- .../ai/inference/_operations/_operations.py | 66 +- .../azure/ai/inference/_patch.py | 7 +- .../inference/aio/_operations/_operations.py | 64 +- .../azure/ai/inference/aio/_patch.py | 10 +- .../azure/ai/inference/models/__init__.py | 32 +- .../azure/ai/inference/models/_enums.py | 14 + .../azure/ai/inference/models/_models.py | 648 ++++++++---------- .../azure/ai/inference/models/_patch.py | 45 +- .../sample_chat_completions_async.py | 8 +- .../async_samples/sample_embeddings_async.py | 5 +- .../sample_image_generation_async.py | 5 +- ...sample_streaming_chat_completions_async.py | 31 +- .../samples/sample_chat_completions.py | 14 +- .../samples/sample_embeddings.py | 4 +- .../samples/sample_image_generation.py | 3 +- .../sample_streaming_chat_completions.py | 30 +- .../test_model_inference_async_client.py | 2 +- .../tests/test_model_inference_client.py | 2 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 20 files changed, 449 insertions(+), 549 deletions(-) diff --git 
a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 771ba08fd9ab..5a29eddfc3d0 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -112,8 +112,8 @@ This example demonstrates how to generate a single chat completions response.
 # Do a single chat completion operation. This will be a synchronous (blocking) call.
 result = client.get_chat_completions(
     messages=[
-        ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."),
-        ChatRequestUserMessage(content="How many feet are in a mile?"),
+        SystemMessage(content="You are an AI assistant that helps people find information."),
+        UserMessage(content="How many feet are in a mile?"),
     ],
     # Examples of setting extra parameters (TODO: move this to advanced sample)
     extras=dict(key1="value1", key2="value2"),
@@ -172,7 +172,7 @@ This example demonstrates how to generate an image from a text prompt
 ```python
 # Generate a single image from a text prompt. This will be a synchronous (blocking) call.
-result = client.get_image_generations(
+result = client.generate_images(
     prompt="A painting of a beautiful sunset over a mountain lake.",
     size="1024x768"
 )
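In calling code, the `get_image_generations` to `generate_images` rename reads as follows. A minimal sketch, assuming the image-generation environment variables used by the samples earlier in this series; the fields of the returned `ImageGenerations` object are not spelled out in this excerpt, so the result is printed as-is:

```python
import os

from azure.ai.inference import ModelClient
from azure.core.credentials import AzureKeyCredential

client = ModelClient(
    endpoint=os.environ["IMAGE_GENERATION_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["IMAGE_GENERATION_KEY"]),
)

# A single blocking call under the new method name introduced by this patch.
result = client.generate_images(
    prompt="A painting of a beautiful sunset over a mountain lake.",
    size="1024x768",
)
print(result)
```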
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
index cb91032f5f56..5b611462a865 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
@@ -90,9 +90,7 @@ def build_model_get_embeddings_request(*, model_deployment: Optional[str] = None
     return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)

-def build_model_get_image_generations_request(  # pylint: disable=name-too-long
-    *, model_deployment: Optional[str] = None, **kwargs: Any
-) -> HttpRequest:
+def build_model_generate_images_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest:
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
@@ -116,7 +114,7 @@ def build_model_get_image_generations_request(  # pylint: disable=name-too-long
     return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)

-def build_model_get_model_information_request(**kwargs: Any) -> HttpRequest:  # pylint: disable=name-too-long
+def build_model_get_model_info_request(**kwargs: Any) -> HttpRequest:
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
@@ -168,19 +166,6 @@ def get_chat_completions(
        Example:
            .. code-block:: python

-                # The input is polymorphic. The following are possible polymorphic inputs based off
-                  discriminator "type":
-
-                # JSON input template for discriminator value "json_object":
-                chat_completions_response_format = {
-                    "type": "json_object"
-                }
-
-                # JSON input template for discriminator value "text":
-                chat_completions_response_format = {
-                    "type": "text"
-                }
-
                # JSON input template you can fill out and use as your body input.
                body = {
                    "messages": [
@@ -203,7 +188,9 @@ def get_chat_completions(
                      of generated tokens appearing based on their existing presence in generated text.
                      Positive values will make tokens less likely to appear when they already exist and
                      increase the model's likelihood to output new topics.
-                    "response_format": chat_completions_response_format,
+                    "response_format": "str",  # Optional. An object specifying the format that
+                      the model must output. Used to enable JSON mode. Known values are: "text" and
+                      "json_object".
                    "seed": 0,  # Optional. If specified, the system will make a best effort to
                      sample deterministically such that repeated requests with the same seed and
                      parameters should return the same result. Determinism is not guaranteed.
@@ -288,7 +275,7 @@ def get_chat_completions(
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_tokens: Optional[int] = None,
-        response_format: Optional[_models.ChatCompletionsResponseFormat] = None,
+        response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None,
        stop: Optional[List[str]] = None,
        tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None,
        tool_choice: Optional[
@@ -356,8 +343,8 @@ def get_chat_completions(
        :keyword max_tokens: The maximum number of tokens to generate. Default value is None.
        :paramtype max_tokens: int
        :keyword response_format: An object specifying the format that the model must output. Used to
-          enable JSON mode. Default value is None.
-        :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat
+          enable JSON mode. Known values are: "text" and "json_object". Default value is None.
+        :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat
        :keyword stop: A collection of textual sequences that will end completions generation. Default
          value is None.
        :paramtype stop: list[str]
@@ -513,7 +500,7 @@ def get_chat_completions(
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_tokens: Optional[int] = None,
-        response_format: Optional[_models.ChatCompletionsResponseFormat] = None,
+        response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None,
        stop: Optional[List[str]] = None,
        tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None,
        tool_choice: Optional[
@@ -580,7 +567,7 @@ def get_chat_completions(
        :keyword max_tokens: The maximum number of tokens to generate. Default value is None.
        :paramtype max_tokens: int
        :keyword response_format: An object specifying the format that the model must output. Used to
-          enable JSON mode. Default value is None.
-        :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat
+          enable JSON mode. Known values are: "text" and "json_object". Default value is None.
+        :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat
        :keyword stop: A collection of textual sequences that will end completions generation. Default
          value is None.
        :paramtype stop: list[str]
@@ -606,19 +593,6 @@ def get_chat_completions(
        Example:
            .. code-block:: python

-                # The input is polymorphic. The following are possible polymorphic inputs based off
-                  discriminator "type":
-
-                # JSON input template for discriminator value "json_object":
-                chat_completions_response_format = {
-                    "type": "json_object"
-                }
-
-                # JSON input template for discriminator value "text":
-                chat_completions_response_format = {
-                    "type": "text"
-                }
-
                # JSON input template you can fill out and use as your body input.
                body = {
                    "messages": [
@@ -641,7 +615,9 @@ def get_chat_completions(
                      of generated tokens appearing based on their existing presence in generated text.
                      Positive values will make tokens less likely to appear when they already exist and
                      increase the model's likelihood to output new topics.
-                    "response_format": chat_completions_response_format,
+                    "response_format": "str",  # Optional.
An object specifying the format that + the model must output. Used to enable JSON mode. Known values are: "text" and + "json_object". "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.". @@ -1156,7 +1132,7 @@ def get_embeddings( return deserialized # type: ignore @overload - def get_image_generations( + def generate_images( self, body: JSON, *, @@ -1226,7 +1202,7 @@ def get_image_generations( """ @overload - def get_image_generations( + def generate_images( self, *, prompt: str, @@ -1301,7 +1277,7 @@ def get_image_generations( """ @overload - def get_image_generations( + def generate_images( self, body: IO[bytes], *, @@ -1349,7 +1325,7 @@ def get_image_generations( """ @distributed_trace - def get_image_generations( + def generate_images( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1478,7 +1454,7 @@ def get_image_generations( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_image_generations_request( + _request = build_model_generate_images_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1512,7 +1488,7 @@ def get_image_generations( return deserialized # type: ignore @distributed_trace - def get_model_information(self, **kwargs: Any) -> _models.ModelInformation: + def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: # pylint: disable=line-too-long """Returns information about the AI model. @@ -1544,7 +1520,7 @@ def get_model_information(self, **kwargs: Any) -> _models.ModelInformation: cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) - _request = build_model_get_model_information_request( + _request = build_model_get_model_info_request( api_version=self._config.api_version, headers=_headers, params=_params, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index c551978dc9b7..d648275dbd12 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -45,7 +45,6 @@ class ModelClient(ModelClientGenerated): - @distributed_trace def get_streaming_chat_completions( self, @@ -67,7 +66,7 @@ def get_streaming_chat_completions( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.ChatCompletionsDeltaIterator: + ) -> _models.StreamingChatCompletions: error_map = { 401: ClientAuthenticationError, @@ -120,7 +119,7 @@ def get_streaming_chat_completions( _request.url = self._client.format_url(_request.url) - kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) + kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=True, **kwargs ) @@ -132,7 +131,7 @@ def get_streaming_chat_completions( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.ChatCompletionsDeltaIterator(response.iter_bytes()) + return _models.StreamingChatCompletions(response.iter_bytes()) __all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level diff --git 
a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 27f6bb1b0037..7b9fa63bf841 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -27,10 +27,10 @@ from ... import models as _models from ..._model_base import SdkJSONEncoder, _deserialize from ..._operations._operations import ( + build_model_generate_images_request, build_model_get_chat_completions_request, build_model_get_embeddings_request, - build_model_get_image_generations_request, - build_model_get_model_information_request, + build_model_get_model_info_request, ) from .._vendor import ModelClientMixinABC @@ -77,19 +77,6 @@ async def get_chat_completions( Example: .. code-block:: python - # The input is polymorphic. The following are possible polymorphic inputs based off - discriminator "type": - - # JSON input template for discriminator value "json_object": - chat_completions_response_format = { - "type": "json_object" - } - - # JSON input template for discriminator value "text": - chat_completions_response_format = { - "type": "text" - } - # JSON input template you can fill out and use as your body input. body = { "messages": [ @@ -112,7 +99,9 @@ async def get_chat_completions( of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase the model's likelihood to output new topics. - "response_format": chat_completions_response_format, + "response_format": "str", # Optional. An object specifying the format that + the model must output. Used to enable JSON mode. Known values are: "text" and + "json_object". "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.". @@ -197,7 +186,7 @@ async def get_chat_completions( temperature: Optional[float] = None, top_p: Optional[float] = None, max_tokens: Optional[int] = None, - response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, stop: Optional[List[str]] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ @@ -265,8 +254,8 @@ async def get_chat_completions( :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int :keyword response_format: An object specifying the format that the model must output. Used to - enable JSON mode. Default value is None. - :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat + enable JSON mode. Known values are: "text" and "json_object". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. 
:paramtype stop: list[str] @@ -422,7 +411,7 @@ async def get_chat_completions( temperature: Optional[float] = None, top_p: Optional[float] = None, max_tokens: Optional[int] = None, - response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, stop: Optional[List[str]] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ @@ -489,8 +478,8 @@ async def get_chat_completions( :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int :keyword response_format: An object specifying the format that the model must output. Used to - enable JSON mode. Default value is None. - :paramtype response_format: ~azure.ai.inference.models.ChatCompletionsResponseFormat + enable JSON mode. Known values are: "text" and "json_object". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] @@ -515,19 +504,6 @@ async def get_chat_completions( Example: .. code-block:: python - # The input is polymorphic. The following are possible polymorphic inputs based off - discriminator "type": - - # JSON input template for discriminator value "json_object": - chat_completions_response_format = { - "type": "json_object" - } - - # JSON input template for discriminator value "text": - chat_completions_response_format = { - "type": "text" - } - # JSON input template you can fill out and use as your body input. body = { "messages": [ @@ -550,7 +526,9 @@ async def get_chat_completions( of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase the model's likelihood to output new topics. - "response_format": chat_completions_response_format, + "response_format": "str", # Optional. An object specifying the format that + the model must output. Used to enable JSON mode. Known values are: "text" and + "json_object". "seed": 0, # Optional. If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.". 
@@ -1065,7 +1043,7 @@ async def get_embeddings( return deserialized # type: ignore @overload - async def get_image_generations( + async def generate_images( self, body: JSON, *, @@ -1135,7 +1113,7 @@ async def get_image_generations( """ @overload - async def get_image_generations( + async def generate_images( self, *, prompt: str, @@ -1210,7 +1188,7 @@ async def get_image_generations( """ @overload - async def get_image_generations( + async def generate_images( self, body: IO[bytes], *, @@ -1258,7 +1236,7 @@ async def get_image_generations( """ @distributed_trace_async - async def get_image_generations( + async def generate_images( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1387,7 +1365,7 @@ async def get_image_generations( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_image_generations_request( + _request = build_model_generate_images_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1421,7 +1399,7 @@ async def get_image_generations( return deserialized # type: ignore @distributed_trace_async - async def get_model_information(self, **kwargs: Any) -> _models.ModelInformation: + async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: # pylint: disable=line-too-long """Returns information about the AI model. @@ -1453,7 +1431,7 @@ async def get_model_information(self, **kwargs: Any) -> _models.ModelInformation cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) - _request = build_model_get_model_information_request( + _request = build_model_get_model_info_request( api_version=self._config.api_version, headers=_headers, params=_params, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index e694fb6d7772..05e0cf6ee953 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -27,7 +27,7 @@ map_error, ) from .._operations._operations import build_model_get_chat_completions_request - + if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: @@ -60,7 +60,7 @@ async def get_streaming_chat_completions( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.ChatCompletionsDeltaIterator: + ) -> _models.StreamingChatCompletions: error_map = { 401: ClientAuthenticationError, @@ -112,11 +112,11 @@ async def get_streaming_chat_completions( ) _request.url = self._client.format_url(_request.url) - kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) + kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=True, **kwargs ) - + response = pipeline_response.http_response if response.status_code not in [200]: @@ -124,7 +124,7 @@ async def get_streaming_chat_completions( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.ChatCompletionsDeltaIterator(response.iter_bytes()) + return _models.StreamingChatCompletions(response.iter_bytes()) __all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py 
b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
index d1a319f8888f..1636653f8d42 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
@@ -6,23 +6,17 @@
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------

+from ._models import AssistantMessage
 from ._models import ChatChoice
-from ._models import ChatChoiceDelta
+from ._models import ChatChoiceUpdate
 from ._models import ChatCompletions
-from ._models import ChatCompletionsDelta
 from ._models import ChatCompletionsFunctionToolCall
 from ._models import ChatCompletionsFunctionToolDefinition
-from ._models import ChatCompletionsJsonResponseFormat
 from ._models import ChatCompletionsNamedToolSelection
-from ._models import ChatCompletionsResponseFormat
-from ._models import ChatCompletionsTextResponseFormat
 from ._models import ChatCompletionsToolCall
 from ._models import ChatCompletionsToolDefinition
-from ._models import ChatRequestAssistantMessage
+from ._models import ChatCompletionsUpdate
 from ._models import ChatRequestMessage
-from ._models import ChatRequestSystemMessage
-from ._models import ChatRequestToolMessage
-from ._models import ChatRequestUserMessage
 from ._models import ChatResponseMessage
 from ._models import CompletionsUsage
 from ._models import EmbeddingItem
@@ -33,8 +27,12 @@
 from ._models import ImageGenerationData
 from ._models import ImageGenerations
 from ._models import ModelInformation
+from ._models import SystemMessage
+from ._models import ToolMessage
+from ._models import UserMessage

 from ._enums import CapacityType
+from ._enums import ChatCompletionsResponseFormat
 from ._enums import ChatCompletionsToolSelectionPreset
 from ._enums import ChatRole
 from ._enums import CompletionsFinishReason
@@ -47,23 +45,17 @@
 from ._patch import patch_sdk as _patch_sdk

 __all__ = [
+    "AssistantMessage",
     "ChatChoice",
-    "ChatChoiceDelta",
+    "ChatChoiceUpdate",
     "ChatCompletions",
-    "ChatCompletionsDelta",
     "ChatCompletionsFunctionToolCall",
     "ChatCompletionsFunctionToolDefinition",
-    "ChatCompletionsJsonResponseFormat",
     "ChatCompletionsNamedToolSelection",
-    "ChatCompletionsResponseFormat",
-    "ChatCompletionsTextResponseFormat",
     "ChatCompletionsToolCall",
     "ChatCompletionsToolDefinition",
-    "ChatRequestAssistantMessage",
+    "ChatCompletionsUpdate",
     "ChatRequestMessage",
-    "ChatRequestSystemMessage",
-    "ChatRequestToolMessage",
-    "ChatRequestUserMessage",
     "ChatResponseMessage",
     "CompletionsUsage",
     "EmbeddingItem",
@@ -74,7 +66,11 @@
     "ImageGenerationData",
     "ImageGenerations",
     "ModelInformation",
+    "SystemMessage",
+    "ToolMessage",
+    "UserMessage",
     "CapacityType",
+    "ChatCompletionsResponseFormat",
     "ChatCompletionsToolSelectionPreset",
     "ChatRole",
     "CompletionsFinishReason",
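The net effect of these renames on calling code: request messages use the short names, and the response format becomes a plain enum value (introduced in the `_enums.py` diff just below) instead of a polymorphic object. A minimal sketch under the renamed API, assuming the chat environment variables used elsewhere in this series:

```python
import os

from azure.ai.inference import ModelClient
from azure.ai.inference.models import ChatCompletionsResponseFormat, SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

client = ModelClient(
    endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
)

# SystemMessage/UserMessage replace ChatRequestSystemMessage/ChatRequestUserMessage,
# and response_format now accepts a string or the new enum value directly.
result = client.get_chat_completions(
    messages=[
        SystemMessage(content="You reply with valid JSON only."),
        UserMessage(content="List three primary colors as a JSON array."),
    ],
    response_format=ChatCompletionsResponseFormat.JSON_OBJECT,
)
print(result.choices[0].message.content)
```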
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
index 4df78cbcb461..cfe28a49ae65 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
@@ -19,6 +19,20 @@ class CapacityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """Your capacity has not been affected by the usage amount (token count) reported here."""

+class ChatCompletionsResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """A representation of a response format configuration usable by Chat Completions. Can be used to
+    enable JSON
+    mode.
+    """
+
+    TEXT = "text"
+    """The standard Chat Completions response format that can freely generate text and is not
+    guaranteed to produce response
+    content that adheres to a specific schema."""
+    JSON_OBJECT = "json_object"
+    """A response format for Chat Completions that restricts responses to emitting valid JSON objects."""
+
+
 class ChatCompletionsToolSelectionPreset(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """Represents a generic policy for how a chat completions tool may be selected."""
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
index fdf5f64dfe4b..3fafe3a11a18 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
@@ -26,6 +26,81 @@
 JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object

+class ChatRequestMessage(_model_base.Model):
+    """An abstract representation of a chat message as provided in a request.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    AssistantMessage, SystemMessage, ToolMessage, UserMessage
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar role: The chat role associated with this message. Required. Known values are: "system",
+     "user", "assistant", and "tool".
+    :vartype role: str or ~azure.ai.inference.models.ChatRole
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    role: str = rest_discriminator(name="role")
+    """The chat role associated with this message. Required. Known values are: \"system\", \"user\",
+     \"assistant\", and \"tool\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        role: str,
+    ):
+        ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class AssistantMessage(ChatRequestMessage, discriminator="assistant"):
+    """A request chat message representing response or action from the assistant.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar role: The chat role associated with this message, which is always 'assistant' for
+     assistant messages. Required. The role that provides responses to system-instructed,
+     user-prompted input.
+    :vartype role: str or ~azure.ai.inference.models.ASSISTANT
+    :ivar content: The content of the message. Required.
+    :vartype content: str
+    """
+
+    role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'assistant' for assistant messages.
+     Required. The role that provides responses to system-instructed, user-prompted input."""
+    content: str = rest_field()
+    """The content of the message. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        content: str,
+    ):
+        ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, role=ChatRole.ASSISTANT, **kwargs) + + class ChatChoice(_model_base.Model): """The representation of a single prompt completion as part of an overall chat completions request. @@ -72,7 +147,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class ChatChoiceDelta(_model_base.Model): +class ChatChoiceUpdate(_model_base.Model): """Represents an update to a single prompt completion when the service is streaming updates using Server Sent Events (SSE). Generally, ``n`` choices are generated per provided prompt with a default value of 1. @@ -186,77 +261,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class ChatCompletionsDelta(_model_base.Model): - """Represents a response update to a chat completions request, when the service is streaming - updates - using Server Sent Events (SSE). - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - All required parameters must be populated in order to send to server. - - :ivar id: A unique identifier associated with this chat completions response. Required. - :vartype id: str - :ivar object: The response object type, which is always ``chat.completion``. Required. - :vartype object: str - :ivar created: The first timestamp associated with generation activity for this completions - response, - represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - :vartype created: ~datetime.datetime - :ivar model: The model used for the chat completion. Required. - :vartype model: str - :ivar usage: Usage information for tokens processed and generated as part of this completions - operation. Required. - :vartype usage: ~azure.ai.inference.models.CompletionsUsage - :ivar choices: An update to the collection of completion choices associated with this - completions response. - Generally, ``n`` choices are generated per provided prompt with a default value of 1. - Token limits and other settings may limit the number of choices generated. Required. - :vartype choices: list[~azure.ai.inference.models.ChatChoiceDelta] - """ - - id: str = rest_field() - """A unique identifier associated with this chat completions response. Required.""" - object: str = rest_field() - """The response object type, which is always ``chat.completion``. Required.""" - created: datetime.datetime = rest_field(format="unix-timestamp") - """The first timestamp associated with generation activity for this completions response, - represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" - model: str = rest_field() - """The model used for the chat completion. Required.""" - usage: "_models.CompletionsUsage" = rest_field() - """Usage information for tokens processed and generated as part of this completions operation. - Required.""" - choices: List["_models.ChatChoiceDelta"] = rest_field() - """An update to the collection of completion choices associated with this completions response. - Generally, ``n`` choices are generated per provided prompt with a default value of 1. - Token limits and other settings may limit the number of choices generated. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - object: str, - created: datetime.datetime, - model: str, - usage: "_models.CompletionsUsage", - choices: List["_models.ChatChoiceDelta"], - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class ChatCompletionsToolCall(_model_base.Model): """An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested @@ -317,246 +321,15 @@ class ChatCompletionsFunctionToolCall(ChatCompletionsToolCall, discriminator="fu type: Literal["function"] = rest_discriminator(name="type") # type: ignore """The type of tool call, in this case always 'function'. Required. Default value is \"function\".""" - function: "_models.FunctionCall" = rest_field() - """The details of the function invocation requested by the tool call. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - function: "_models.FunctionCall", - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="function", **kwargs) - - -class ChatCompletionsToolDefinition(_model_base.Model): - """An abstract representation of a tool that can be used by the model to improve a chat - completions response. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ChatCompletionsFunctionToolDefinition - - All required parameters must be populated in order to send to server. - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ChatCompletionsFunctionToolDefinition(ChatCompletionsToolDefinition, discriminator="function"): - """The definition information for a chat completions function tool that can call a function in - response to a tool call. - - All required parameters must be populated in order to send to server. - - :ivar type: The object name, which is always 'function'. Required. Default value is "function". - :vartype type: str - :ivar function: The function definition details for the function tool. Required. - :vartype function: ~azure.ai.inference.models.FunctionDefinition - """ - - type: Literal["function"] = rest_discriminator(name="type") # type: ignore - """The object name, which is always 'function'. Required. Default value is \"function\".""" - function: "_models.FunctionDefinition" = rest_field() - """The function definition details for the function tool. 
Required.""" - - @overload - def __init__( - self, - *, - function: "_models.FunctionDefinition", - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="function", **kwargs) - - -class ChatCompletionsResponseFormat(_model_base.Model): - """An abstract representation of a response format configuration usable by Chat Completions. Can - be used to enable JSON - mode. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ChatCompletionsJsonResponseFormat, ChatCompletionsTextResponseFormat - - All required parameters must be populated in order to send to server. - - :ivar type: The discriminated type for the response format. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The discriminated type for the response format. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ChatCompletionsJsonResponseFormat(ChatCompletionsResponseFormat, discriminator="json_object"): - """A response format for Chat Completions that restricts responses to emitting valid JSON objects. - - All required parameters must be populated in order to send to server. - - :ivar type: The discriminated object type, which is always 'json_object' for this format. - Required. Default value is "json_object". - :vartype type: str - """ - - type: Literal["json_object"] = rest_discriminator(name="type") # type: ignore - """The discriminated object type, which is always 'json_object' for this format. Required. Default - value is \"json_object\".""" - - -class ChatCompletionsNamedToolSelection(_model_base.Model): - """An abstract representation of an explicit, named tool selection to use for a chat completions - request. - - All required parameters must be populated in order to send to server. - - :ivar type: The object type. Required. - :vartype type: str - """ - - type: str = rest_discriminator(name="type") - """The object type. Required.""" - - -class ChatCompletionsTextResponseFormat(ChatCompletionsResponseFormat, discriminator="text"): - """The standard Chat Completions response format that can freely generate text and is not - guaranteed to produce response - content that adheres to a specific schema. - - All required parameters must be populated in order to send to server. - - :ivar type: The discriminated object type, which is always 'text' for this format. Required. - Default value is "text". - :vartype type: str - """ - - type: Literal["text"] = rest_discriminator(name="type") # type: ignore - """The discriminated object type, which is always 'text' for this format. Required. Default value - is \"text\".""" - - -class ChatRequestMessage(_model_base.Model): - """An abstract representation of a chat message as provided in a request. - - You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: - ChatRequestAssistantMessage, ChatRequestSystemMessage, ChatRequestToolMessage, - ChatRequestUserMessage - - All required parameters must be populated in order to send to server. - - :ivar role: The chat role associated with this message. Required. Known values are: "system", - "user", "assistant", and "tool". - :vartype role: str or ~azure.ai.inference.models.ChatRole - """ - - __mapping__: Dict[str, _model_base.Model] = {} - role: str = rest_discriminator(name="role") - """The chat role associated with this message. Required. Known values are: \"system\", \"user\", - \"assistant\", and \"tool\".""" - - @overload - def __init__( - self, - *, - role: str, - ): - ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ChatRequestAssistantMessage(ChatRequestMessage, discriminator="assistant"): - """A request chat message representing response or action from the assistant. - - All required parameters must be populated in order to send to server. - - :ivar role: The chat role associated with this message, which is always 'assistant' for - assistant messages. Required. The role that provides responses to system-instructed, - user-prompted input. - :vartype role: str or ~azure.ai.inference.models.ASSISTANT - :ivar content: The content of the message. Required. - :vartype content: str - """ - - role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role") # type: ignore - """The chat role associated with this message, which is always 'assistant' for assistant messages. - Required. The role that provides responses to system-instructed, user-prompted input.""" - content: str = rest_field() - """The content of the message. Required.""" + function: "_models.FunctionCall" = rest_field() + """The details of the function invocation requested by the tool call. Required.""" @overload def __init__( self, *, - content: str, + id: str, # pylint: disable=redefined-builtin + function: "_models.FunctionCall", ): ... @@ -568,34 +341,31 @@ def __init__(self, mapping: Mapping[str, Any]): """ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, role=ChatRole.ASSISTANT, **kwargs) + super().__init__(*args, type="function", **kwargs) -class ChatRequestSystemMessage(ChatRequestMessage, discriminator="system"): - """A request chat message containing system instructions that influence how the model will - generate a chat completions - response. +class ChatCompletionsToolDefinition(_model_base.Model): + """An abstract representation of a tool that can be used by the model to improve a chat + completions response. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatCompletionsFunctionToolDefinition All required parameters must be populated in order to send to server. - :ivar role: The chat role associated with this message, which is always 'system' for system - messages. Required. The role that instructs or sets the behavior of the assistant. - :vartype role: str or ~azure.ai.inference.models.SYSTEM - :ivar content: The contents of the system message. Required. - :vartype content: str + :ivar type: The object type. Required. Default value is None. 
+ :vartype type: str """ - role: Literal[ChatRole.SYSTEM] = rest_discriminator(name="role") # type: ignore - """The chat role associated with this message, which is always 'system' for system messages. - Required. The role that instructs or sets the behavior of the assistant.""" - content: str = rest_field() - """The contents of the system message. Required.""" + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" @overload def __init__( self, *, - content: str, + type: str, ): ... @@ -607,38 +377,31 @@ def __init__(self, mapping: Mapping[str, Any]): """ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, role=ChatRole.SYSTEM, **kwargs) + super().__init__(*args, **kwargs) -class ChatRequestToolMessage(ChatRequestMessage, discriminator="tool"): - """A request chat message representing requested output from a configured tool. +class ChatCompletionsFunctionToolDefinition(ChatCompletionsToolDefinition, discriminator="function"): + """The definition information for a chat completions function tool that can call a function in + response to a tool call. All required parameters must be populated in order to send to server. - :ivar role: The chat role associated with this message, which is always 'tool' for tool - messages. Required. The role that represents extension tool activity within a chat completions - operation. - :vartype role: str or ~azure.ai.inference.models.TOOL - :ivar content: The content of the message. Required. - :vartype content: str - :ivar tool_call_id: The ID of the tool call resolved by the provided content. Required. - :vartype tool_call_id: str + :ivar type: The object name, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The function definition details for the function tool. Required. + :vartype function: ~azure.ai.inference.models.FunctionDefinition """ - role: Literal[ChatRole.TOOL] = rest_discriminator(name="role") # type: ignore - """The chat role associated with this message, which is always 'tool' for tool messages. Required. - The role that represents extension tool activity within a chat completions operation.""" - content: str = rest_field() - """The content of the message. Required.""" - tool_call_id: str = rest_field() - """The ID of the tool call resolved by the provided content. Required.""" + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The object name, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.FunctionDefinition" = rest_field() + """The function definition details for the function tool. Required.""" @overload def __init__( self, *, - content: str, - tool_call_id: str, + function: "_models.FunctionDefinition", ): ... @@ -650,34 +413,80 @@ def __init__(self, mapping: Mapping[str, Any]): """ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, role=ChatRole.TOOL, **kwargs) + super().__init__(*args, type="function", **kwargs) -class ChatRequestUserMessage(ChatRequestMessage, discriminator="user"): - """A request chat message representing user input to the assistant. +class ChatCompletionsNamedToolSelection(_model_base.Model): + """An abstract representation of an explicit, named tool selection to use for a chat completions + request. 
All required parameters must be populated in order to send to server. - :ivar role: The chat role associated with this message, which is always 'user' for user - messages. Required. The role that provides input for chat completions. - :vartype role: str or ~azure.ai.inference.models.USER - :ivar content: The contents of the user message, with available input types varying by selected - model. Required. - :vartype content: str + :ivar type: The object type. Required. + :vartype type: str """ - role: Literal[ChatRole.USER] = rest_discriminator(name="role") # type: ignore - """The chat role associated with this message, which is always 'user' for user messages. Required. - The role that provides input for chat completions.""" - content: str = rest_field() - """The contents of the user message, with available input types varying by selected model. + type: str = rest_discriminator(name="type") + """The object type. Required.""" + + +class ChatCompletionsUpdate(_model_base.Model): + """Represents a response update to a chat completions request, when the service is streaming + updates + using Server Sent Events (SSE). + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + All required parameters must be populated in order to send to server. + + :ivar id: A unique identifier associated with this chat completions response. Required. + :vartype id: str + :ivar object: The response object type, which is always ``chat.completion``. Required. + :vartype object: str + :ivar created: The first timestamp associated with generation activity for this completions + response, + represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + :vartype created: ~datetime.datetime + :ivar model: The model used for the chat completion. Required. + :vartype model: str + :ivar usage: Usage information for tokens processed and generated as part of this completions + operation. Required. + :vartype usage: ~azure.ai.inference.models.CompletionsUsage + :ivar choices: An update to the collection of completion choices associated with this + completions response. + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. Required. + :vartype choices: list[~azure.ai.inference.models.ChatChoiceUpdate] + """ + + id: str = rest_field() + """A unique identifier associated with this chat completions response. Required.""" + object: str = rest_field() + """The response object type, which is always ``chat.completion``. Required.""" + created: datetime.datetime = rest_field(format="unix-timestamp") + """The first timestamp associated with generation activity for this completions response, + represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" + model: str = rest_field() + """The model used for the chat completion. Required.""" + usage: "_models.CompletionsUsage" = rest_field() + """Usage information for tokens processed and generated as part of this completions operation. Required.""" + choices: List["_models.ChatChoiceUpdate"] = rest_field() + """An update to the collection of completion choices associated with this completions response. + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. 
Required.""" @overload def __init__( self, *, - content: str, + id: str, # pylint: disable=redefined-builtin + object: str, + created: datetime.datetime, + model: str, + usage: "_models.CompletionsUsage", + choices: List["_models.ChatChoiceUpdate"], ): ... @@ -689,7 +498,7 @@ def __init__(self, mapping: Mapping[str, Any]): """ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, role=ChatRole.USER, **kwargs) + super().__init__(*args, **kwargs) class ChatResponseMessage(_model_base.Model): @@ -1158,3 +967,124 @@ def __init__(self, mapping: Mapping[str, Any]): def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation super().__init__(*args, **kwargs) + + +class SystemMessage(ChatRequestMessage, discriminator="system"): + """A request chat message containing system instructions that influence how the model will + generate a chat completions + response. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message, which is always 'system' for system + messages. Required. The role that instructs or sets the behavior of the assistant. + :vartype role: str or ~azure.ai.inference.models.SYSTEM + :ivar content: The contents of the system message. Required. + :vartype content: str + """ + + role: Literal[ChatRole.SYSTEM] = rest_discriminator(name="role") # type: ignore + """The chat role associated with this message, which is always 'system' for system messages. + Required. The role that instructs or sets the behavior of the assistant.""" + content: str = rest_field() + """The contents of the system message. Required.""" + + @overload + def __init__( + self, + *, + content: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, role=ChatRole.SYSTEM, **kwargs) + + +class ToolMessage(ChatRequestMessage, discriminator="tool"): + """A request chat message representing requested output from a configured tool. + + All required parameters must be populated in order to send to server. + + :ivar role: The chat role associated with this message, which is always 'tool' for tool + messages. Required. The role that represents extension tool activity within a chat completions + operation. + :vartype role: str or ~azure.ai.inference.models.TOOL + :ivar content: The content of the message. Required. + :vartype content: str + :ivar tool_call_id: The ID of the tool call resolved by the provided content. Required. + :vartype tool_call_id: str + """ + + role: Literal[ChatRole.TOOL] = rest_discriminator(name="role") # type: ignore + """The chat role associated with this message, which is always 'tool' for tool messages. Required. + The role that represents extension tool activity within a chat completions operation.""" + content: str = rest_field() + """The content of the message. Required.""" + tool_call_id: str = rest_field() + """The ID of the tool call resolved by the provided content. Required.""" + + @overload + def __init__( + self, + *, + content: str, + tool_call_id: str, + ): + ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+    :type mapping: Mapping[str, Any]
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, role=ChatRole.TOOL, **kwargs)
+
+
+class UserMessage(ChatRequestMessage, discriminator="user"):
+    """A request chat message representing user input to the assistant.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar role: The chat role associated with this message, which is always 'user' for user
+     messages. Required. The role that provides input for chat completions.
+    :vartype role: str or ~azure.ai.inference.models.USER
+    :ivar content: The contents of the user message, with available input types varying by selected
+     model. Required.
+    :vartype content: str
+    """
+
+    role: Literal[ChatRole.USER] = rest_discriminator(name="role")  # type: ignore
+    """The chat role associated with this message, which is always 'user' for user messages. Required.
+     The role that provides input for chat completions."""
+    content: str = rest_field()
+    """The contents of the user message, with available input types varying by selected model.
+     Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        content: str,
+    ):
+        ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, role=ChatRole.USER, **kwargs)
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
index 493da12809fc..7861aee3eb50 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
@@ -17,45 +17,41 @@
 from azure.core.rest import HttpResponse

-class ChatCompletionsDeltaIterator:
-    """Represents an interator over ChatCompletionsDelta objects. It can be used for either synchronous or
-    asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream
-    into chat completions updates, each one represented by a ChatCompletionsDelta object.
+class StreamingChatCompletions:
+    """Represents an iterator over ChatCompletionsUpdate objects. It can be used for either synchronous or
+    asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream
+    into chat completions updates, each one represented by a ChatCompletionsUpdate object.
    """

    # Enable console logs for debugging. For development only, will be removed before release.
ENABLE_CLASS_LOGS = False # The prefix of each line in the SSE stream that contains a JSON string - # to deserialize into a ChatCompletionsDelta object + # to deserialize into a ChatCompletionsUpdate object SSE_DATA_EVENT_PREFIX = "data: " # The line indicating the end of the SSE stream SSE_DATA_EVENT_DONE = "data: [DONE]" - def __init__(self, bytes_iterator: Union[types.AsyncGeneratorType, types.GeneratorType]): self._bytes_iterator = bytes_iterator - self._is_async_iterator = isinstance(self._bytes_iterator, types.AsyncGeneratorType) + self._is_async_iterator = isinstance(self._bytes_iterator, types.AsyncGeneratorType) self._queue = queue.Queue() self._incomplete_json = "" self._done = False - def __aiter__(self): - if (not self._is_async_iterator): + if not self._is_async_iterator: raise ValueError("This method is only supported for async iterators") return self - def __iter__(self): - if (self._is_async_iterator): + if self._is_async_iterator: raise ValueError("This method is not supported for async iterators") return self - - async def __anext__(self) -> _models.ChatCompletionsDelta: - if (not self._is_async_iterator): + async def __anext__(self) -> _models.ChatCompletionsUpdate: + if not self._is_async_iterator: raise ValueError("This method is only supported for async iterators") if self._queue.empty(): await self._read_next_block_async() @@ -64,9 +60,8 @@ async def __anext__(self) -> _models.ChatCompletionsDelta: raise StopAsyncIteration return self._queue.get() - - def __next__(self) -> _models.ChatCompletionsDelta: - if (self._is_async_iterator): + def __next__(self) -> _models.ChatCompletionsUpdate: + if self._is_async_iterator: raise ValueError("This method is not supported for async iterators") if self._queue.empty(): self._read_next_block() @@ -75,7 +70,6 @@ def __next__(self) -> _models.ChatCompletionsDelta: raise StopIteration return self._queue.get() - async def _read_next_block_async(self): start_time = 0.0 if self.ENABLE_CLASS_LOGS: @@ -88,7 +82,6 @@ async def _read_next_block_async(self): return self._deserialize_and_add_to_queue(element, start_time) - def _read_next_block(self): start_time = 0.0 if self.ENABLE_CLASS_LOGS: @@ -101,14 +94,13 @@ def _read_next_block(self): return self._deserialize_and_add_to_queue(element, start_time) - def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0): if self.ENABLE_CLASS_LOGS: print(f"Elapsed time: {int(1000*(time.time()- start_time))}ms") print(f"Size: {len(element)} bytes") - # Clear the queue of ChatCompletionsDelta before processing the next block + # Clear the queue of ChatCompletionsUpdate before processing the next block self._queue.queue.clear() # Convert `bytes` to string and split the string by newline, while keeping the new line char. @@ -141,33 +133,30 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) return # If you reached here, the line should contain `data: {...}\n` - # where the curly braces contain a valid JSON object. Deserialize it into a ChatCompletionsDelta object + # where the curly braces contain a valid JSON object. Deserialize it into a ChatCompletionsUpdate object # and add it to the queue. 
self._queue.put( - _models.ChatCompletionsDelta._deserialize(json.loads(element[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []) + _models.ChatCompletionsUpdate._deserialize(json.loads(element[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []) ) if self.ENABLE_CLASS_LOGS: print("[added]") - def __enter__(self): return self - def __exit__(self) -> None: self.close() - def close(self) -> None: self._bytes_iterator.close() async def aclose(self) -> None: - await self._bytes_iterator.aclose() + await self._bytes_iterator.aclose() __all__: List[str] = [ - "ChatCompletionsDeltaIterator" + "StreamingChatCompletions" ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 82909fceb465..eb3fa0d0d5fc 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -18,10 +18,11 @@ """ import asyncio + async def sample_chat_completions_async(): import os from azure.ai.inference.aio import ModelClient - from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage + from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables @@ -40,8 +41,8 @@ async def sample_chat_completions_async(): future = asyncio.ensure_future( client.get_chat_completions( messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="How many feet are in a mile?"), + SystemMessage(content="You are an AI assistant that helps people find information."), + UserMessage(content="How many feet are in a mile?"), ] ) ) @@ -70,6 +71,7 @@ async def sample_chat_completions_async(): print(f"usage.completion_tokens: {result.usage.completion_tokens}") print(f"usage.total_tokens: {result.usage.total_tokens}") + async def main(): await sample_chat_completions_async() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 4c4a4f551f2d..358bcce53751 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -18,6 +18,7 @@ """ import asyncio + async def sample_embeddings_async(): import os from azure.ai.inference.aio import ModelClient @@ -51,7 +52,9 @@ async def sample_embeddings_async(): print("Embeddings result:") for item in result.data: length = len(item.embedding) - print(f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]") + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) print(f"id: {result.id}") print(f"model: {result.model}") print(f"object: {result.object}") diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index 91892c580c0a..bf2a251bd049 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ 
b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -18,11 +18,12 @@ """ import asyncio + async def sample_image_generation_async(): import os from azure.ai.inference.aio import ModelClient from azure.core.credentials import AzureKeyCredential - + # Read the values of your model endpoint and key from environment variables try: endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"] @@ -37,7 +38,7 @@ async def sample_image_generation_async(): # Generate an image from text prompt. This will be an asynchronously (non-blocking) call. future = asyncio.ensure_future( - client.get_image_generations(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") + client.generate_images(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") ) # Loop until the operation is done diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py index 0e6d3d57f15f..d006b1a193dd 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py @@ -20,9 +20,10 @@ import asyncio import os from azure.ai.inference.aio import ModelClient -from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ChatCompletionsDelta +from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate from azure.core.credentials import AzureKeyCredential + async def sample_streaming_chat_completions_async(): # Read the values of your model endpoint and key from environment variables @@ -41,13 +42,13 @@ async def sample_streaming_chat_completions_async(): future = asyncio.ensure_future( client.get_streaming_chat_completions( messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day."), + SystemMessage(content="You are an AI assistant that helps people find information."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), ] ) ) - # Loop until you get the HTTP response headers from the service + # Loop until you get the HTTP response headers from the service while not future.done(): await asyncio.sleep(0.1) print("Waiting...") @@ -67,17 +68,21 @@ async def sample_streaming_chat_completions_async(): await client.close() -def print_chat_completions_delta(element: ChatCompletionsDelta): - print(f"content: {repr(element.choices[0].delta.content)}, "\ - f"role: {element.choices[0].delta.role}, "\ - f"finish_reason: {element.choices[0].finish_reason}, "\ - f"index: {element.choices[0].index}") +def print_chat_completions_delta(element: ChatCompletionsUpdate): + print( + f"content: {repr(element.choices[0].delta.content)}, " + f"role: {element.choices[0].delta.role}, " + f"finish_reason: {element.choices[0].finish_reason}, " + f"index: {element.choices[0].index}" + ) print(f"id: {element.id}, created: {element.created}, model: {element.model}, object: {element.object}") if element.usage is not None: - print(f"usage: capacity_type: {element.usage.capacity_type}, "\ - f"prompt_tokens: {element.usage.prompt_tokens}, "\ - f"completion_tokens: {element.usage.completion_tokens}, "\ - f"usage.total_tokens: {element.usage.total_tokens}") + print( + f"usage: capacity_type: 
{element.usage.capacity_type}, " + f"prompt_tokens: {element.usage.prompt_tokens}, " + f"completion_tokens: {element.usage.completion_tokens}, " + f"usage.total_tokens: {element.usage.total_tokens}" + ) async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index 6a35783cc9b2..ed1dc6b5e2da 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -17,10 +17,11 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions(): import os - from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage - + from azure.ai.inference.models import SystemMessage, UserMessage + # Read the values of your model endpoint and key from environment variables try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] @@ -35,18 +36,15 @@ def sample_chat_completions(): from azure.core.credentials import AzureKeyCredential # Create Model Client for synchronous operations - client = ModelClient( - endpoint=endpoint, - credential=AzureKeyCredential(key) - ) + client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # [END create_client] # [START chat_completions] # Do a single chat completion operation. This will be a synchronously (blocking) call. result = client.get_chat_completions( messages=[ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="How many feet are in a mile?"), + SystemMessage(content="You are an AI assistant that helps people find information."), + UserMessage(content="How many feet are in a mile?"), ], # Examples of setting extra parameters (TODO: move this to advanced sample) extras=dict(key1="value1", key2="value2"), diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 10a4295c6941..2048b87a5baa 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -43,7 +43,9 @@ def sample_embeddings(): print("Embeddings result:") for item in result.data: length = len(item.embedding) - print(f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]") + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) print(f"id: {result.id}") print(f"model: {result.model}") print(f"object: {result.object}") diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py index cf4d3fd720aa..95dfa1ed5e92 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py @@ -17,6 +17,7 @@ 2) IMAGE_GENERATION_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_image_generation(): import os from azure.ai.inference import ModelClient @@ -36,7 +37,7 @@ def sample_image_generation(): # [START image_generation] # Generate a single image from a text prompt. This will be a synchronously (blocking) call. 
- result = client.get_image_generations( + result = client.generate_images( prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768" ) diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py index f2c4d0f970ef..382c5b75037f 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py @@ -20,10 +20,11 @@ import os from azure.ai.inference import ModelClient -from azure.ai.inference.models import ChatRequestSystemMessage, ChatRequestUserMessage, ChatCompletionsDelta +from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate from azure.core.credentials import AzureKeyCredential from azure.core.pipeline.transport import RequestsTransport + def sample_streaming_chat_completions(): # Read the values of your model endpoint and key from environment variables @@ -39,8 +40,8 @@ def sample_streaming_chat_completions(): client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) messages = [ - ChatRequestSystemMessage(content="You are an AI assistant that helps people find information."), - ChatRequestUserMessage(content="Give me 5 good reasons why I should exercise every day."), + SystemMessage(content="You are an AI assistant that helps people find information."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), ] # [START streaming_chat_completions] @@ -57,17 +58,22 @@ def sample_streaming_chat_completions(): # [END streaming_chat_completions] -def print_chat_completions_delta(element: ChatCompletionsDelta): - print(f"content: {repr(element.choices[0].delta.content)}, "\ - f"role: {element.choices[0].delta.role}, "\ - f"finish_reason: {element.choices[0].finish_reason}, "\ - f"index: {element.choices[0].index}") +def print_chat_completions_delta(element: ChatCompletionsUpdate): + print( + f"content: {repr(element.choices[0].delta.content)}, " + f"role: {element.choices[0].delta.role}, " + f"finish_reason: {element.choices[0].finish_reason}, " + f"index: {element.choices[0].index}" + ) print(f"id: {element.id}, created: {element.created}, model: {element.model}, object: {element.object}") if element.usage is not None: - print(f"usage: capacity_type: {element.usage.capacity_type}, "\ - f"prompt_tokens: {element.usage.prompt_tokens}, "\ - f"completion_tokens: {element.usage.completion_tokens}, "\ - f"usage.total_tokens: {element.usage.total_tokens}") + print( + f"usage: capacity_type: {element.usage.capacity_type}, " + f"prompt_tokens: {element.usage.prompt_tokens}, " + f"completion_tokens: {element.usage.completion_tokens}, " + f"usage.total_tokens: {element.usage.total_tokens}" + ) + if __name__ == "__main__": sample_streaming_chat_completions() diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index eb47da202ccc..77095d982512 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -24,7 +24,7 @@ async def test_async_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=False, **kwargs) - messages = [sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] + messages = [sdk.models.UserMessage(content="How many feet are in a mile?")] await 
self._do_async_chat_completions(messages=messages, **kwargs) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 05fea3f8dd83..f857e19c9c57 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -25,7 +25,7 @@ def test_chat_completion(self, **kwargs): self._create_client_for_standard_test(sync=True, **kwargs) - messages = [sdk.models.ChatRequestUserMessage(content="How many feet are in a mile?")] + messages = [sdk.models.UserMessage(content="How many feet are in a mile?")] self._do_chat_completions(messages=messages, **kwargs) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 460742949ee4..4d017a67d8ee 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 178c216eabb49f96cf6eabbd8e34f3d026757208 +commit: b57d3b3f4b39b8eda9bcf212830cfbd813fd23e7 repo: Azure/azure-rest-api-specs additionalDirectories: From 8e274b64c0322fa900c5abe762c1f97746a0ade6 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 12 Apr 2024 14:34:32 -0700 Subject: [PATCH 029/112] New client names. Other minor model name changes --- sdk/ai/azure-ai-inference/README.md | 162 ++++++----- .../azure/ai/inference/__init__.py | 8 +- .../azure/ai/inference/_client.py | 189 ++++++++++++- .../azure/ai/inference/_configuration.py | 107 +++++++- .../ai/inference/_operations/__init__.py | 8 +- .../ai/inference/_operations/_operations.py | 254 +++++++++++++++--- .../azure/ai/inference/_patch.py | 22 +- .../azure/ai/inference/_vendor.py | 28 +- .../azure/ai/inference/aio/__init__.py | 8 +- .../azure/ai/inference/aio/_client.py | 193 ++++++++++++- .../azure/ai/inference/aio/_configuration.py | 107 +++++++- .../ai/inference/aio/_operations/__init__.py | 8 +- .../inference/aio/_operations/_operations.py | 212 ++++++++++++--- .../azure/ai/inference/aio/_patch.py | 17 +- .../azure/ai/inference/aio/_vendor.py | 28 +- .../azure/ai/inference/models/_enums.py | 4 +- .../azure/ai/inference/models/_patch.py | 4 +- sdk/ai/azure-ai-inference/samples/README.md | 36 +-- .../sample_chat_completions_async.py | 11 +- ...ample_chat_completions_streaming_async.py} | 38 +-- .../async_samples/sample_embeddings_async.py | 6 +- .../sample_image_generation_async.py | 6 +- .../samples/sample_chat_completions.py | 39 +-- .../sample_chat_completions_streaming.py | 53 ++++ .../samples/sample_embeddings.py | 28 +- .../samples/sample_image_generation.py | 31 +-- .../sample_streaming_chat_completions.py | 79 ------ sdk/ai/azure-ai-inference/setup.py | 4 +- .../tests/model_inference_test_base.py | 16 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 30 files changed, 1294 insertions(+), 414 deletions(-) rename sdk/ai/azure-ai-inference/samples/async_samples/{sample_streaming_chat_completions_async.py => sample_chat_completions_streaming_async.py} (67%) create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py delete mode 100644 sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 5a29eddfc3d0..8d0f6b81f6c2 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,8 
+1,8 @@
-# Azure model client library for Python
+# Azure model inference client library for Python

-The ModelClient Library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and selfhosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The ModelClient library makes services calls using REST AP version `2024-04-01-preview` specificed here (TODO: insert link). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+The client library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and self-hosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-04-01-preview` specified here (TODO: insert link). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).

-Use the ModelClient library to:
+Use the model inference client library to:
 * Authenticate against the service
 * Get information about the model
@@ -24,37 +24,32 @@ Note that for inference using OpenAI models hosted on Azure you should be using
 * [Python 3.8](https://www.python.org/) or later installed, including [pip](https://pip.pypa.io/en/stable/).
 * An [Azure subscription](https://azure.microsoft.com/free).
-* An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. To construct the `ModelClient`, you will need to pass in the endpoint URL and key associated with your deployed AI model.
+* An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. To construct a client, you will need to pass in the endpoint URL and key associated with your deployed AI model.
 * The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`).
 * The key is a 32-character string.

-### Install the Model Client package
+### Install the package

 ```bash
 pip install azure-ai-inference
 ```

-### Create and authenticate the client
+### Create and authenticate clients

-Assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ModelClient`:
-
-
+The package includes three clients: `ChatCompletionsClient`, `EmbeddingsClient`, and `ImageGenerationClient`. They are all created in a similar manner. For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`:

 ```python
-from azure.ai.inference import ModelClient
+from azure.ai.inference import ChatCompletionsClient
 from azure.core.credentials import AzureKeyCredential

-# Create Model Client for synchronous operations
-client = ModelClient(
+client = ChatCompletionsClient(
     endpoint=endpoint,
     credential=AzureKeyCredential(key)
 )
 ```
-
-
 A synchronous client supports synchronous inference methods, meaning they will block until the service responds with inference results.
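The clients in this patch also implement `close()` and the standard context-manager protocol (see the `__enter__` and `__exit__` methods in `_client.py` further down). A minimal sketch of deterministic cleanup, assuming `endpoint` and `key` are defined as above:

```python
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential

# Entering the "with" block returns the client itself; leaving the block
# calls __exit__, which closes the underlying HTTP pipeline even on error.
with ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client:
    pass  # call inference methods here

# Equivalent explicit cleanup, without a context manager:
client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
try:
    pass  # call inference methods here
finally:
    client.close()
```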
For simplicity the code snippets below all use synchronous methods. The client offers equivalent asynchronous methods which are more commonly used in production.

To create an asynchronous client, install the additional package [aiohttp](https://pypi.org/project/aiohttp/):
@@ -63,11 +58,17 @@
 pip install aiohttp
 ```

-and update the code above to import `ModelClient` from the `aio` namespace:
+and update the code above to import `ChatCompletionsClient` from the `aio` namespace:

 ```python
- import asyncio
- from azure.ai.inference.aio import ModelClient
+import asyncio
+from azure.ai.inference.aio import ChatCompletionsClient
+from azure.core.credentials import AzureKeyCredential
+
+client = ChatCompletionsClient(
+    endpoint=endpoint,
+    credential=AzureKeyCredential(key)
+)
 ```

 ## Key concepts
@@ -95,10 +96,11 @@ Image generation operations target the URL route `/images/generations` on the pr
 The following sections provide code snippets covering these common scenarios:

 * [Chat completions](#chat-completions-example)
+* [Streaming chat completions](#streaming-chat-completions-example)
 * [Embeddings](#embeddings-example)
 * [Image generation](#image-generation-example)

-These snippets use the synchronous `client` from [Create and authenticate the client](#create-and-authenticate-the-client).
+These snippets use the synchronous `client` from [Create and authenticate clients](#create-and-authenticate-clients).

 See the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder for fully working samples for synchronous and asynchronous clients.
@@ -109,35 +111,57 @@ This example demonstrates how to generate a single chat completions.
 ```python
-# Do a single chat completion operation. This will be a synchronously (blocking) call.
-result = client.get_chat_completions(
+from azure.ai.inference import ChatCompletionsClient
+from azure.ai.inference.models import SystemMessage, UserMessage
+from azure.core.credentials import AzureKeyCredential
+
+client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+result = client.create(
     messages=[
-        SystemMessage(content="You are an AI assistant that helps people find information."),
+        SystemMessage(content="You are a helpful assistant."),
         UserMessage(content="How many feet are in a mile?"),
-    ],
-    # Examples of setting extra parameters (TODO: move this to advanced sample)
-    extras=dict(key1="value1", key2="value2"),
+    ]
+)
+
+print(result.choices[0].message.content)
+```
+
+The printed result of course depends on the model. You may get something like this: `Hello! I'd be happy to help answer your question. There are 5,280 feet in a mile`.
+
+To generate completions for additional messages, simply call `client.create` multiple times using the same `client`.
+
+### Streaming chat completions example
+
+This example demonstrates how to generate a chat completion with a streaming response.
+ + + +```python +from azure.ai.inference import ChatCompletionsClient +from azure.ai.inference.models import SystemMessage, UserMessage +from azure.core.credentials import AzureKeyCredential + +client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + +result = client.create_streaming( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), + ] ) -# Print results the the console -print("Chat Completions:") -print(f"choices[0].message.content: {result.choices[0].message.content}") -print(f"choices[0].message.role: {result.choices[0].message.role}") -print(f"choices[0].finish_reason: {result.choices[0].finish_reason}") -print(f"choices[0].index: {result.choices[0].index}") -print(f"id: {result.id}") -print(f"created: {result.created}") -print(f"model: {result.model}") -print(f"object: {result.object}") -print(f"usage.capacity_type: {result.usage.capacity_type}") -print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") -print(f"usage.completion_tokens: {result.usage.completion_tokens}") -print(f"usage.total_tokens: {result.usage.total_tokens}") +for update in result: + print(update.choices[0].delta.content, end="") ``` -To generate completions for additional messages, simply call `get_chat_completions` multiple times using the same `client`. +The printed result of course depends on the model, but you should see the answer progressively get longer as updates get streamed to the client. + +To generate completions for additional messages, simply call `client.create_streaming` multiple times using the same `client`. ### Embeddings example @@ -146,44 +170,48 @@ This example demonstrates how to get embeddings. ```python -# Do a single embeddings operation. This will be a synchronously (blocking) call. -result = client.get_embeddings(input=["first phrase", "second phrase", "third phrase"]) +from azure.ai.inference import EmbeddingsClient +from azure.core.credentials import AzureKeyCredential + +client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + +result = client.create(input=["first phrase", "second phrase", "third phrase"]) -# Print results the the console -print("Embeddings result:") for item in result.data: length = len(item.embedding) - print(f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]") -print(f"id: {result.id}") -print(f"model: {result.model}") -print(f"object: {result.object}") -print(f"usage.input_tokens: {result.usage.input_tokens}") -print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") -print(f"usage.total_tokens: {result.usage.total_tokens}") + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) ``` +The printed result of course depends on the model. You should see something like this: +```txt +data[0]: length=1024, [0.0013399124, -0.01576233, ..., 0.007843018, 0.000238657] +data[1]: length=1024, [0.036590576, -0.0059547424, ..., 0.011405945, 0.004863739] +data[2]: length=1024, [0.04196167, 0.029083252, ..., -0.0027484894, 0.0073127747] +``` + +To generate embeddings for additional phrases, simply call `client.create` multiple times using the same `client`. 
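Embedding vectors are commonly compared with cosine similarity. The sketch below is an illustration only (not part of the package); it assumes the `result` object from the embeddings call above and scores the first two phrases using just the standard library:

```python
import math


def cosine_similarity(a, b):
    # cos(a, b) = dot(a, b) / (|a| * |b|)
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)


# Compare the embeddings of "first phrase" and "second phrase"
score = cosine_similarity(result.data[0].embedding, result.data[1].embedding)
print(f"similarity: {score:.4f}")
```

Scores close to 1.0 indicate semantically similar phrases; scores near 0 indicate unrelated ones.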
+
### Image generation example

-This example demonstrates how to generate and image from a text prompt
+This example demonstrates how to generate an image of size 1024x768 from a text prompt and save the resulting image to an `image.png` file.

```python
-# Generate a single image from a text prompt. This will be a synchronously (blocking) call.
-result = client.generate_images(
-    prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768"
-)
+import base64
+
+from azure.ai.inference import ImageGenerationClient
+from azure.core.credentials import AzureKeyCredential
+
+client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+result = client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768")

-# Save generated image to file and print other results the the console
-print("Image generation result:")
-for index, item in enumerate(result.data):
-    with open(f"image_{index}.png", "wb") as image:
-        image.write(item.b64_json.decode("base64"))
-print(f"id: {result.id}")
-print(f"model: {result.model}")
-print(f"created: {result.created}")
+# Decode the base64-encoded image data and save it to a file
+with open("image.png", "wb") as image:
+    image.write(base64.b64decode(result.data[0].b64_json))
```

@@ -192,7 +220,7 @@

### Exceptions

-The `get_chat_completions`, `get_embeddings` and `get_image_geneartions` methods raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue:
+The `create` and `get_model_info` methods on the clients raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue:

```python
from azure.core.exceptions import HttpResponseError

...

try:
-    result = client.get_chat_completions( ... )
+    result = client.create( ... )
except HttpResponseError as e:
    print(f"Status code: {e.status_code} ({e.reason})")
    print(f"{e}")
@@ -248,11 +276,11 @@ formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
 handler.setFormatter(formatter)
```

-By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct `ModelClient`, or when you call any of the client's operation methods (e.g. `get_chat_completions`).
+By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct the client, or when you call any of the client's `create` methods.
```python
# Create a client with non-redacted logs
-client = ModelClient(
+client = ChatCompletionsClient(
     endpoint=endpoint,
     credential=AzureKeyCredential(key),
     logging_enable=True
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py
index c92dce37ed18..52fb809c15f3 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py
@@ -6,7 +6,9 @@
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------

-from ._client import ModelClient
+from ._client import ChatCompletionsClient
+from ._client import EmbeddingsClient
+from ._client import ImageGenerationClient
 from ._version import VERSION

 __version__ = VERSION
@@ -19,7 +21,9 @@
 from ._patch import patch_sdk as _patch_sdk

 __all__ = [
-    "ModelClient",
+    "ChatCompletionsClient",
+    "EmbeddingsClient",
+    "ImageGenerationClient",
 ]

 __all__.extend([p for p in _patch_all if p not in __all__])
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py
index c0c4a2c537aa..7eb652f09aa9 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py
@@ -14,26 +14,35 @@
 from azure.core.pipeline import policies
 from azure.core.rest import HttpRequest, HttpResponse

-from ._configuration import ModelClientConfiguration
-from ._operations import ModelClientOperationsMixin
+from ._configuration import (
+    ChatCompletionsClientConfiguration,
+    EmbeddingsClientConfiguration,
+    ImageGenerationClientConfiguration,
+)
+from ._operations import (
+    ChatCompletionsClientOperationsMixin,
+    EmbeddingsClientOperationsMixin,
+    ImageGenerationClientOperationsMixin,
+)
 from ._serialization import Deserializer, Serializer

-class ModelClient(ModelClientOperationsMixin):  # pylint: disable=client-accepts-api-version-keyword
-    """ModelClient.
+class ChatCompletionsClient(ChatCompletionsClientOperationsMixin):  # pylint: disable=client-accepts-api-version-keyword
+    """ChatCompletionsClient.

-    :param credential: Credential needed for the client to connect to Azure. Required.
+    :param endpoint: Service host. Required.
+    :type endpoint: str
+    :param credential: Credential used to authenticate requests to the service. Required.
     :type credential: ~azure.core.credentials.AzureKeyCredential
-    :keyword endpoint: Service host. Required.
-    :paramtype endpoint: str
     :keyword api_version: The API version to use for this operation. Default value is
      "2024-04-01-preview". Note that overriding this default value may result in unsupported
      behavior.
:paramtype api_version: str """ - def __init__(self, credential: AzureKeyCredential, *, endpoint: str, **kwargs: Any) -> None: - self._config = ModelClientConfiguration(credential=credential, **kwargs) + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = ChatCompletionsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -51,7 +60,7 @@ def __init__(self, credential: AzureKeyCredential, *, endpoint: str, **kwargs: A policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, self._config.http_logging_policy, ] - self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) self._serialize = Serializer() self._deserialize = Deserializer() @@ -76,13 +85,169 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: """ request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "ChatCompletionsClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) + + +class EmbeddingsClient(EmbeddingsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """EmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = EmbeddingsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. 
+ + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "EmbeddingsClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) + + +class ImageGenerationClient(ImageGenerationClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ImageGenerationClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = ImageGenerationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore def close(self) -> None: self._client.close() - def __enter__(self) -> "ModelClient": + def __enter__(self) -> "ImageGenerationClient": self._client.__enter__() return self diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 7527fe7e3c5a..4d27040de96d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -14,13 +14,15 @@ from ._version import VERSION -class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for ModelClient. +class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ChatCompletionsClient. Note that all parameters used to create this instance are saved as instance attributes. - :param credential: Credential needed for the client to connect to Azure. Required. + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.AzureKeyCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported @@ -28,12 +30,109 @@ class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes :paramtype api_version: str """ - def __init__(self, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AzureKeyCredentialPolicy( + 
self.credential, "Authorization", prefix="Bearer", **kwargs + ) + + +class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for EmbeddingsClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AzureKeyCredentialPolicy( + self.credential, "Authorization", prefix="Bearer", **kwargs + ) + + +class ImageGenerationClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ImageGenerationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint self.credential = credential self.api_version = api_version kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py index 886bf4218356..b5b194f3cca4 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py @@ -6,14 +6,18 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._operations import ModelClientOperationsMixin +from ._operations import ChatCompletionsClientOperationsMixin +from ._operations import EmbeddingsClientOperationsMixin +from ._operations import ImageGenerationClientOperationsMixin from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ModelClientOperationsMixin", + "ChatCompletionsClientOperationsMixin", + "EmbeddingsClientOperationsMixin", + "ImageGenerationClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 5b611462a865..8a024d948a26 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, List, Optional, Type, TypeVar, Union, overload from azure.core.exceptions import ( ClientAuthenticationError, @@ -27,7 +27,7 @@ from .. 
import models as _models from .._model_base import SdkJSONEncoder, _deserialize from .._serialization import Serializer -from .._vendor import ModelClientMixinABC +from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageGenerationClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -42,7 +42,7 @@ _SERIALIZER.client_side_validation = False -def build_model_get_chat_completions_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_chat_completions_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -59,14 +59,33 @@ def build_model_get_chat_completions_request(*, model_deployment: Optional[str] # Construct headers if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_model_get_embeddings_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/info" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_embeddings_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -83,14 +102,33 @@ def build_model_get_embeddings_request(*, model_deployment: Optional[str] = None # Construct headers if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_model_generate_images_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = 
"/info" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_image_generation_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -107,14 +145,14 @@ def build_model_generate_images_request(*, model_deployment: Optional[str] = Non # Construct headers if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_model_get_model_info_request(**kwargs: Any) -> HttpRequest: +def build_image_generation_get_model_info_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -133,9 +171,9 @@ def build_model_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class ModelClientOperationsMixin(ModelClientMixinABC): +class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload - def get_chat_completions( + def create( self, body: JSON, *, @@ -263,7 +301,7 @@ def get_chat_completions( """ @overload - def get_chat_completions( + def create( self, *, messages: List[_models.ChatRequestMessage], @@ -413,7 +451,7 @@ def get_chat_completions( """ @overload - def get_chat_completions( + def create( self, body: IO[bytes], *, @@ -488,7 +526,7 @@ def get_chat_completions( """ @distributed_trace - def get_chat_completions( + def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -688,7 +726,7 @@ def get_chat_completions( } } """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -727,7 +765,7 @@ def get_chat_completions( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_chat_completions_request( + _request = build_chat_completions_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -735,7 +773,10 @@ def get_chat_completions( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access @@ -760,8 +801,76 @@ def get_chat_completions( return deserialized # type: ignore + @distributed_trace + def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + # pylint: disable=line-too-long + 
"""Returns information about the AI model. + + :return: ModelInformation. The ModelInformation is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInformation + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. Required. + "model_provider": "str", # The model provider. Required. + "model_type": "str" # The type of the AI model. Required. Known values are: + "embeddings", "custom", "chat", "text_generation", and "image_generation". + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + + _request = build_chat_completions_get_model_info_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInformation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - def get_embeddings( + def create( self, body: JSON, *, @@ -842,7 +951,7 @@ def get_embeddings( """ @overload - def get_embeddings( + def create( self, *, input: List[str], @@ -918,7 +1027,7 @@ def get_embeddings( """ @overload - def get_embeddings( + def create( self, body: IO[bytes], *, @@ -982,7 +1091,7 @@ def get_embeddings( """ @distributed_trace - def get_embeddings( + def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1072,7 +1181,7 @@ def get_embeddings( } } """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1098,7 +1207,7 @@ def get_embeddings( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_embeddings_request( + _request = build_embeddings_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1106,7 +1215,10 @@ def get_embeddings( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = 
kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access @@ -1131,8 +1243,76 @@ def get_embeddings( return deserialized # type: ignore + @distributed_trace + def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInformation. The ModelInformation is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInformation + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. Required. + "model_provider": "str", # The model provider. Required. + "model_type": "str" # The type of the AI model. Required. Known values are: + "embeddings", "custom", "chat", "text_generation", and "image_generation". + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + + _request = build_embeddings_get_model_info_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInformation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ImageGenerationClientOperationsMixin(ImageGenerationClientMixinABC): @overload - def generate_images( + def create( self, body: JSON, *, @@ -1202,7 +1382,7 @@ def generate_images( """ @overload - def generate_images( + def create( self, *, prompt: str, @@ -1277,7 +1457,7 @@ def generate_images( """ @overload - def generate_images( + def create( self, body: IO[bytes], *, @@ -1325,7 +1505,7 @@ def generate_images( """ @distributed_trace - def generate_images( + def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1419,7 +1599,7 @@ def generate_images( "model": "str" # The model used for the image generation. Required. 
} """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1454,7 +1634,7 @@ def generate_images( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_generate_images_request( + _request = build_image_generation_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1462,7 +1642,10 @@ def generate_images( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access @@ -1507,7 +1690,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: "embeddings", "custom", "chat", "text_generation", and "image_generation". } """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1520,12 +1703,15 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) - _request = build_model_get_model_info_request( + _request = build_image_generation_get_model_info_request( api_version=self._config.api_version, headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index d648275dbd12..85051a26bd9a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -18,9 +18,9 @@ from . 
import models as _models from ._model_base import SdkJSONEncoder, _deserialize from ._serialization import Serializer -from ._vendor import ModelClientMixinABC -from ._operations._operations import build_model_get_chat_completions_request -from ._client import ModelClient as ModelClientGenerated +from ._vendor import ChatCompletionsClientMixinABC +from ._operations._operations import build_chat_completions_create_request +from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from azure.core.exceptions import ( ClientAuthenticationError, @@ -44,9 +44,9 @@ _SERIALIZER.client_side_validation = False -class ModelClient(ModelClientGenerated): +class ChatCompletionsClient(ChatCompletionsClientGenerated): @distributed_trace - def get_streaming_chat_completions( + def create_streaming( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -108,7 +108,7 @@ def get_streaming_chat_completions( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_chat_completions_request( + _request = build_chat_completions_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -116,8 +116,10 @@ def get_streaming_chat_completions( headers=_headers, params=_params, ) - - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access @@ -134,7 +136,9 @@ def get_streaming_chat_completions( return _models.StreamingChatCompletions(response.iter_bytes()) -__all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [ + "ChatCompletionsClient" +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py index 554de774a90f..9e95c0a2f86b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py @@ -8,7 +8,11 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import ModelClientConfiguration +from ._configuration import ( + ChatCompletionsClientConfiguration, + EmbeddingsClientConfiguration, + ImageGenerationClientConfiguration, +) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -17,10 +21,28 @@ from ._serialization import Deserializer, Serializer -class ModelClientMixinABC(ABC): +class ChatCompletionsClientMixinABC(ABC): """DO NOT use this class. It is for internal typing use only.""" _client: "PipelineClient" - _config: ModelClientConfiguration + _config: ChatCompletionsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class EmbeddingsClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "PipelineClient" + _config: EmbeddingsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class ImageGenerationClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "PipelineClient" + _config: ImageGenerationClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index 2f40b73be25b..34ce566598a6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -6,7 +6,9 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._client import ModelClient +from ._client import ChatCompletionsClient +from ._client import EmbeddingsClient +from ._client import ImageGenerationClient try: from ._patch import __all__ as _patch_all @@ -16,7 +18,9 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ModelClient", + "ChatCompletionsClient", + "EmbeddingsClient", + "ImageGenerationClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py index 4551c8d4a36c..a3ff8c0ee133 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py @@ -15,25 +15,34 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._serialization import Deserializer, Serializer -from ._configuration import ModelClientConfiguration -from ._operations import ModelClientOperationsMixin +from ._configuration import ( + ChatCompletionsClientConfiguration, + EmbeddingsClientConfiguration, + ImageGenerationClientConfiguration, +) +from ._operations import ( + ChatCompletionsClientOperationsMixin, + EmbeddingsClientOperationsMixin, + ImageGenerationClientOperationsMixin, +) -class ModelClient(ModelClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword - """ModelClient. +class ChatCompletionsClient(ChatCompletionsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ChatCompletionsClient. - :param credential: Credential needed for the client to connect to Azure. Required. + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.AzureKeyCredential - :keyword endpoint: Service host. Required. - :paramtype endpoint: str :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, credential: AzureKeyCredential, *, endpoint: str, **kwargs: Any) -> None: - self._config = ModelClientConfiguration(credential=credential, **kwargs) + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = ChatCompletionsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -51,7 +60,7 @@ def __init__(self, credential: AzureKeyCredential, *, endpoint: str, **kwargs: A policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, self._config.http_logging_policy, ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) self._serialize = Serializer() self._deserialize = Deserializer() @@ -78,13 +87,173 @@ def send_request( """ request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "ChatCompletionsClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) + + +class EmbeddingsClient(EmbeddingsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """EmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = EmbeddingsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "EmbeddingsClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) + + +class ImageGenerationClient(ImageGenerationClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ImageGenerationClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = ImageGenerationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore async def close(self) -> None: await self._client.close() - async def __aenter__(self) -> "ModelClient": + async def __aenter__(self) -> "ImageGenerationClient": await self._client.__aenter__() return self diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py index 8fbd844c2808..89309fa151e8 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -14,13 +14,15 @@ from .._version import VERSION -class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for ModelClient. +class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ChatCompletionsClient. Note that all parameters used to create this instance are saved as instance attributes. - :param credential: Credential needed for the client to connect to Azure. Required. + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported @@ -28,12 +30,109 @@ class ModelClientConfiguration: # pylint: disable=too-many-instance-attributes :paramtype api_version: str """ - def __init__(self, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AzureKeyCredentialPolicy( + self.credential, "Authorization", prefix="Bearer", **kwargs + ) + + +class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for EmbeddingsClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AzureKeyCredentialPolicy( + self.credential, "Authorization", prefix="Bearer", **kwargs + ) + + +class ImageGenerationClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ImageGenerationClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-04-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint self.credential = credential self.api_version = api_version kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py index 886bf4218356..b5b194f3cca4 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py @@ -6,14 +6,18 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._operations import ModelClientOperationsMixin +from ._operations import ChatCompletionsClientOperationsMixin +from ._operations import EmbeddingsClientOperationsMixin +from ._operations import ImageGenerationClientOperationsMixin from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ModelClientOperationsMixin", + "ChatCompletionsClientOperationsMixin", + "EmbeddingsClientOperationsMixin", + "ImageGenerationClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 7b9fa63bf841..de1f4429877c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, List, Optional, Type, TypeVar, Union, overload from azure.core.exceptions import ( ClientAuthenticationError, @@ -27,12 +27,14 @@ from ... import models as _models from ..._model_base import SdkJSONEncoder, _deserialize from ..._operations._operations import ( - build_model_generate_images_request, - build_model_get_chat_completions_request, - build_model_get_embeddings_request, - build_model_get_model_info_request, + build_chat_completions_create_request, + build_chat_completions_get_model_info_request, + build_embeddings_create_request, + build_embeddings_get_model_info_request, + build_image_generation_create_request, + build_image_generation_get_model_info_request, ) -from .._vendor import ModelClientMixinABC +from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageGenerationClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -44,9 +46,9 @@ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class ModelClientOperationsMixin(ModelClientMixinABC): +class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload - async def get_chat_completions( + async def create( self, body: JSON, *, @@ -174,7 +176,7 @@ async def get_chat_completions( """ @overload - async def get_chat_completions( + async def create( self, *, messages: List[_models.ChatRequestMessage], @@ -324,7 +326,7 @@ async def get_chat_completions( """ @overload - async def get_chat_completions( + async def create( self, body: IO[bytes], *, @@ -399,7 +401,7 @@ async def get_chat_completions( """ @distributed_trace_async - async def get_chat_completions( + async def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -599,7 +601,7 @@ async def get_chat_completions( } } """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -638,7 +640,7 @@ async def get_chat_completions( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_chat_completions_request( + _request = build_chat_completions_create_request( model_deployment=model_deployment, content_type=content_type, 
api_version=self._config.api_version, @@ -646,7 +648,10 @@ async def get_chat_completions( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access @@ -671,8 +676,76 @@ async def get_chat_completions( return deserialized # type: ignore + @distributed_trace_async + async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInformation. The ModelInformation is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInformation + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. Required. + "model_provider": "str", # The model provider. Required. + "model_type": "str" # The type of the AI model. Required. Known values are: + "embeddings", "custom", "chat", "text_generation", and "image_generation". + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + + _request = build_chat_completions_get_model_info_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInformation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - async def get_embeddings( + async def create( self, body: JSON, *, @@ -753,7 +826,7 @@ async def get_embeddings( """ @overload - async def get_embeddings( + async def create( self, *, input: List[str], @@ -829,7 +902,7 @@ async def get_embeddings( """ @overload - async def get_embeddings( + async def create( self, body: IO[bytes], *, @@ -893,7 +966,7 @@ async def get_embeddings( """ @distributed_trace_async - async def get_embeddings( + async def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -983,7 +1056,7 @@ async 
def get_embeddings( } } """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1009,7 +1082,7 @@ async def get_embeddings( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_embeddings_request( + _request = build_embeddings_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1017,7 +1090,10 @@ async def get_embeddings( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access @@ -1042,8 +1118,76 @@ async def get_embeddings( return deserialized # type: ignore + @distributed_trace_async + async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInformation. The ModelInformation is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInformation + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. Required. + "model_provider": "str", # The model provider. Required. + "model_type": "str" # The type of the AI model. Required. Known values are: + "embeddings", "custom", "chat", "text_generation", and "image_generation". 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + + _request = build_embeddings_get_model_info_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInformation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ImageGenerationClientOperationsMixin(ImageGenerationClientMixinABC): @overload - async def generate_images( + async def create( self, body: JSON, *, @@ -1113,7 +1257,7 @@ async def generate_images( """ @overload - async def generate_images( + async def create( self, *, prompt: str, @@ -1188,7 +1332,7 @@ async def generate_images( """ @overload - async def generate_images( + async def create( self, body: IO[bytes], *, @@ -1236,7 +1380,7 @@ async def generate_images( """ @distributed_trace_async - async def generate_images( + async def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1330,7 +1474,7 @@ async def generate_images( "model": "str" # The model used for the image generation. Required. } """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1365,7 +1509,7 @@ async def generate_images( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_generate_images_request( + _request = build_image_generation_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1373,7 +1517,10 @@ async def generate_images( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access @@ -1418,7 +1565,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: "embeddings", "custom", "chat", "text_generation", and "image_generation". 
} """ - error_map = { + error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1431,12 +1578,15 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) - _request = build_model_get_model_info_request( + _request = build_image_generation_get_model_info_request( api_version=self._config.api_version, headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 05e0cf6ee953..ee18cb8ae5e9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -11,7 +11,7 @@ import sys from typing import List from .. import models as _models -from ._client import ModelClient as ModelClientGenerated +from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from typing import Callable, Any, Union, IO, Optional, Dict, TypeVar from azure.core.utils import case_insensitive_dict from azure.core.pipeline import PipelineResponse @@ -26,7 +26,7 @@ ResourceNotModifiedError, map_error, ) -from .._operations._operations import build_model_get_chat_completions_request +from .._operations._operations import build_chat_completions_create_request if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -38,7 +38,7 @@ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class ModelClient(ModelClientGenerated): +class ChatCompletionsClient(ChatCompletionsClientGenerated): @distributed_trace_async async def get_streaming_chat_completions( self, @@ -102,7 +102,7 @@ async def get_streaming_chat_completions( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_model_get_chat_completions_request( + _request = build_chat_completions_create_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -110,7 +110,10 @@ async def get_streaming_chat_completions( headers=_headers, params=_params, ) - _request.url = self._client.format_url(_request.url) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access @@ -127,7 +130,9 @@ async def get_streaming_chat_completions( return _models.StreamingChatCompletions(response.iter_bytes()) -__all__: List[str] = ["ModelClient"] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [ + "ChatCompletionsClient" +] # Add all objects you want publicly available to users at this package level 
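# A minimal usage sketch for the patched async client above -- an illustrative
# assumption, not generated code: it relies on the JSON `body` overload that the
# generated `create` operations accept, and the endpoint/key values below are
# placeholders, not values defined by this package.

import asyncio

from azure.core.credentials import AzureKeyCredential
from azure.ai.inference.aio import ChatCompletionsClient


async def _demo() -> None:
    # Positional endpoint/credential match the __init__ signature shown earlier.
    async with ChatCompletionsClient(
        "https://your-endpoint.example.com", AzureKeyCredential("your-key")
    ) as client:
        # The patched method streams the response and wraps the raw SSE byte
        # stream in models.StreamingChatCompletions.
        result = await client.get_streaming_chat_completions(
            body={"messages": [{"role": "user", "content": "How many feet are in a mile?"}]}
        )
        print(type(result).__name__)  # StreamingChatCompletions


asyncio.run(_demo())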
def patch_sdk(): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py index 97e248d5f6fb..1e0074ce0da0 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py @@ -8,7 +8,11 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import ModelClientConfiguration +from ._configuration import ( + ChatCompletionsClientConfiguration, + EmbeddingsClientConfiguration, + ImageGenerationClientConfiguration, +) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -17,10 +21,28 @@ from .._serialization import Deserializer, Serializer -class ModelClientMixinABC(ABC): +class ChatCompletionsClientMixinABC(ABC): """DO NOT use this class. It is for internal typing use only.""" _client: "AsyncPipelineClient" - _config: ModelClientConfiguration + _config: ChatCompletionsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class EmbeddingsClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: EmbeddingsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +class ImageGenerationClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: ImageGenerationClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index cfe28a49ae65..a19dc51f3131 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -13,9 +13,9 @@ class CapacityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Whether your capacity has been affected by the usage amount (token count) reported here.""" - ERROR = "usage" + USAGE = "usage" """Your capacity has been affected by the usage amount (token count) reported here.""" - IGNORE = "fixed" + FIXED = "fixed" """Your capacity has not been affected by the usage amount (token count) reported here.""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 7861aee3eb50..2c89fba5d31f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -136,7 +136,9 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) # where the curly braces contain a valid JSON object. Deserialize it into a ChatCompletionsUpdate object # and add it to the queue. self._queue.put( - _models.ChatCompletionsUpdate._deserialize(json.loads(element[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []) + _models.ChatCompletionsUpdate._deserialize( + json.loads(element[len(self.SSE_DATA_EVENT_PREFIX) : -1]), [] + ) ) if self.ENABLE_CLASS_LOGS: diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index ebd238a8fed7..448121e6f7d8 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -10,7 +10,7 @@ urlFragment: model-inference-samples # Samples for the model client library for Python -These are runnable console Python programs that show how to do chat completion using the model client. 
Most samples are in this folder
+These are runnable console Python programs that show how to do chat completion, embeddings and image generation using the clients in this package. Samples are in this folder
+and use the synchronous client. Samples in the subfolder `async_samples` use the asynchronous client. The concepts are similar, you can easily
 modify any of the samples to your needs.
@@ -18,7 +18,7 @@ The concepts are similar, you can easily modify any of the samples to your needs
 
 |**File Name**|**Description**|
 |----------------|-------------|
-|[sample_streaming_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py) | One chat completion operation using a synchronous client and streaming response. |
+|[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. |
 |[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. |
 |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. |
 |[sample_image_generation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_generation.py) | Generate an image from a prompt using a synchronous client. |
@@ -27,7 +27,7 @@ The concepts are similar, you can easily modify any of the samples to your needs
 
 |**File Name**|**Description**|
 |----------------|-------------|
-|[sample_streaming_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py) | One chat completion operation using an asynchronous client and streaming response. |
+|[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. |
 |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. |
 |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. |
 |[sample_image_generation_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py) | Generate an image from a prompt using an asynchronous client. |
@@ -51,7 +51,7 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/
 
 ## Set environment variables
 
-To construct the `ModelClient`, you will need to pass in the endpoint URL and key associated with your deployed AI model.
+To construct any of the clients, you will need to pass in the endpoint URL and key associated with your deployed AI model.
 
 * The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`).
 
@@ -65,8 +65,7 @@ For convenience, and to promote the practice of not hard-coding secrets in your
 | Embeddings | `EMBEDDINGS_ENDPOINT` | `EMBEDDINGS_KEY` |
 | Image generation | `IMAGE_GENERATION_ENDPOINT` | `IMAGE_GENERATION_KEY` |
 
-Note that the client library does not directly read these environment variable at run time. The sample code reads the environment variables and constructs the `ModelClient` with this read values.
-
+Note that the client library does not directly read these environment variables at run time. The sample code reads the environment variables and constructs the relevant client with these values.
 
 ## Running the samples
 
@@ -76,31 +75,6 @@ python sample_chat_completions.py
 ```
 similarly for the other samples.
 
-## Example console output
-
-The sample `sample_chat_completions.py` sends the following system and user messages in a single call:
-
-- System: "You are an AI assistant that helps people find information."
-- User: "How many feet are in a mile?"
-
-And prints out the service response. It should look similar to the following:
-
-```text
-Chat Completions:
-choices[0].message.content: Hello! I'd be happy to help you find the answer to your question. There are 5,280 feet in a mile.
-choices[0].message.role: assistant
-choices[0].finish_reason: stop
-choices[0].index: 0
-id: 77f08d7e-8127-431d-bed5-a814b78ddd80
-created: 1970-01-08 23:28:48+00:00
-model: Llama-2-13b-chat
-object: chat.completion
-usage.capacity_type: None
-usage.prompt_tokens: 41
-usage.completion_tokens: 32
-usage.total_tokens: 73
-```
-
 ## Troubleshooting
 
 See [Troubleshooting](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#troubleshooting) here.
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
index eb3fa0d0d5fc..c4b7d1ccbdcf 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
@@ -4,7 +4,8 @@
 # ------------------------------------
 """
 DESCRIPTION:
-    This sample demonstrates how to get a chat completion response from the service using an asynchronous client.
+    This sample demonstrates how to get a chat completion response
+    from the service using an asynchronous client.
 
 USAGE:
     python sample_chat_completion_async.py
@@ -21,7 +22,7 @@ async def sample_chat_completions_async():
 
     import os
-    from azure.ai.inference.aio import ModelClient
+    from azure.ai.inference.aio import ChatCompletionsClient
     from azure.ai.inference.models import SystemMessage, UserMessage
     from azure.core.credentials import AzureKeyCredential
 
@@ -35,13 +36,13 @@ async def sample_chat_completions_async():
         exit()
 
     # Create a Model Client for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
     # Do a single chat completion operation. Start the operation and get a Future object.
future = asyncio.ensure_future(
-        client.get_chat_completions(
+        client.create(
             messages=[
-                SystemMessage(content="You are an AI assistant that helps people find information."),
+                SystemMessage(content="You are a helpful assistant."),
                 UserMessage(content="How many feet are in a mile?"),
             ]
         )
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
similarity index 67%
rename from sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py
rename to sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
index d006b1a193dd..49467ce8bb01 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_streaming_chat_completions_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
@@ -19,12 +19,12 @@
 """
 import asyncio
 import os
-from azure.ai.inference.aio import ModelClient
+from azure.ai.inference.aio import ChatCompletionsClient
 from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate
 from azure.core.credentials import AzureKeyCredential
 
 
-async def sample_streaming_chat_completions_async():
+async def sample_chat_completions_streaming_async():
 
     # Read the values of your model endpoint and key from environment variables
     try:
@@ -35,8 +35,8 @@ async def sample_streaming_chat_completions_async():
         print("Set them before running this sample.")
         exit()
 
-    # Create Model Client for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    # Create chat completions client for asynchronous operations
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
     # Do a single streaming chat completion operation. Start the operation and get a Future object.
future = asyncio.ensure_future( @@ -58,9 +58,9 @@ async def sample_streaming_chat_completions_async(): # Iterate on the result to get chat completion updates, as they arrive from the service accumulated_content = "" - async for element in result: - accumulated_content += element.choices[0].delta.content if element.choices[0].delta.content is not None else "" - print_chat_completions_delta(element) + async for update in result: + accumulated_content += update.choices[0].delta.content if update.choices[0].delta.content is not None else "" + print_chat_completions_delta(update) print(f"Accumulated content: {accumulated_content}") @@ -68,25 +68,25 @@ async def sample_streaming_chat_completions_async(): await client.close() -def print_chat_completions_delta(element: ChatCompletionsUpdate): +def print_chat_completions_delta(update: ChatCompletionsUpdate): print( - f"content: {repr(element.choices[0].delta.content)}, " - f"role: {element.choices[0].delta.role}, " - f"finish_reason: {element.choices[0].finish_reason}, " - f"index: {element.choices[0].index}" + f"content: {repr(update.choices[0].delta.content)}, " + f"role: {update.choices[0].delta.role}, " + f"finish_reason: {update.choices[0].finish_reason}, " + f"index: {update.choices[0].index}" ) - print(f"id: {element.id}, created: {element.created}, model: {element.model}, object: {element.object}") - if element.usage is not None: + print(f"id: {update.id}, created: {update.created}, model: {update.model}, object: {update.object}") + if update.usage is not None: print( - f"usage: capacity_type: {element.usage.capacity_type}, " - f"prompt_tokens: {element.usage.prompt_tokens}, " - f"completion_tokens: {element.usage.completion_tokens}, " - f"usage.total_tokens: {element.usage.total_tokens}" + f"usage: capacity_type: {update.usage.capacity_type}, " + f"prompt_tokens: {update.usage.prompt_tokens}, " + f"completion_tokens: {update.usage.completion_tokens}, " + f"usage.total_tokens: {update.usage.total_tokens}" ) async def main(): - await sample_streaming_chat_completions_async() + await sample_chat_completions_streaming_async() if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 358bcce53751..782c7c23cd2e 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -21,7 +21,7 @@ async def sample_embeddings_async(): import os - from azure.ai.inference.aio import ModelClient + from azure.ai.inference.aio import EmbeddingsClient from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables @@ -34,10 +34,10 @@ async def sample_embeddings_async(): exit() # Create an Image Analysis client for synchronous operations - client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single embeddings operation. Start the operation and get a Future object. 
-    future = asyncio.ensure_future(client.get_embeddings(input=["first phrase", "second phrase", "third phrase"]))
+    future = asyncio.ensure_future(client.create(input=["first phrase", "second phrase", "third phrase"]))
 
     # Loop until the operation is done
     while not future.done():
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
index bf2a251bd049..2d48d74bc651 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py
@@ -21,7 +21,7 @@ async def sample_image_generation_async():
 
     import os
-    from azure.ai.inference.aio import ModelClient
+    from azure.ai.inference.aio import ImageGenerationClient
     from azure.core.credentials import AzureKeyCredential
 
     # Read the values of your model endpoint and key from environment variables
@@ -34,11 +34,11 @@ async def sample_image_generation_async():
         exit()
 
     # Create an Model for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential("key"))
+    client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
     # Generate an image from text prompt. This will be an asynchronously (non-blocking) call.
     future = asyncio.ensure_future(
-        client.generate_images(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768")
+        client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768")
     )
 
     # Loop until the operation is done
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index ed1dc6b5e2da..f239fdafeaed 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -4,7 +4,8 @@
 # ------------------------------------
 """
 DESCRIPTION:
-    This sample demonstrates how to get a chat completions response from the service using a synchronous client.
+    This sample demonstrates how to get a chat completions response from
+    the service using a synchronous client.
 
 USAGE:
     python sample_chat_completions.py
@@ -20,9 +21,7 @@ def sample_chat_completions():
 
     import os
-    from azure.ai.inference.models import SystemMessage, UserMessage
 
-    # Read the values of your model endpoint and key from environment variables
     try:
         endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
         key = os.environ["CHAT_COMPLETIONS_KEY"]
@@ -31,39 +30,21 @@ def sample_chat_completions():
         exit()
 
-    # [START create_client]
-    from azure.ai.inference import ModelClient
+    # [START chat_completions]
+    from azure.ai.inference import ChatCompletionsClient
+    from azure.ai.inference.models import SystemMessage, UserMessage
     from azure.core.credentials import AzureKeyCredential
 
-    # Create Model Client for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
-    # [END create_client]
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
-    # [START chat_completions]
-    # Do a single chat completion operation. This will be a synchronously (blocking) call.
- result = client.get_chat_completions( + result = client.create( messages=[ - SystemMessage(content="You are an AI assistant that helps people find information."), + SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), - ], - # Examples of setting extra parameters (TODO: move this to advanced sample) - extras=dict(key1="value1", key2="value2"), + ] ) - # Print results the the console - print("Chat Completions:") - print(f"choices[0].message.content: {result.choices[0].message.content}") - print(f"choices[0].message.role: {result.choices[0].message.role}") - print(f"choices[0].finish_reason: {result.choices[0].finish_reason}") - print(f"choices[0].index: {result.choices[0].index}") - print(f"id: {result.id}") - print(f"created: {result.created}") - print(f"model: {result.model}") - print(f"object: {result.object}") - print(f"usage.capacity_type: {result.usage.capacity_type}") - print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") - print(f"usage.completion_tokens: {result.usage.completion_tokens}") - print(f"usage.total_tokens: {result.usage.total_tokens}") + print(result.choices[0].message.content) # [END chat_completions] diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py new file mode 100644 index 000000000000..22e541eb48bc --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -0,0 +1,53 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completion streaming response + from the service using a synchronous client. + +USAGE: + python sample_chat_completions_streaming.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://..inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
+""" + + +def sample_chat_completions_streaming(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + # [START chat_completions_streaming] + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + result = client.create_streaming( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), + ] + ) + + for update in result: + print(update.choices[0].delta.content, end="") + # [END chat_completions_streaming] + + +if __name__ == "__main__": + sample_chat_completions_streaming() diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 2048b87a5baa..66c112bb7692 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -4,7 +4,8 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to get embeddings for a list of sentences using a synchronous client. + This sample demonstrates how to get embeddings for a list of sentences + using a synchronous client. USAGE: python sample_embeddings.py @@ -20,10 +21,7 @@ def sample_embeddings(): import os - from azure.ai.inference import ModelClient - from azure.core.credentials import AzureKeyCredential - # Read the values of your model endpoint and key from environment variables try: endpoint = os.environ["EMBEDDINGS_ENDPOINT"] key = os.environ["EMBEDDINGS_KEY"] @@ -32,26 +30,20 @@ def sample_embeddings(): print("Set them before running this sample.") exit() - # Create an Model for synchronous operations - client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True) - # [START embeddings] - # Do a single embeddings operation. This will be a synchronously (blocking) call. 
- result = client.get_embeddings(input=["first phrase", "second phrase", "third phrase"]) + from azure.ai.inference import EmbeddingsClient + from azure.core.credentials import AzureKeyCredential + + client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + result = client.create(input=["first phrase", "second phrase", "third phrase"]) - # Print results the the console - print("Embeddings result:") for item in result.data: length = len(item.embedding) print( - f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" ) - print(f"id: {result.id}") - print(f"model: {result.model}") - print(f"object: {result.object}") - print(f"usage.input_tokens: {result.usage.input_tokens}") - print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") - print(f"usage.total_tokens: {result.usage.total_tokens}") # [END embeddings] diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py index 95dfa1ed5e92..722d0b88c621 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py @@ -4,7 +4,8 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to generate an image from a prompt. + This sample demonstrates how to generate an image from a prompt + using a synchronous client. USAGE: python sample_image_generation.py @@ -20,10 +21,7 @@ def sample_image_generation(): import os - from azure.ai.inference import ModelClient - from azure.core.credentials import AzureKeyCredential - # Read the values of your model endpoint and key from environment variables try: endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"] key = os.environ["IMAGE_GENERATION_KEY"] @@ -32,23 +30,16 @@ def sample_image_generation(): print("Set them before running this sample.") exit() - # Create an Model for synchronous operations - client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - # [START image_generation] - # Generate a single image from a text prompt. This will be a synchronously (blocking) call. 
-    result = client.generate_images(
-        prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768"
-    )
-
-    # Save generated image to file and print other results the the console
-    print("Image generation result:")
-    for index, item in enumerate(result.data):
-        with open(f"image_{index}.png", "wb") as image:
-            image.write(item.b64_json.decode("base64"))
-    print(f"id: {result.id}")
-    print(f"model: {result.model}")
-    print(f"created: {result.created}")
+    import base64
+
+    from azure.ai.inference import ImageGenerationClient
+    from azure.core.credentials import AzureKeyCredential
+
+    client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+    result = client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768")
+
+    # Decode the base64-encoded image returned by the service and save it to a file
+    with open("image.png", "wb") as image:
+        image.write(base64.b64decode(result.data[0].b64_json))
     # [END image_generation]
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
deleted file mode 100644
index 382c5b75037f..000000000000
--- a/sdk/ai/azure-ai-inference/samples/sample_streaming_chat_completions.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-"""
-DESCRIPTION:
-    This sample demonstrates how to get a chat completion streaming response
-    from the service using a synchronous client.
-
-USAGE:
-    python sample_streaming_chat_completions.py
-
-    Set these two environment variables before running the sample:
-    1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
-        https://..inference.ai.azure.com
-        where `your-deployment-name` is your unique AI Model deployment name, and
-        `your-azure-region` is the Azure region where your model is deployed.
-    2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
-"""
-
-import os
-from azure.ai.inference import ModelClient
-from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate
-from azure.core.credentials import AzureKeyCredential
-from azure.core.pipeline.transport import RequestsTransport
-
-
-def sample_streaming_chat_completions():
-
-    # Read the values of your model endpoint and key from environment variables
-    try:
-        endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
-        key = os.environ["CHAT_COMPLETIONS_KEY"]
-    except KeyError:
-        print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'")
-        print("Set them before running this sample.")
-        exit()
-
-    # Create Model Client for synchronous operations
-    client = ModelClient(endpoint=endpoint, credential=AzureKeyCredential(key))
-
-    messages = [
-        SystemMessage(content="You are an AI assistant that helps people find information."),
-        UserMessage(content="Give me 5 good reasons why I should exercise every day."),
-    ]
-
-    # [START streaming_chat_completions]
-    # Do a single chat completion operation. This will be a synchronously (blocking) call.
- result = client.get_streaming_chat_completions(messages=messages) - - # Iterate on the result to get chat completion updates, as they arrive from the service - accumulated_content = "" - for element in result: - accumulated_content += element.choices[0].delta.content if element.choices[0].delta.content is not None else "" - print_chat_completions_delta(element) - - print(f"Accumulated content: {accumulated_content}") - # [END streaming_chat_completions] - - -def print_chat_completions_delta(element: ChatCompletionsUpdate): - print( - f"content: {repr(element.choices[0].delta.content)}, " - f"role: {element.choices[0].delta.role}, " - f"finish_reason: {element.choices[0].finish_reason}, " - f"index: {element.choices[0].index}" - ) - print(f"id: {element.id}, created: {element.created}, model: {element.model}, object: {element.object}") - if element.usage is not None: - print( - f"usage: capacity_type: {element.usage.capacity_type}, " - f"prompt_tokens: {element.usage.prompt_tokens}, " - f"completion_tokens: {element.usage.completion_tokens}, " - f"usage.total_tokens: {element.usage.total_tokens}" - ) - - -if __name__ == "__main__": - sample_streaming_chat_completions() diff --git a/sdk/ai/azure-ai-inference/setup.py b/sdk/ai/azure-ai-inference/setup.py index fc6a19e4a776..c7b5395a3f9f 100644 --- a/sdk/ai/azure-ai-inference/setup.py +++ b/sdk/ai/azure-ai-inference/setup.py @@ -63,8 +63,8 @@ "azure.ai.inference": ["py.typed"], }, install_requires=[ - "isodate<1.0.0,>=0.6.1", - "azure-core<2.0.0,>=1.30.0", + "isodate>=0.6.1", + "azure-core>=1.30.0", "typing-extensions>=4.6.0", ], python_requires=">=3.8", diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index ee06b6d6aa0f..4c34726210bb 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -42,8 +42,8 @@ # The test class name needs to start with "Test" to get collected by pytest class ModelClientTestBase(AzureRecordedTestCase): - client: sdk.ModelClient - async_client: async_sdk.ModelClient + client: sdk.ChatCompletionsClient + async_client: async_sdk.ChatCompletionsClient connection_url: str # Set to True to print out all analysis results @@ -62,7 +62,7 @@ def _create_client_for_authentication_failure(self, sync: bool, **kwargs): def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url: bool): credential = AzureKeyCredential(key) if sync: - self.client = sdk.ModelClient( + self.client = sdk.ChatCompletionsClient( endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED, @@ -70,7 +70,7 @@ def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url ) assert self.client is not None else: - self.async_client = async_sdk.ModelClient( + self.async_client = async_sdk.ChatCompletionsClient( endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED, @@ -88,7 +88,7 @@ def _do_chat_completions( **kwargs, ): - result = self.client.get_chat_completions(messages=kwargs.get("messages"), params=query_params) + result = self.client.create(messages=kwargs.get("messages"), params=query_params) # Optional: console printout of all results if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS: @@ -109,7 +109,7 @@ async def _do_async_chat_completions( start_time = time.time() # Start the operation and get a Future object - future = 
asyncio.ensure_future(self.async_client.get_chat_completions(messages=kwargs.get("messages")))
+        future = asyncio.ensure_future(self.async_client.create(messages=kwargs.get("messages")))
 
         # Loop until the operation is done
         while not future.done():
@@ -138,7 +138,7 @@ def _do_chat_completion_with_error(
     ):
 
         try:
-            result = self.client.get_chat_completions(messages=kwargs.get("messages"))
+            result = self.client.create(messages=kwargs.get("messages"))
 
         except AzureError as e:
             print(e)
@@ -156,7 +156,7 @@ async def _do_async_chat_completion_with_error(
     ):
 
         try:
-            result = await self.async_client.get_chat_completions(messages=kwargs.get("messages"))
+            result = await self.async_client.create(messages=kwargs.get("messages"))
 
         except AzureError as e:
             print(e)
diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml
index 4d017a67d8ee..f912ad03e209 100644
--- a/sdk/ai/azure-ai-inference/tsp-location.yaml
+++ b/sdk/ai/azure-ai-inference/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/ai/ModelClient
-commit: b57d3b3f4b39b8eda9bcf212830cfbd813fd23e7
+commit: 78e7193dec65e6ee806ed71337064a5f334966c4
 repo: Azure/azure-rest-api-specs
 additionalDirectories:

From 159d82f6aac9d23173ff8839f7eb9a648ddb91c0 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Fri, 12 Apr 2024 14:58:29 -0700
Subject: [PATCH 030/112] Minor fixes to root README.md

---
 sdk/ai/azure-ai-inference/README.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 8d0f6b81f6c2..c1759ff09adc 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -93,16 +93,16 @@ Image generation operations target the URL route `/images/generations` on the pr
 
 ## Examples
 
-The following sections provide code snippets covering these common scenarios:
+In the following sections you will find simple examples of:
 
 * [Chat completions](#chat-completions-example)
 * [Streaming chat completions](#streaming-chat-completions-example)
 * [Embeddings](#embeddings-example)
 * [Image generation](#image-generation-example)
 
-These snippets use the synchronous `client` from [Create and authenticate clients](#create-and-authenticate-clients).
+The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients).
 Only mandatory input settings are shown for simplicity.
 
-See the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder for fully working samples for synchronous and asynchronous clients.
+See the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder for full working samples for synchronous and asynchronous clients.
 
 ### Chat completions example
 
@@ -129,7 +129,7 @@ print(result.choices[0].message.content)
 
 
 
-The printed result of course depends on the model. You may get something like this: `Hello! I'd be happy to help answer your question. There are 5,280 feet in a mile`.
+The printed result of course depends on the model, but you should get something like this: `Hello! I'd be happy to help answer your question. There are 5,280 feet in a mile`.
 
 To generate completions for additional messages, simply call `client.create` multiple times using the same `client`.
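For instance, a follow-up question can be asked by appending the assistant's reply to the message list and calling `client.create` again on the same client. A minimal sketch (it reuses the `client` created in the example above; the `AssistantMessage` pattern mirrors the tests in this change):

```python
from azure.ai.inference.models import AssistantMessage, UserMessage

messages = [UserMessage(content="How many feet are in a mile?")]
result = client.create(messages=messages)
print(result.choices[0].message.content)

# Feed the assistant's reply back as chat history, then ask a follow-up
# question using the same client to continue the conversation.
messages.append(AssistantMessage(content=result.choices[0].message.content))
messages.append(UserMessage(content="And how many yards?"))
result = client.create(messages=messages)
print(result.choices[0].message.content)
```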
@@ -187,7 +187,7 @@ for item in result.data:
 
-The printed result of course depends on the model. You should see something like this:
+The printed result of course depends on the model, but you should see something like this:
 ```txt
 data[0]: length=1024, [0.0013399124, -0.01576233, ..., 0.007843018, 0.000238657]
 data[1]: length=1024, [0.036590576, -0.0059547424, ..., 0.011405945, 0.004863739]
@@ -240,7 +240,7 @@ For example, when you provide a wrong authentication key:
 Status code: 401 (Unauthorized)
 Operation returned an invalid status 'Unauthorized'
 Content: {"status": "Invalid auth token"}
-```
+```
 
 Or for example when you call `get_embeddings` on a model that does not support the `/v1/embeddings` route:
 
From 5326bba72887bed040fe479190d7d06eb7820b57 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Mon, 15 Apr 2024 13:50:16 -0700
Subject: [PATCH 031/112] Update tests

---
 sdk/ai/azure-ai-inference/tests/README.md    |  27 +-
 .../tests/model_inference_test_base.py       | 234 +++++++-----------
 .../test_model_inference_async_client.py     | 110 ++++----
 .../tests/test_model_inference_client.py     |  99 ++++----
 4 files changed, 196 insertions(+), 274 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md
index 5062f95e27be..6a698ec3abcb 100644
--- a/sdk/ai/azure-ai-inference/tests/README.md
+++ b/sdk/ai/azure-ai-inference/tests/README.md
@@ -1,20 +1,24 @@
 # Azure AI Model Inference client library tests for Python
 
-## Running tests locally, on a Windows PC, against the live service
+The instructions below are for running tests locally, on a Windows machine, against the live service.
 
-### Prerequisites
+## Prerequisites
 
-See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#prerequisites). Create an Azure resource in one of the GPU-supported regions, otherwise some of the tests will fail.
+The live tests were written against the AI models mentioned below. You will need to deploy them in [Azure AI Studio](https://ai.azure.com/) and have the endpoint and key for each one of them.
 
-### Setup
+- TBD (for chat completion tests)
+- TBD (for embedding tests)
+- TBD (for image generation tests)
+
+## Setup
 
 * Clone or download this sample repository.
 * Open a command prompt window in the folder `sdk\ai\azure-ai-inference`.
-* If you want to run tests against the latest public Image Analysis client library, install it by running:
+* If you want to run tests against the latest published client library, install it by running:
    ```bash
    pip install azure-ai-inference
    ```
-* If you want to run tests against a locally built Image Analysis client library:
+* If you want to run tests against a locally built client library:
   * First build the wheel:
     ```bash
     pip install wheel
@@ -26,10 +30,9 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/
     pip install dist\azure_ai_inference-1.0.0b1-py3-none-any.whl --user --force-reinstall
     ```
 
+## Set environment variables
 
-### Set environment variables
-
-See [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#set-environment-variables).
+The tests read endpoints and keys from environment variables.
See the [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/README.md#set-environment-variables) section in the samples README.md file for the full list of environment variables that need to be set for all tests to pass. In addition, the following environment values **must be** defined, although not used. Assign any value to them: ``` @@ -38,7 +41,7 @@ set AI_CLIENT_ID=not-used set AI_CLIENT_SECRET=not-used ``` -### Configure test proxy +## Configure test proxy Configure the test proxy to run live service tests without recordings: ``` @@ -46,13 +49,13 @@ set AZURE_TEST_RUN_LIVE=true set AZURE_SKIP_LIVE_RECORDING=true ``` -### Run tests +## Run tests To run all tests, type: ``` pytest ``` -### Additional information +## Additional information See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 4c34726210bb..cb13bf6a46a9 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -7,8 +7,7 @@ import sys import azure.ai.inference as sdk import azure.ai.inference.aio as async_sdk -import asyncio -import time +import re from os import path from typing import List, Optional, Union @@ -31,160 +30,62 @@ handler = logging.StreamHandler(stream=sys.stdout) logger.addHandler(handler) -ServicePreparer = functools.partial( +ServicePreparerChatCompletions = functools.partial( EnvironmentVariableLoader, "chat_completions", chat_completions_endpoint="https://your-deployment-name.your-azure-region.inference.ai.azure.com", chat_completions_key="00000000000000000000000000000000", ) +ServicePreparerEmbeddings = functools.partial( + EnvironmentVariableLoader, + "embeddings", + embeddings_endpoint="https://your-deployment-name.your-azure-region.inference.ai.azure.com", + embeddings_key="00000000000000000000000000000000", +) # The test class name needs to start with "Test" to get collected by pytest class ModelClientTestBase(AzureRecordedTestCase): - client: sdk.ChatCompletionsClient - async_client: async_sdk.ChatCompletionsClient - connection_url: str - - # Set to True to print out all analysis results - PRINT_CHAT_COMPLETION_RESULTS = True + # Set to True to print out all results to the console + PRINT_RESULT = True - def _create_client_for_standard_test(self, sync: bool, get_connection_url: bool = False, **kwargs): - endpoint = kwargs.pop("chat_completions_endpoint") - key = kwargs.pop("chat_completions_key") - self._create_client(endpoint, key, sync, get_connection_url) + # Regular expression describing the pattern of a result ID (e.g. 
"183b56eb-8512-484d-be50-5d8df82301a2") + REGEX_RESULT_ID = re.compile(r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$') - def _create_client_for_authentication_failure(self, sync: bool, **kwargs): + def _create_chat_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs): endpoint = kwargs.pop("chat_completions_endpoint") - key = "00000000000000000000000000000000" - self._create_client(endpoint, key, sync, False) + key = "00000000000000000000000000000000" if bad_key else kwargs.pop("chat_completions_key") + credential = AzureKeyCredential(key) + if sync: + return sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + else: + return async_sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) - def _create_client(self, endpoint: str, key: str, sync: bool, get_connection_url: bool): + def _create_embeddings_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs) -> sdk.EmbeddingsClient | async_sdk.EmbeddingsClient: + endpoint = kwargs.pop("embeddings_endpoint") + key = "00000000000000000000000000000000" if bad_key else kwargs.pop("embeddings_key") credential = AzureKeyCredential(key) if sync: - self.client = sdk.ChatCompletionsClient( - endpoint=endpoint, - credential=credential, - logging_enable=LOGGING_ENABLED, - raw_request_hook=self._raw_request_check if get_connection_url else None, - ) - assert self.client is not None + return sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) else: - self.async_client = async_sdk.ChatCompletionsClient( - endpoint=endpoint, - credential=credential, - logging_enable=LOGGING_ENABLED, - raw_request_hook=self._raw_request_check if get_connection_url else None, - ) - assert self.async_client is not None - - def _raw_request_check(self, request: PipelineRequest): - self.connection_url = request.http_request.url - print(f"Connection URL: {request.http_request.url}") - - def _do_chat_completions( - self, - query_params: Optional[dict] = None, - **kwargs, - ): - - result = self.client.create(messages=kwargs.get("messages"), params=query_params) - - # Optional: console printout of all results - if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS: - ModelClientTestBase._print_chat_completions_results(result) - - # Validate all results - ModelClientTestBase._validate_chat_completions_results(result) - - # Validate that additional query parameters exists in the connection URL, if specify - if query_params is not None: - ModelClientTestBase._validate_query_parameters(query_params, self.connection_url) - - async def _do_async_chat_completions( - self, - query_params: Optional[dict] = None, - **kwargs, - ): - start_time = time.time() - - # Start the operation and get a Future object - future = asyncio.ensure_future(self.async_client.create(messages=kwargs.get("messages"))) - - # Loop until the operation is done - while not future.done(): - await asyncio.sleep(0.1) # sleep for 100 ms - print(f"Elapsed time: {int(1000*(time.time()- start_time))}ms") - - # Get the result (this will not block since the operation is done) - result = future.result() - - # Optional: console printout of all results - if ModelClientTestBase.PRINT_CHAT_COMPLETION_RESULTS: - ModelClientTestBase._print_chat_completions_results(result) - - # Validate all results - ModelClientTestBase._validate_chat_completions_results(result) - - # Validate that additional query parameters exists in the connection URL, if 
specify - if query_params is not None: - ModelClientTestBase._validate_query_parameters(query_params, self.connection_url) - - def _do_chat_completion_with_error( - self, - expected_status_code: int, - expected_message_contains: str, - **kwargs, - ): - - try: - result = self.client.create(messages=kwargs.get("messages")) - - except AzureError as e: - print(e) - assert hasattr(e, "status_code") - assert e.status_code == expected_status_code - assert expected_message_contains in e.message - return - assert False # We should not get here - - async def _do_async_chat_completion_with_error( - self, - expected_status_code: int, - expected_message_contains: str, - **kwargs, - ): - - try: - result = await self.async_client.create(messages=kwargs.get("messages")) - - except AzureError as e: - print(e) - assert hasattr(e, "status_code") - assert e.status_code == expected_status_code - assert expected_message_contains in e.message - return - assert False # We should not get here + return async_sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) - @staticmethod - def _validate_query_parameters(query_params: dict, connection_url: str): - assert len(query_params) > 0 - query_string = "" - for key, value in query_params.items(): - query_string += "&" + key + "=" + value - query_string = "?" + query_string[1:] - assert query_string in connection_url + def _create_embeddings_client_with_chat_completions_credentials(self, **kwargs) -> sdk.EmbeddingsClient: + endpoint = kwargs.pop("chat_completions_endpoint") + key = kwargs.pop("chat_completions_key") + credential = AzureKeyCredential(key) + return sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) @staticmethod - def _validate_chat_completions_results(result: sdk.models.ChatCompletions): - - assert "5,280" in result.choices[0].message.content or "5280" in result.choices[0].message.content + def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contains: List[str]): + assert any(item in result.choices[0].message.content for item in contains) assert result.choices[0].message.role == sdk.models.ChatRole.ASSISTANT assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED assert result.choices[0].index == 0 assert result.id is not None - assert result.id != "" + assert len(result.id) == 36 assert result.created is not None assert result.created != "" assert result.model is not None @@ -194,21 +95,58 @@ def _validate_chat_completions_results(result: sdk.models.ChatCompletions): assert result.usage.completion_tokens > 0 assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + + @staticmethod + def _print_chat_completions_result(result: sdk.models.ChatCompletions): + if ModelClientTestBase.PRINT_RESULT: + print(" Chat Completions result:") + for choice in result.choices: + print(f"\tchoices[0].message.content: {choice.message.content}") + print("\tchoices[0].message.role: {}".format(choice.message.role)) + print("\tchoices[0].finish_reason: {}".format(choice.finish_reason)) + print("\tchoices[0].index: {}".format(choice.index)) + print("\tid: {}".format(result.id)) + print("\tcreated: {}".format(result.created)) + print("\tmodel: {}".format(result.model)) + print("\tobject: {}".format(result.object)) + print("\tusage.prompt_tokens: {}".format(result.usage.prompt_tokens)) + print("\tusage.completion_tokens: {}".format(result.usage.completion_tokens)) + print("\tusage.total_tokens: 
{}".format(result.usage.total_tokens)) + + + @staticmethod + def _validate_embeddings_result(result: sdk.models.EmbeddingsResult): + assert result is not None + assert result.data is not None + assert len(result.data) == 3 + for i in [0, 1, 2]: + assert result.data[i] is not None + assert result.data[i].object == "embedding" + assert result.data[i].index == i + assert len(result.data[i].embedding) == 1024 + assert result.data[i].embedding[0] != 0.0 + assert result.data[i].embedding[1023] != 0.0 + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) + #assert len(result.model) > 0 # At the time of writing this test, this JSON field existed but was empty + assert result.object == "list" + # At the time of writing this test, input_tokens did not exist (I see completion tokens instead) + #assert result.usage.input_tokens > 0 + #assert result.usage.prompt_tokens > 0 + #assert result.total_tokens == result.usage.input_tokens + result.usage.prompt_tokens + + @staticmethod - def _print_chat_completions_results(result: sdk.models.ChatCompletions): - - print(" Chat Completions:") - - for choice in result.choices: - print(f"\tchoices[0].message.content: {choice.message.content}") - print("\tchoices[0].message.role: {}".format(choice.message.role)) - print("\tchoices[0].finish_reason: {}".format(choice.finish_reason)) - print("\tchoices[0].index: {}".format(choice.index)) - - print("\tid: {}".format(result.id)) - print("\tcreated: {}".format(result.created)) - print("\tmodel: {}".format(result.model)) - print("\tobject: {}".format(result.object)) - print("\tusage.prompt_tokens: {}".format(result.usage.prompt_tokens)) - print("\tusage.completion_tokens: {}".format(result.usage.completion_tokens)) - print("\tusage.total_tokens: {}".format(result.usage.total_tokens)) + def _print_embeddings_result(result: sdk.models.EmbeddingsResult): + if ModelClientTestBase.PRINT_RESULT: + print("Embeddings result:") + for item in result.data: + length = len(item.embedding) + print( + f"\tdata[{item.index}]: length={length}, object={item.object}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + print(f"\tid: {result.id}") + print(f"\tmodel: {result.model}") + print(f"\tobject: {result.object}") + #print(f"\tusage.input_tokens: {result.usage.input_tokens}") # At the time of writing this test, this JSON field does not exist + print(f"\tusage.prompt_tokens: {result.usage.prompt_tokens}") + print(f"\tusage.total_tokens: {result.usage.total_tokens}") diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 77095d982512..720d882b10ae 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -5,8 +5,9 @@ import inspect import azure.ai.inference as sdk -from model_inference_test_base import ModelClientTestBase, ServicePreparer +from model_inference_test_base import ModelClientTestBase, ServicePreparerChatCompletions, ServicePreparerEmbeddings from devtools_testutils.aio import recorded_by_proxy_async +from azure.core.exceptions import AzureError # The test class name needs to start with "Test" to get collected by pytest class TestImageAnalysisAsyncClient(ModelClientTestBase): @@ -17,70 +18,53 @@ class TestImageAnalysisAsyncClient(ModelClientTestBase): # # ********************************************************************************** - # Test all 
visual features from a local image, using default settings - @ServicePreparer() + # Test two async chat completions with chat history + @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_chat_completion(self, **kwargs): - - self._create_client_for_standard_test(sync=False, **kwargs) - - messages = [sdk.models.UserMessage(content="How many feet are in a mile?")] - - await self._do_async_chat_completions(messages=messages, **kwargs) - - await self.async_client.close() - - # Test some visual features, one after the other, from image URL, with relevant settings specified - - -""" @ServicePreparer() + async def test_async_chat_completions_error_free(self, **kwargs): + messages = [ + sdk.models.SystemMessage(content="You are a helpful assistant answering questions regarding length units."), + sdk.models.UserMessage(content="How many feet are in a mile?") + ] + + client = self._create_chat_client(sync=False, **kwargs) + result = await client.create(messages=messages) + self._print_chat_completions_result(result) + self._validate_chat_completions_result(result, ["5280", "5,280"]) + + messages.append(sdk.models.AssistantMessage(content=result.choices[0].message.content)) + messages.append(sdk.models.UserMessage(content="and how many yards?")) + result = await client.create(messages=messages) + self._print_chat_completions_result(result) + self._validate_chat_completions_result(result, ["1760", "1,760"]) + await client.close() + + # Test one embeddings async call + @ServicePreparerEmbeddings() @recorded_by_proxy_async - async def test_analyze_async_single_feature_from_url(self, **kwargs): - - self._create_client_for_standard_analysis(sync=False, **kwargs) - - await self._do_async_analysis( - image_source=self.IMAGE_URL, - visual_features=[sdk.models.VisualFeatures.DENSE_CAPTIONS], - gender_neutral_caption=True, - **kwargs - ) - - await self._do_async_analysis( - image_source=self.IMAGE_URL, - visual_features=[sdk.models.VisualFeatures.SMART_CROPS], - smart_crops_aspect_ratios=[0.9, 1.33], - **kwargs - ) + async def test_async_embeddings_error_free(self, **kwargs): + client = self._create_embeddings_client(sync=False, **kwargs) + result = await client.create(input=["first phrase", "second phrase", "third phrase"]) + self._print_embeddings_result(result) + self._validate_embeddings_result(result) + await client.close() - await self._do_async_analysis( - image_source=self.IMAGE_URL, visual_features=[sdk.models.VisualFeatures.TAGS], language="en", **kwargs - ) - - await self._do_async_analysis( - image_source=self.IMAGE_URL, visual_features=[sdk.models.VisualFeatures.PEOPLE], **kwargs - ) - - await self.async_client.close() """ - -# ********************************************************************************** -# -# ERROR TESTS -# -# ********************************************************************************** + # ********************************************************************************** + # + # ERROR TESTS + # + # ********************************************************************************** -""" @ServicePreparer() + # Test one chat completion async call with bad key (authentication failure) + @ServicePreparerEmbeddings() @recorded_by_proxy_async - async def test_analyze_async_authentication_failure(self, **kwargs): - - self._create_client_for_authentication_failure(sync=False, **kwargs) - - await self._do_async_analysis_with_error( - image_source=self.IMAGE_URL, - visual_features=[sdk.models.VisualFeatures.TAGS], - expected_status_code=401, - 
expected_message_contains="Access denied", - **kwargs - ) - - await self.async_client.close() """ + async def test_embeddings_with_auth_failure(self, **kwargs): + client = self._create_embeddings_client(sync=False, bad_key=True, **kwargs) + try: + result = await client.create(input=["first phrase", "second phrase", "third phrase"]) + except AzureError as e: + print(e) + assert hasattr(e, "status_code") + assert e.status_code == 401 + assert "unauthorized" in e.message.lower() + await client.close() diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index f857e19c9c57..42f692b53a7f 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -5,8 +5,9 @@ import inspect import azure.ai.inference as sdk -from model_inference_test_base import ModelClientTestBase, ServicePreparer +from model_inference_test_base import ModelClientTestBase, ServicePreparerChatCompletions, ServicePreparerEmbeddings from devtools_testutils import recorded_by_proxy +from azure.core.exceptions import AzureError # The test class name needs to start with "Test" to get collected by pytest @@ -19,59 +20,55 @@ class TestModelClient(ModelClientTestBase): # ********************************************************************************** # Test one chat completion - @ServicePreparer() + @ServicePreparerChatCompletions() @recorded_by_proxy - def test_chat_completion(self, **kwargs): - - self._create_client_for_standard_test(sync=True, **kwargs) - - messages = [sdk.models.UserMessage(content="How many feet are in a mile?")] - - self._do_chat_completions(messages=messages, **kwargs) - - self.client.close() - - # Test some visual features, one after the other, from file, using default settings - - -""" @ServicePreparer() + def test_chat_completions_error_free(self, **kwargs): + client = self._create_chat_client(**kwargs) + result = client.create(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) + self._print_chat_completions_result(result) + self._validate_chat_completions_result(result, ["5280", "5,280"]) + client.close() + + # Test one embeddings call + @ServicePreparerEmbeddings() @recorded_by_proxy - def test_analyze_sync_single_feature_from_file(self, **kwargs): - - self._create_client_for_standard_analysis(sync=True, get_connection_url=True, **kwargs) - - self._do_analysis( - image_source=self.IMAGE_FILE, - visual_features=[sdk.models.VisualFeatures.CAPTION], - query_params={"key1": "value1", "key2": "value2"}, - **kwargs - ) - - self._do_analysis(image_source=self.IMAGE_FILE, visual_features=[sdk.models.VisualFeatures.READ], **kwargs) + def test_embeddings_error_free(self, **kwargs): + client = self._create_embeddings_client(**kwargs) + result = client.create(input=["first phrase", "second phrase", "third phrase"]) + self._print_embeddings_result(result) + self._validate_embeddings_result(result) + client.close() - self._do_analysis(image_source=self.IMAGE_FILE, visual_features=[sdk.models.VisualFeatures.TAGS], **kwargs) - - self.client.close() """ - -# ********************************************************************************** -# -# ERROR TESTS -# -# ********************************************************************************** + # ********************************************************************************** + # + # ERROR TESTS + # + # 
********************************************************************************** -""" @ServicePreparer() + @ServicePreparerChatCompletions() @recorded_by_proxy - def test_analyze_sync_image_url_does_not_exist(self, **kwargs): - - self._create_client_for_standard_analysis(sync=True, **kwargs) - - self._do_analysis_with_error( - image_source="https://www.this.is.a.bad.url.com/for/sure.jpg", - visual_features=[sdk.models.VisualFeatures.CAPTION], - expected_status_code=400, - expected_message_contains="image url is not accessible", - **kwargs - ) + def test_chat_completion_with_auth_failure(self, **kwargs): + client = self._create_chat_client(bad_key=True, **kwargs) + try: + result = client.create(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) + except AzureError as e: + print(e) + assert hasattr(e, "status_code") + assert e.status_code == 401 + assert "unauthorized" in e.message.lower() + client.close() + + + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_embeddings_on_chat_completion_endpoint(self, **kwargs): + client = self._create_embeddings_client_with_chat_completions_credentials(**kwargs) + try: + result = client.create(input=["first phrase", "second phrase", "third phrase"]) + except AzureError as e: + print(e) + assert hasattr(e, "status_code") + assert e.status_code == 404 + assert "not found" in e.message.lower() + client.close() - self.client.close() - """ From 1e4264749d7fa8e626334c639030135e84e4ff4c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 15 Apr 2024 17:39:55 -0700 Subject: [PATCH 032/112] Minor test updates --- sdk/ai/azure-ai-inference/tests/README.md | 2 +- .../tests/test_model_inference_async_client.py | 6 +++--- .../tests/test_model_inference_client.py | 9 ++++++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index 6a698ec3abcb..4f051d1584e0 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -6,7 +6,7 @@ The instructions below are for running tests locally, on a Windows machine, agai The live tests were written against the AI models mentioned below. You will need to deploy them in [Azure AI Studio](https://ai.azure.com/) and have the endpoint and key for each one of them. 
-- TBD (fro chat completion tests) +- llama-2-13b (for chat completion tests) - TBD (for embedding tests) - TBD (for image generation tests) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 720d882b10ae..1089a796a095 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -18,7 +18,6 @@ class TestImageAnalysisAsyncClient(ModelClientTestBase): # # ********************************************************************************** - # Test two async chat completions with chat history @ServicePreparerChatCompletions() @recorded_by_proxy_async async def test_async_chat_completions_error_free(self, **kwargs): @@ -39,7 +38,6 @@ async def test_async_chat_completions_error_free(self, **kwargs): self._validate_chat_completions_result(result, ["1760", "1,760"]) await client.close() - # Test one embeddings async call @ServicePreparerEmbeddings() @recorded_by_proxy_async async def test_async_embeddings_error_free(self, **kwargs): @@ -55,16 +53,18 @@ async def test_async_embeddings_error_free(self, **kwargs): # # ********************************************************************************** - # Test one embeddings async call with bad key (authentication failure) @ServicePreparerEmbeddings() @recorded_by_proxy_async async def test_embeddings_with_auth_failure(self, **kwargs): client = self._create_embeddings_client(sync=False, bad_key=True, **kwargs) + exception_caught = False try: result = await client.create(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: + exception_caught = True print(e) assert hasattr(e, "status_code") assert e.status_code == 401 assert "unauthorized" in e.message.lower() await client.close() + assert exception_caught diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 42f692b53a7f..f90565e7b854 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -19,7 +19,6 @@ class TestModelClient(ModelClientTestBase): # # ********************************************************************************** - # Test one chat completion @ServicePreparerChatCompletions() @recorded_by_proxy def test_chat_completions_error_free(self, **kwargs): @@ -29,7 +28,6 @@ def test_chat_completions_error_free(self, **kwargs): self._validate_chat_completions_result(result, ["5280", "5,280"]) client.close() - # Test one embeddings call @ServicePreparerEmbeddings() @recorded_by_proxy def test_embeddings_error_free(self, **kwargs): @@ -49,26 +47,31 @@ def test_embeddings_error_free(self, **kwargs): @recorded_by_proxy def test_chat_completion_with_auth_failure(self, **kwargs): client = self._create_chat_client(bad_key=True, **kwargs) + exception_caught = False try: result = client.create(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) except AzureError as e: + exception_caught = True print(e) assert hasattr(e, "status_code") assert e.status_code == 401 assert "unauthorized" in e.message.lower() client.close() + assert exception_caught @ServicePreparerChatCompletions() @recorded_by_proxy def test_embeddings_on_chat_completion_endpoint(self, **kwargs): client = self._create_embeddings_client_with_chat_completions_credentials(**kwargs) + exception_caught =
False try: result = client.create(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: + exception_caught = True print(e) assert hasattr(e, "status_code") assert e.status_code == 404 assert "not found" in e.message.lower() client.close() - + assert exception_caught From 65b2bf101ee2d4e34180b76c271f9d95282903bd Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 15 Apr 2024 18:22:19 -0700 Subject: [PATCH 033/112] Add assets.json --- sdk/ai/azure-ai-inference/assets.json | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 sdk/ai/azure-ai-inference/assets.json diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json new file mode 100644 index 000000000000..710faaaf12c5 --- /dev/null +++ b/sdk/ai/azure-ai-inference/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "python", + "TagPrefix": "python/ai/azure-ai-inference", + "Tag": "python/ai/azure-ai-inference_4a5ffe4f01" +} From 32368f21e707b0f40ab4695d50a42bea0f255ade Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 16 Apr 2024 00:12:53 -0700 Subject: [PATCH 034/112] Fix test name --- sdk/ai/azure-ai-inference/assets.json | 2 +- sdk/ai/azure-ai-inference/tests/model_inference_test_base.py | 2 +- .../tests/test_model_inference_async_client.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index 710faaaf12c5..afa110bd7b7d 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_4a5ffe4f01" + "Tag": "python/ai/azure-ai-inference_1a8375ba35" } diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index cb13bf6a46a9..7978d3081abd 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -62,7 +62,7 @@ def _create_chat_client(self, *, sync: bool = True, bad_key: bool = False, **kwa else: return async_sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) - def _create_embeddings_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs) -> sdk.EmbeddingsClient | async_sdk.EmbeddingsClient: + def _create_embeddings_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs) -> Union[sdk.EmbeddingsClient, async_sdk.EmbeddingsClient]: endpoint = kwargs.pop("embeddings_endpoint") key = "00000000000000000000000000000000" if bad_key else kwargs.pop("embeddings_key") credential = AzureKeyCredential(key) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 1089a796a095..ad405e2ed7dc 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -10,7 +10,7 @@ from azure.core.exceptions import AzureError # The test class name needs to start with "Test" to get collected by pytest -class TestImageAnalysisAsyncClient(ModelClientTestBase): +class 
TestModelAsyncClient(ModelClientTestBase): # ********************************************************************************** # From 18d6baa52d4fc6ec7244887e5185b47a2f665b44 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:32:20 -0700 Subject: [PATCH 035/112] First round of Pylint fixes --- .../azure/ai/inference/_patch.py | 79 ++++++++++++++++--- .../azure/ai/inference/aio/_patch.py | 79 ++++++++++++++++--- .../azure/ai/inference/models/_patch.py | 26 +++--- ...sample_chat_completions_streaming_async.py | 8 +- .../sample_image_generation_async.py | 6 +- .../samples/sample_image_generation.py | 6 +- 6 files changed, 160 insertions(+), 44 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 85051a26bd9a..a8c9d5bb1c82 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -6,22 +6,15 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import sys import json +import sys from io import IOBase -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload +from typing import Any, Callable, Dict, Union, IO, List, Optional, TypeVar, overload from azure.core.pipeline import PipelineResponse from azure.core.rest import HttpRequest, HttpResponse from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict -from . import models as _models -from ._model_base import SdkJSONEncoder, _deserialize -from ._serialization import Serializer -from ._vendor import ChatCompletionsClientMixinABC -from ._operations._operations import build_chat_completions_create_request -from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated - from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -30,6 +23,11 @@ ResourceNotModifiedError, map_error, ) +from . 
import models as _models +from ._model_base import SdkJSONEncoder +from ._serialization import Serializer +from ._operations._operations import build_chat_completions_create_request +from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -45,6 +43,62 @@ class ChatCompletionsClient(ChatCompletionsClientGenerated): + + @overload + def create_streaming( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.StreamingChatCompletions: + # pylint: disable=line-too-long + """ + TBD + """ + + @overload + def create_streaming( + self, + *, + messages: List[_models.ChatRequestMessage], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, + stop: Optional[List[str]] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> _models.StreamingChatCompletions: + # pylint: disable=line-too-long + """ + TBD + """ + + @overload + def create_streaming( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.StreamingChatCompletions: + # pylint: disable=line-too-long + """ + TBD + """ + @distributed_trace def create_streaming( self, @@ -58,7 +112,7 @@ def create_streaming( temperature: Optional[float] = None, top_p: Optional[float] = None, max_tokens: Optional[int] = None, - response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, stop: Optional[List[str]] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ @@ -67,7 +121,10 @@ def create_streaming( seed: Optional[int] = None, **kwargs: Any ) -> _models.StreamingChatCompletions: - + # pylint: disable=line-too-long + """ + TBD + """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index ee18cb8ae5e9..e1e030ee05c9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -6,18 +6,15 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from io import IOBase import json import sys -from typing import List -from .. 
import models as _models -from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated -from typing import Callable, Any, Union, IO, Optional, Dict, TypeVar -from azure.core.utils import case_insensitive_dict + +from io import IOBase +from typing import Any, Callable, Dict, Union, IO, List, Optional, TypeVar, overload from azure.core.pipeline import PipelineResponse from azure.core.rest import AsyncHttpResponse, HttpRequest from azure.core.tracing.decorator_async import distributed_trace_async -from .._model_base import SdkJSONEncoder, _deserialize +from azure.core.utils import case_insensitive_dict from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -26,6 +23,9 @@ ResourceNotModifiedError, map_error, ) +from .. import models as _models +from .._model_base import SdkJSONEncoder +from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from .._operations._operations import build_chat_completions_create_request if sys.version_info >= (3, 9): @@ -39,8 +39,64 @@ class ChatCompletionsClient(ChatCompletionsClientGenerated): + + @overload + async def create_streaming( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.StreamingChatCompletions: + # pylint: disable=line-too-long + """ + TBD + """ + + @overload + async def create_streaming( + self, + *, + messages: List[_models.ChatRequestMessage], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, + stop: Optional[List[str]] = None, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any + ) -> _models.StreamingChatCompletions: + # pylint: disable=line-too-long + """ + TBD + """ + + @overload + async def create_streaming( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.StreamingChatCompletions: + # pylint: disable=line-too-long + """ + TBD + """ + @distributed_trace_async - async def get_streaming_chat_completions( + async def create_streaming( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -52,7 +108,7 @@ async def get_streaming_chat_completions( temperature: Optional[float] = None, top_p: Optional[float] = None, max_tokens: Optional[int] = None, - response_format: Optional[_models.ChatCompletionsResponseFormat] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, stop: Optional[List[str]] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ @@ -61,7 +117,10 @@ async def get_streaming_chat_completions( seed: Optional[int] = None, **kwargs: Any ) -> _models.StreamingChatCompletions: - + # pylint: disable=line-too-long + """ + TBD + """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 
2c89fba5d31f..a71380a52a87 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -13,8 +13,8 @@ import types from typing import List, Union -from .. import models as _models from azure.core.rest import HttpResponse +from .. import models as _models class StreamingChatCompletions: @@ -105,30 +105,30 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) # Convert `bytes` to string and split the string by newline, while keeping the new line char. # the last may be a partial "line" that does not contain a newline char at the end. - line_list = re.split(r"(?<=\n)", element.decode("utf-8")) - for index, element in enumerate(line_list): + line_list: List[str] = re.split(r"(?<=\n)", element.decode("utf-8")) + for index, line in enumerate(line_list): if self.ENABLE_CLASS_LOGS: - print(f"[original] {repr(element)}") + print(f"[original] {repr(line)}") if index == 0: - element = self._incomplete_json + element + line = self._incomplete_json + line self._incomplete_json = "" - if index == len(line_list) - 1 and not element.endswith("\n"): - self._incomplete_json = element + if index == len(line_list) - 1 and not line.endswith("\n"): + self._incomplete_json = line return if self.ENABLE_CLASS_LOGS: - print(f"[modified] {repr(element)}") + print(f"[modified] {repr(line)}") - if element == "\n": # Empty line, indicating flush output to client + if line == "\n": # Empty line, indicating flush output to client continue - if not element.startswith(self.SSE_DATA_EVENT_PREFIX): - raise ValueError(f"SSE event not supported (line `{element}`)") + if not line.startswith(self.SSE_DATA_EVENT_PREFIX): + raise ValueError(f"SSE event not supported (line `{line}`)") - if element.startswith(self.SSE_DATA_EVENT_DONE): + if line.startswith(self.SSE_DATA_EVENT_DONE): self._done = True return @@ -137,7 +137,7 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) # and add it to the queue. self._queue.put( _models.ChatCompletionsUpdate._deserialize( - json.loads(element[len(self.SSE_DATA_EVENT_PREFIX) : -1]), [] + json.loads(line[len(self.SSE_DATA_EVENT_PREFIX) : -1]), [] ) ) diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 49467ce8bb01..2c828b86ffc1 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -40,7 +40,7 @@ async def sample_chat_completions_streaming_async(): # Do a single streaming chat completion operation. Start the operation and get a Future object. 
future = asyncio.ensure_future( - client.get_streaming_chat_completions( + client.create_streaming( messages=[ SystemMessage(content="You are an AI assistant that helps people find information."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), @@ -57,12 +57,8 @@ async def sample_chat_completions_streaming_async(): result = future.result() # Iterate on the result to get chat completion updates, as they arrive from the service - accumulated_content = "" async for update in result: - accumulated_content += update.choices[0].delta.content if update.choices[0].delta.content is not None else "" - print_chat_completions_delta(update) - - print(f"Accumulated content: {accumulated_content}") + print(update.choices[0].delta.content, end="") # Remember to always close the asynchronous client when you are done with it await client.close() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index 2d48d74bc651..bdc1adc72235 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -21,6 +21,7 @@ async def sample_image_generation_async(): import os + import base64 from azure.ai.inference.aio import ImageGenerationClient from azure.core.credentials import AzureKeyCredential @@ -53,8 +54,9 @@ async def sample_image_generation_async(): # Save generated image to file and print other results the the console print("Image generation result:") for index, item in enumerate(result.data): - with open(f"image_{index}.png", "wb") as image: - image.write(item.b64_json.decode("base64")) + if item.b64_json is not None: + with open(f"image_{index}.png", "wb") as image: + image.write(base64.b64decode(item.b64_json)) print(f"id: {result.id}") print(f"model: {result.model}") print(f"created: {result.created}") diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py index 722d0b88c621..f86e3ee7031a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py @@ -21,6 +21,7 @@ def sample_image_generation(): import os + import base64 try: endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"] @@ -38,8 +39,9 @@ def sample_image_generation(): result = client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") - with open(f"image.png", "wb") as image: - image.write(result.data[0].b64_json.decode("base64")) + if result.data[0].b64_json is not None: + with open(f"image.png", "wb") as image: + image.write(base64.b64decode(result.data[0].b64_json)) # [END image_generation] From 124304869b03d48d699751359000a580ff2283d0 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:18:11 -0700 Subject: [PATCH 036/112] Fix pyright errors --- .../azure/ai/inference/_patch.py | 2 +- .../azure/ai/inference/aio/_patch.py | 2 +- .../azure/ai/inference/models/_patch.py | 23 ++++++++++--------- ...sample_chat_completions_streaming_async.py | 23 +++---------------- .../sample_chat_completions_streaming.py | 5 +++- 5 files changed, 21 insertions(+), 34 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 
a8c9d5bb1c82..0883ba05e0fd 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -190,7 +190,7 @@ def create_streaming( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.StreamingChatCompletions(response.iter_bytes()) + return _models.StreamingChatCompletions(response) __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index e1e030ee05c9..0d3d75a6e58e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -186,7 +186,7 @@ async def create_streaming( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.StreamingChatCompletions(response.iter_bytes()) + return _models.StreamingChatCompletions(response) __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index a71380a52a87..9320d7d44ff2 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -12,8 +12,8 @@ import json import types -from typing import List, Union -from azure.core.rest import HttpResponse +from typing import List, Union, AsyncIterator, Iterator +from azure.core.rest import HttpResponse, AsyncHttpResponse from .. import models as _models @@ -33,12 +33,12 @@ class StreamingChatCompletions: # The line indicating the end of the SSE stream SSE_DATA_EVENT_DONE = "data: [DONE]" - def __init__(self, bytes_iterator: Union[types.AsyncGeneratorType, types.GeneratorType]): - self._bytes_iterator = bytes_iterator - self._is_async_iterator = isinstance(self._bytes_iterator, types.AsyncGeneratorType) + def __init__(self, response: Union[HttpResponse, AsyncHttpResponse]): + self.response = response + self._bytes_iterator: Union[AsyncIterator[bytes], Iterator[bytes]] = response.iter_bytes() + self._is_async_iterator = isinstance(self.response, AsyncHttpResponse) self._queue = queue.Queue() self._incomplete_json = "" - self._done = False def __aiter__(self): if not self._is_async_iterator: @@ -78,7 +78,6 @@ async def _read_next_block_async(self): element = await self._bytes_iterator.__anext__() except StopAsyncIteration: await self.aclose() - self._done = True return self._deserialize_and_add_to_queue(element, start_time) @@ -90,7 +89,6 @@ def _read_next_block(self): element = next(self._bytes_iterator) except StopIteration: self.close() - self._done = True return self._deserialize_and_add_to_queue(element, start_time) @@ -129,7 +127,6 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) raise ValueError(f"SSE event not supported (line `{line}`)") if line.startswith(self.SSE_DATA_EVENT_DONE): - self._done = True return # If you reached here, the line should contain `data: {...}\n` @@ -151,10 +148,14 @@ def __exit__(self) -> None: self.close() def close(self) -> None: - self._bytes_iterator.close() + if self.response: + self.response.close() + self.response = None async def aclose(self) -> None: - await self._bytes_iterator.aclose() + if self.response: + await self.response.close() + self.response = None __all__: List[str] = [ diff --git 
a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 2c828b86ffc1..d1ca4710ce23 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -42,7 +42,7 @@ async def sample_chat_completions_streaming_async(): future = asyncio.ensure_future( client.create_streaming( messages=[ - SystemMessage(content="You are an AI assistant that helps people find information."), + SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), ] ) @@ -58,29 +58,12 @@ async def sample_chat_completions_streaming_async(): # Iterate on the result to get chat completion updates, as they arrive from the service async for update in result: - print(update.choices[0].delta.content, end="") + if update.choices[0].delta.content: + print(update.choices[0].delta.content, end="") # Remember to always close the asynchronous client when you are done with it await client.close() - -def print_chat_completions_delta(update: ChatCompletionsUpdate): - print( - f"content: {repr(update.choices[0].delta.content)}, " - f"role: {update.choices[0].delta.role}, " - f"finish_reason: {update.choices[0].finish_reason}, " - f"index: {update.choices[0].index}" - ) - print(f"id: {update.id}, created: {update.created}, model: {update.model}, object: {update.object}") - if update.usage is not None: - print( - f"usage: capacity_type: {update.usage.capacity_type}, " - f"prompt_tokens: {update.usage.prompt_tokens}, " - f"completion_tokens: {update.usage.completion_tokens}, " - f"usage.total_tokens: {update.usage.total_tokens}" - ) - - async def main(): await sample_chat_completions_streaming_async() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 22e541eb48bc..9ed9ed275d41 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -45,7 +45,10 @@ def sample_chat_completions_streaming(): ) for update in result: - print(update.choices[0].delta.content, end="") + if update.choices[0].delta.content: + print(update.choices[0].delta.content, end="") + + result.close() # [END chat_completions_streaming] From 3c84d22cfe7640188a68e5fcc5bf77adf079a08c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:29:31 -0700 Subject: [PATCH 037/112] Fix all pyright errors --- .../azure/ai/inference/models/_patch.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 9320d7d44ff2..1b9c45402c26 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -12,7 +12,7 @@ import json import types -from typing import List, Union, AsyncIterator, Iterator +from typing import List, Union, AsyncIterator, Iterator, cast from azure.core.rest import HttpResponse, AsyncHttpResponse from .. 
import models as _models @@ -75,7 +75,8 @@ async def _read_next_block_async(self): if self.ENABLE_CLASS_LOGS: start_time = time.time() try: - element = await self._bytes_iterator.__anext__() + # Use 'cast' to make 'pyright' error go away + element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__() except StopAsyncIteration: await self.aclose() return @@ -86,7 +87,8 @@ def _read_next_block(self): if self.ENABLE_CLASS_LOGS: start_time = time.time() try: - element = next(self._bytes_iterator) + # Use 'cast' to make 'pyright' error go away + element = cast(Iterator[bytes], self._bytes_iterator).__next__() except StopIteration: self.close() return From c951405f8e2e8af159c98e0ca5a3faff3efa8fc0 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:45:57 -0700 Subject: [PATCH 038/112] Fix more quality gates --- .../azure/ai/inference/_patch.py | 217 ++++++++++++++++-- .../azure/ai/inference/aio/_patch.py | 216 +++++++++++++++-- .../azure/ai/inference/models/_patch.py | 17 +- .../sample_chat_completions_async.py | 1 - ...sample_chat_completions_streaming_async.py | 9 +- .../async_samples/sample_embeddings_async.py | 1 - .../sample_image_generation_async.py | 1 - .../samples/sample_chat_completions.py | 1 - .../sample_chat_completions_streaming.py | 1 - .../samples/sample_embeddings.py | 1 - .../samples/sample_image_generation.py | 1 - 11 files changed, 414 insertions(+), 52 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 0883ba05e0fd..dd3a968f7bef 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -10,9 +10,8 @@ import sys from io import IOBase -from typing import Any, Callable, Dict, Union, IO, List, Optional, TypeVar, overload +from typing import Any, Dict, Union, IO, List, Optional, overload from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict from azure.core.exceptions import ( @@ -35,13 +34,9 @@ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False - class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload @@ -54,8 +49,25 @@ def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. 
+ :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.StreamingChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ @overload @@ -81,8 +93,87 @@ def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. + :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. + :paramtype presence_penalty: float + :keyword temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. + Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature called nucleus sampling. 
This value + causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype top_p: float + :keyword max_tokens: The maximum number of tokens to generate. Default value is None. + :paramtype max_tokens: int + :keyword response_format: An object specifying the format that the model must output. Used to + enable JSON mode. Known values are: "text" and "json_object". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat + :keyword stop: A collection of textual sequences that will end completions generation. Default + value is None. + :paramtype stop: list[str] + :keyword tools: The available tool definitions that the chat completions request can use, + including caller-defined functions. Default value is None. + :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] + :keyword tool_choice: If specified, the model will configure which of the provided tools it can + use for the chat completions response. Is either a Union[str, + "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. + Default value is None. + :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or + ~azure.ai.inference.models.ChatCompletionsNamedToolSelection + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ @overload @@ -95,8 +186,25 @@ def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. 
The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace @@ -122,8 +230,86 @@ def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. + :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. + :paramtype presence_penalty: float + :keyword temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. + Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value + causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. 
+ It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype top_p: float + :keyword max_tokens: The maximum number of tokens to generate. Default value is None. + :paramtype max_tokens: int + :keyword response_format: An object specifying the format that the model must output. Used to + enable JSON mode. Known values are: "text" and "json_object". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat + :keyword stop: A collection of textual sequences that will end completions generation. Default + value is None. + :paramtype stop: list[str] + :keyword tools: The available tool definitions that the chat completions request can use, + including caller-defined functions. Default value is None. + :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] + :keyword tool_choice: If specified, the model will configure which of the provided tools it can + use for the chat completions response. Is either a Union[str, + "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. + Default value is None. + :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or + ~azure.ai.inference.models.ChatCompletionsNamedToolSelection + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, @@ -137,7 +323,6 @@ def create_streaming( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) if body is _Unset: if messages is _Unset: @@ -183,7 +368,7 @@ def create_streaming( _request, stream=True, **kwargs ) - response: HttpResponse = pipeline_response.http_response + response = pipeline_response.http_response if response.status_code not in [200]: response.read() # Load the body in memory and close the socket diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 0d3d75a6e58e..a91fd612535d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -10,9 +10,8 @@ import sys from io import IOBase -from typing import Any, Callable, Dict, Union, IO, List, Optional, TypeVar, overload +from typing import Any, Dict, Union, IO, List, Optional, overload from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from azure.core.exceptions import ( @@ -34,9 +33,6 @@ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, 
AsyncHttpResponse], T, Dict[str, Any]], Any]] - class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -50,8 +46,25 @@ async def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.StreamingChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ @overload @@ -77,8 +90,87 @@ async def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. 
+ :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. + :paramtype presence_penalty: float + :keyword temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. + Higher values will make output more random while lower values will make results more focused + and deterministic. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value + causes the + model to consider the results of tokens with the provided probability mass. As an example, a + value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be + considered. + It is not recommended to modify temperature and top_p for the same completions request as the + interaction of these two settings is difficult to predict. Default value is None. + :paramtype top_p: float + :keyword max_tokens: The maximum number of tokens to generate. Default value is None. + :paramtype max_tokens: int + :keyword response_format: An object specifying the format that the model must output. Used to + enable JSON mode. Known values are: "text" and "json_object". Default value is None. + :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat + :keyword stop: A collection of textual sequences that will end completions generation. Default + value is None. + :paramtype stop: list[str] + :keyword tools: The available tool definitions that the chat completions request can use, + including caller-defined functions. Default value is None. + :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] + :keyword tool_choice: If specified, the model will configure which of the provided tools it can + use for the chat completions response. Is either a Union[str, + "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. + Default value is None. + :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or + ~azure.ai.inference.models.ChatCompletionsNamedToolSelection + :keyword seed: If specified, the system will make a best effort to sample deterministically + such that repeated requests with the + same seed and parameters should return the same result. Determinism is not guaranteed.". + Default value is None. + :paramtype seed: int + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ @overload @@ -91,10 +183,27 @@ async def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. 
Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: """ - TBD - """ - + @distributed_trace_async async def create_streaming( self, @@ -118,8 +227,86 @@ async def create_streaming( **kwargs: Any ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long - """ - TBD + """Gets streaming chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The collection of context messages associated with this chat completions + request. + Typical usage begins with a chat message for the System role that provides instructions for + the behavior of the assistant, followed by alternating messages between the User and + Assistant roles. Required. + :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword frequency_penalty: A value that influences the probability of generated tokens + appearing based on their cumulative + frequency in generated text. + Positive values will make tokens less likely to appear as their frequency increases and + decrease the likelihood of the model repeating the same statements verbatim. Default value is + None. + :paramtype frequency_penalty: float + :keyword presence_penalty: A value that influences the probability of generated tokens + appearing based on their existing + presence in generated text. + Positive values will make tokens less likely to appear when they already exist and increase + the + model's likelihood to output new topics. Default value is None. + :paramtype presence_penalty: float + :keyword temperature: The sampling temperature to use that controls the apparent creativity of + generated completions. 
+         Higher values will make output more random while lower values will make results more
+         focused and deterministic.
+         It is not recommended to modify temperature and top_p for the same completions request as
+         the interaction of these two settings is difficult to predict. Default value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This
+         value causes the model to consider the results of tokens with the provided probability
+         mass. As an example, a value of 0.15 will cause only the tokens comprising the top 15% of
+         probability mass to be considered.
+         It is not recommended to modify temperature and top_p for the same completions request as
+         the interaction of these two settings is difficult to predict. Default value is None.
+        :paramtype top_p: float
+        :keyword max_tokens: The maximum number of tokens to generate. Default value is None.
+        :paramtype max_tokens: int
+        :keyword response_format: An object specifying the format that the model must output. Used to
+         enable JSON mode. Known values are: "text" and "json_object". Default value is None.
+        :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat
+        :keyword stop: A collection of textual sequences that will end completions generation. Default
+         value is None.
+        :paramtype stop: list[str]
+        :keyword tools: The available tool definitions that the chat completions request can use,
+         including caller-defined functions. Default value is None.
+        :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition]
+        :keyword tool_choice: If specified, the model will configure which of the provided tools it can
+         use for the chat completions response. Is either a Union[str,
+         "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type.
+         Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or
+         ~azure.ai.inference.models.ChatCompletionsNamedToolSelection
+        :keyword seed: If specified, the system will make a best effort to sample deterministically
+         such that repeated requests with the same seed and parameters should return the same
+         result. Determinism is not guaranteed. Default value is None.
+        :paramtype seed: int
+        :return: StreamingChatCompletions. An iterator of ChatCompletionsUpdate objects.
+        :rtype: ~azure.ai.inference.models.StreamingChatCompletions
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
         error_map = {
             401: ClientAuthenticationError,
@@ -133,7 +320,6 @@ async def create_streaming(
         _params = kwargs.pop("params", {}) or {}

         content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
-        cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None)

         if body is _Unset:
             if messages is _Unset:
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
index 1b9c45402c26..f00d968be117 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
@@ -10,7 +10,6 @@
 import time
 import re
 import json
-import types
 from typing import List, Union, AsyncIterator, Iterator, cast

 from azure.core.rest import HttpResponse, AsyncHttpResponse
@@ -37,7 +36,7 @@ def __init__(self, response: Union[HttpResponse, AsyncHttpResponse]):
         self.response = response
         self._bytes_iterator: Union[AsyncIterator[bytes], Iterator[bytes]] = response.iter_bytes()
         self._is_async_iterator = isinstance(self.response, AsyncHttpResponse)
-        self._queue = queue.Queue()
+        self._queue: "queue.Queue[_models.ChatCompletionsUpdate]" = queue.Queue()
         self._incomplete_json = ""

     def __aiter__(self):
@@ -75,8 +74,8 @@ async def _read_next_block_async(self):
         if self.ENABLE_CLASS_LOGS:
             start_time = time.time()
         try:
-           # Use 'cast' to make 'pyright' error go away
-           element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__()
+            # Use 'cast' to make 'pyright' error go away
+            element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__()
         except StopAsyncIteration:
             await self.aclose()
             return
@@ -135,6 +134,7 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0)
         # where the curly braces contain a valid JSON object. Deserialize it into a ChatCompletionsUpdate object
         # and add it to the queue.
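+        # As an illustrative sketch only (the field values below are assumptions, not
+        # captured service output), a single SSE data line handled here may look like:
+        #   data: {"id": "...", "object": "chat.completion.chunk", "choices": [{"index": 0, "delta": {"content": "Hi"}}]}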
         self._queue.put(
+            # pylint: disable=W0212 # Access to a protected member _deserialize of a client class
             _models.ChatCompletionsUpdate._deserialize(
                 json.loads(line[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []
             )
@@ -146,18 +146,17 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0)
     def __enter__(self):
         return self

-    def __exit__(self) -> None:
+    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
         self.close()

     def close(self) -> None:
-        if self.response:
+        if isinstance(self.response, HttpResponse):
             self.response.close()
-            self.response = None

     async def aclose(self) -> None:
-        if self.response:
+        # `if` statement added to avoid mypy error: Incompatible types in "await" (actual type "Optional[Coroutine[Any, Any, None]]", expected type "Awaitable[Any]")
+        if isinstance(self.response, AsyncHttpResponse):
             await self.response.close()
-            self.response = None

 __all__: List[str] = [
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
index c4b7d1ccbdcf..a33efa86a04e 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py
@@ -19,7 +19,6 @@
 """
 import asyncio

-
 async def sample_chat_completions_async():
     import os
     from azure.ai.inference.aio import ChatCompletionsClient
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
index d1ca4710ce23..2d93dbcc0785 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
@@ -18,13 +18,12 @@
  2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
""" import asyncio -import os -from azure.ai.inference.aio import ChatCompletionsClient -from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate -from azure.core.credentials import AzureKeyCredential - async def sample_chat_completions_streaming_async(): + import os + from azure.ai.inference.aio import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate + from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables try: diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 782c7c23cd2e..634baec7f900 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -18,7 +18,6 @@ """ import asyncio - async def sample_embeddings_async(): import os from azure.ai.inference.aio import EmbeddingsClient diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index bdc1adc72235..b23c719ec9c0 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -18,7 +18,6 @@ """ import asyncio - async def sample_image_generation_async(): import os import base64 diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index f239fdafeaed..998911df0943 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -18,7 +18,6 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 9ed9ed275d41..1a0d15668399 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -18,7 +18,6 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions_streaming(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 66c112bb7692..1d694188502f 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -18,7 +18,6 @@ 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_embeddings(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py index f86e3ee7031a..3b4fded4451f 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py @@ -18,7 +18,6 @@ 2) IMAGE_GENERATION_KEY - Your model key (a 32-character string). Keep it secret. 
""" - def sample_image_generation(): import os import base64 From e18d79c450b837fa3b05a6d819a38b6cca9b2d3c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 17 Apr 2024 16:20:16 -0700 Subject: [PATCH 039/112] Add streaming tests --- sdk/ai/azure-ai-inference/assets.json | 2 +- .../tests/model_inference_test_base.py | 87 ++++++++++++++++--- .../test_model_inference_async_client.py | 19 +++- .../tests/test_model_inference_client.py | 13 +++ 4 files changed, 107 insertions(+), 14 deletions(-) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index afa110bd7b7d..30cf7e8830ab 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_1a8375ba35" + "Tag": "python/ai/azure-ai-inference_c7e4a15b67" } diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 7978d3081abd..7044efa23821 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -53,23 +53,35 @@ class ModelClientTestBase(AzureRecordedTestCase): # Regular expression describing the pattern of a result ID (e.g. "183b56eb-8512-484d-be50-5d8df82301a2") REGEX_RESULT_ID = re.compile(r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$') - def _create_chat_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs): + # Methods to load credentials from environment variables + def _load_chat_credentials(self, *, bad_key: bool, **kwargs): endpoint = kwargs.pop("chat_completions_endpoint") key = "00000000000000000000000000000000" if bad_key else kwargs.pop("chat_completions_key") credential = AzureKeyCredential(key) - if sync: - return sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) - else: - return async_sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + return endpoint, credential - def _create_embeddings_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs) -> Union[sdk.EmbeddingsClient, async_sdk.EmbeddingsClient]: + def _load_embeddings_credentials(self, *, bad_key: bool, **kwargs): endpoint = kwargs.pop("embeddings_endpoint") key = "00000000000000000000000000000000" if bad_key else kwargs.pop("embeddings_key") credential = AzureKeyCredential(key) - if sync: - return sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) - else: - return async_sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + return endpoint, credential + + # Methos to create the different sync and async clients + def _create_async_chat_client(self, *, bad_key: bool = False, **kwargs) -> async_sdk.ChatCompletionsClient: + endpoint, credential = self._load_chat_credentials(bad_key=bad_key, **kwargs) + return async_sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + + def _create_chat_client(self, *, bad_key: bool = False, **kwargs) -> sdk.ChatCompletionsClient: + endpoint, credential = self._load_chat_credentials(bad_key=bad_key, **kwargs) + return sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, 
logging_enable=LOGGING_ENABLED) + + def _create_async_embeddings_client(self, *, bad_key: bool = False, **kwargs) -> async_sdk.EmbeddingsClient: + endpoint, credential = self._load_embeddings_credentials(bad_key=bad_key, **kwargs) + return async_sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + + def _create_embeddings_client(self, *, sync: bool = True, bad_key: bool = False, **kwargs) -> sdk.EmbeddingsClient: + endpoint, credential = self._load_embeddings_credentials(bad_key=bad_key, **kwargs) + return sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) def _create_embeddings_client_with_chat_completions_credentials(self, **kwargs) -> sdk.EmbeddingsClient: endpoint = kwargs.pop("chat_completions_endpoint") @@ -95,6 +107,61 @@ def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contai assert result.usage.completion_tokens > 0 assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + @staticmethod + def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, first: bool) -> str: + if first: + # Why is 'content','created' and 'object' missing in the first update? + assert update.choices[0].delta.role == sdk.models.ChatRole.ASSISTANT + else: + assert update.choices[0].delta.role == None + assert update.choices[0].delta.content != None + assert update.created is not None + assert update.created != "" + assert update.object == "chat.completion.chunk" + assert update.choices[0].delta.tool_calls == None + assert update.choices[0].index == 0 + assert update.id is not None + assert len(update.id) == 36 + assert update.model is not None + assert update.model != "" + if update.choices[0].delta.content != None: + return update.choices[0].delta.content + else: + return "" + + @staticmethod + def _validate_chat_completions_streaming_result(result: sdk.models.StreamingChatCompletions): + count = 0 + content ="" + for update in result: + content += ModelClientTestBase._validate_chat_completions_update(update, count == 0) + count += 1 + assert count > 2 + assert len(content) > 100 # Some arbitrary number + # The last update should have a finish reason and usage + assert update.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED + assert update.usage.prompt_tokens > 0 + assert update.usage.completion_tokens > 0 + assert update.usage.total_tokens == update.usage.prompt_tokens + update.usage.completion_tokens + if ModelClientTestBase.PRINT_RESULT: + print(content) + + @staticmethod + async def _validate_async_chat_completions_streaming_result(result: sdk.models.StreamingChatCompletions): + count = 0 + content = "" + async for update in result: + content += ModelClientTestBase._validate_chat_completions_update(update, count == 0) + count += 1 + assert count > 2 + assert len(content) > 100 # Some arbitrary number + # The last update should have a finish reason and usage + assert update.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED + assert update.usage.prompt_tokens > 0 + assert update.usage.completion_tokens > 0 + assert update.usage.total_tokens == update.usage.prompt_tokens + update.usage.completion_tokens + if ModelClientTestBase.PRINT_RESULT: + print(content) @staticmethod def _print_chat_completions_result(result: sdk.models.ChatCompletions): diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py 
index ad405e2ed7dc..bd21eae6a6d0 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
@@ -26,7 +26,7 @@ async def test_async_chat_completions_error_free(self, **kwargs):
             sdk.models.UserMessage(content="How many feet are in a mile?")
         ]

-        client = self._create_chat_client(sync=False, **kwargs)
+        client = self._create_async_chat_client(**kwargs)
         result = await client.create(messages=messages)
         self._print_chat_completions_result(result)
         self._validate_chat_completions_result(result, ["5280", "5,280"])
@@ -38,10 +38,23 @@ async def test_async_chat_completions_error_free(self, **kwargs):
         self._validate_chat_completions_result(result, ["1760", "1,760"])
         await client.close()

+    @ServicePreparerChatCompletions()
+    @recorded_by_proxy_async
+    async def test_async_chat_completions_streaming_error_free(self, **kwargs):
+        client = self._create_async_chat_client(**kwargs)
+        result = await client.create_streaming(
+            messages=[
+                sdk.models.SystemMessage(content="You are a helpful assistant."),
+                sdk.models.UserMessage(content="Give me 5 good reasons why I should exercise every day."),
+            ]
+        )
+        await self._validate_async_chat_completions_streaming_result(result)
+        await client.close()
+
     @ServicePreparerEmbeddings()
     @recorded_by_proxy_async
     async def test_async_embeddings_error_free(self, **kwargs):
-        client = self._create_embeddings_client(sync=False, **kwargs)
+        client = self._create_async_embeddings_client(**kwargs)
         result = await client.create(input=["first phrase", "second phrase", "third phrase"])
         self._print_embeddings_result(result)
         self._validate_embeddings_result(result)
@@ -56,7 +69,7 @@ async def test_async_embeddings_error_free(self, **kwargs):
     @ServicePreparerEmbeddings()
     @recorded_by_proxy_async
     async def test_embeddings_with_auth_failure(self, **kwargs):
-        client = self._create_embeddings_client(sync=False, bad_key=True, **kwargs)
+        client = self._create_async_embeddings_client(bad_key=True, **kwargs)
         exception_caught = False
         try:
             result = await client.create(input=["first phrase", "second phrase", "third phrase"])
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
index f90565e7b854..7441d3cbb8d6 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
@@ -28,6 +28,19 @@ def test_chat_completions_error_free(self, **kwargs):
         self._validate_chat_completions_result(result, ["5280", "5,280"])
         client.close()

+    @ServicePreparerChatCompletions()
+    @recorded_by_proxy
+    def test_chat_completions_streaming_error_free(self, **kwargs):
+        client = self._create_chat_client(**kwargs)
+        result = client.create_streaming(
+            messages=[
+                sdk.models.SystemMessage(content="You are a helpful assistant."),
+                sdk.models.UserMessage(content="Give me 5 good reasons why I should exercise every day."),
+            ]
+        )
+        self._validate_chat_completions_streaming_result(result)
+        client.close()
+
     @ServicePreparerEmbeddings()
     @recorded_by_proxy
     def test_embeddings_error_free(self, **kwargs):

From 51f30d3168b7a5ae85cb42e6fbecbbf40852228f Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Wed, 17 Apr 2024 17:26:45 -0700
Subject: [PATCH 040/112] Fix streaming to work with small HTTP buffers (tested
 down to 64 bytes)

---
 .../azure/ai/inference/models/_patch.py | 89 +++++++++----------
...sample_chat_completions_streaming_async.py | 9 +- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index f00d968be117..59d312e25102 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -33,71 +33,60 @@ class StreamingChatCompletions: SSE_DATA_EVENT_DONE = "data: [DONE]" def __init__(self, response: Union[HttpResponse, AsyncHttpResponse]): - self.response = response + self._response = response self._bytes_iterator: Union[AsyncIterator[bytes], Iterator[bytes]] = response.iter_bytes() - self._is_async_iterator = isinstance(self.response, AsyncHttpResponse) + self._is_async_iterator = isinstance(self._response, AsyncHttpResponse) self._queue: "queue.Queue[_models.ChatCompletionsUpdate]" = queue.Queue() self._incomplete_json = "" - - def __aiter__(self): - if not self._is_async_iterator: - raise ValueError("This method is only supported for async iterators") - return self + self._done = False # Will be set to True when reading 'data: [DONE]' line def __iter__(self): if self._is_async_iterator: raise ValueError("This method is not supported for async iterators") return self - async def __anext__(self) -> _models.ChatCompletionsUpdate: - if not self._is_async_iterator: - raise ValueError("This method is only supported for async iterators") - if self._queue.empty(): - await self._read_next_block_async() - if self._queue.empty(): - await self.aclose() - raise StopAsyncIteration - return self._queue.get() - def __next__(self) -> _models.ChatCompletionsUpdate: if self._is_async_iterator: raise ValueError("This method is not supported for async iterators") + while self._queue.empty() and not self._done: + self._done = self._read_next_block() if self._queue.empty(): - self._read_next_block() - if self._queue.empty(): - self.close() raise StopIteration return self._queue.get() - async def _read_next_block_async(self): - start_time = 0.0 - if self.ENABLE_CLASS_LOGS: - start_time = time.time() - try: - # Use 'cast' to make 'pyright' error go away - element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__() - except StopAsyncIteration: - await self.aclose() - return - self._deserialize_and_add_to_queue(element, start_time) - - def _read_next_block(self): - start_time = 0.0 - if self.ENABLE_CLASS_LOGS: - start_time = time.time() + def _read_next_block(self) -> bool: try: # Use 'cast' to make 'pyright' error go away element = cast(Iterator[bytes], self._bytes_iterator).__next__() except StopIteration: self.close() - return - self._deserialize_and_add_to_queue(element, start_time) + return True + return self._deserialize_and_add_to_queue(element) + + def __aiter__(self): + if not self._is_async_iterator: + raise ValueError("This method is only supported for async iterators") + return self + + async def __anext__(self) -> _models.ChatCompletionsUpdate: + if not self._is_async_iterator: + raise ValueError("This method is only supported for async iterators") + while self._queue.empty() and not self._done: + self._done = await self._read_next_block_async() + if self._queue.empty(): + raise StopAsyncIteration + return self._queue.get() - def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0): + async def _read_next_block_async(self) -> bool: + try: + # Use 'cast' to make 'pyright' error go away + element = await cast(AsyncIterator[bytes], 
self._bytes_iterator).__anext__() + except StopAsyncIteration: + await self.aclose() + return True + return self._deserialize_and_add_to_queue(element) - if self.ENABLE_CLASS_LOGS: - print(f"Elapsed time: {int(1000*(time.time()- start_time))}ms") - print(f"Size: {len(element)} bytes") + def _deserialize_and_add_to_queue(self, element: bytes) -> bool: # Clear the queue of ChatCompletionsUpdate before processing the next block self._queue.queue.clear() @@ -116,7 +105,7 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) if index == len(line_list) - 1 and not line.endswith("\n"): self._incomplete_json = line - return + return False if self.ENABLE_CLASS_LOGS: print(f"[modified] {repr(line)}") @@ -128,7 +117,9 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) raise ValueError(f"SSE event not supported (line `{line}`)") if line.startswith(self.SSE_DATA_EVENT_DONE): - return + if self.ENABLE_CLASS_LOGS: + print("done]") + return True # If you reached here, the line should contain `data: {...}\n` # where the curly braces contain a valid JSON object. Deserialize it into a ChatCompletionsUpdate object @@ -143,6 +134,8 @@ def _deserialize_and_add_to_queue(self, element: bytes, start_time: float = 0.0) if self.ENABLE_CLASS_LOGS: print("[added]") + return False + def __enter__(self): return self @@ -150,13 +143,13 @@ def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.close() def close(self) -> None: - if isinstance(self.response, HttpResponse): - self.response.close() + if isinstance(self._response, HttpResponse): + self._response.close() async def aclose(self) -> None: # `if`` statement added to avoid mypy error: Incompatible types in "await" (actual type "Optional[Coroutine[Any, Any, None]]", expected type "Awaitable[Any]") - if isinstance(self.response, AsyncHttpResponse): - await self.response.close() + if isinstance(self._response, AsyncHttpResponse): + await self._response.close() __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 2d93dbcc0785..8921411d3c74 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -24,6 +24,7 @@ async def sample_chat_completions_streaming_async(): from azure.ai.inference.aio import ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate from azure.core.credentials import AzureKeyCredential + from azure.core.pipeline.transport import AsyncioRequestsTransport # Read the values of your model endpoint and key from environment variables try: @@ -34,8 +35,14 @@ async def sample_chat_completions_streaming_async(): print("Set them before running this sample.") exit() + # TODO: Remove this. + # Example of how the app can change the HTTP buffer size. The default is 4096 bytes. Reducing it here to 64 bytes + # does not improve the latency of the streamed results. Is there caching happening on the service? or is the AI model + # itself producing output tokens at high-latency? 
+ transport = AsyncioRequestsTransport(connection_data_block_size=64) + # Create chat completions client for synchronous operations - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), transport=transport) # Do a single streaming chat completion operation. Start the operation and get a Future object. future = asyncio.ensure_future( From 3090baef5b6d53b019bf549f1f48f6dbf180878b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:55:29 -0700 Subject: [PATCH 041/112] Update ci.yml --- sdk/ai/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml index d0ceb7e8d11f..0db94435f39b 100644 --- a/sdk/ai/ci.yml +++ b/sdk/ai/ci.yml @@ -49,3 +49,5 @@ extends: safeName: azureaigenerative - name: azure-ai-resources safeName: azureairesources + - name: azure-ai-inference + safeName: azureaiinference From 84ad0b5b6ed5564e3c3baae9f6de32e00aa1e76b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:44:18 -0700 Subject: [PATCH 042/112] Add samples for chat history, JSON input, IO[bytes] input --- sdk/ai/azure-ai-inference/README.md | 2 +- sdk/ai/azure-ai-inference/samples/README.md | 3 + .../samples/example_chat.json | 21 ++++++ ...ample_chat_completions_from_input_bytes.py | 56 ++++++++++++++++ ...sample_chat_completions_from_input_json.py | 67 +++++++++++++++++++ .../sample_chat_completions_with_history.py | 54 +++++++++++++++ 6 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 sdk/ai/azure-ai-inference/samples/example_chat.json create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index c1759ff09adc..257c0283774c 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -263,7 +263,7 @@ import logging logger = logging.getLogger("azure") # Set the desired logging level. logging.INFO or logging.DEBUG are good options. -logger.setLevel(logging.INFO) +logger.setLevel(logging.DEBUG) # Direct logging output to stdout (the default): handler = logging.StreamHandler(stream=sys.stdout) diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 448121e6f7d8..47822341e33e 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -20,6 +20,9 @@ The concepts are similar, you can easily modify any of the samples to your needs |----------------|-------------| |[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | |[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. 
 |
+|[sample_chat_completions_with_history.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, with the second completion using the chat history from the first. |
+|[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. |
+|[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]`. |
 |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. |
 |[sample_image_generation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_generation.py) | Generate an image from a prompt using a synchronous client. |

diff --git a/sdk/ai/azure-ai-inference/samples/example_chat.json b/sdk/ai/azure-ai-inference/samples/example_chat.json
new file mode 100644
index 000000000000..49cf3e2cb647
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/example_chat.json
@@ -0,0 +1,21 @@
+{
+    "messages":
+    [
+        {
+            "role": "system",
+            "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences."
+        },
+        {
+            "role": "user",
+            "content": "What year was construction of the international space station mostly done?"
+        },
+        {
+            "role": "assistant",
+            "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station."
+        },
+        {
+            "role": "user",
+            "content": "And what was the estimated cost to build it?"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py
new file mode 100644
index 000000000000..c463e92ab069
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py
@@ -0,0 +1,56 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get a chat completions response from
+    the service using a synchronous client, and directly providing the
+    IO[bytes] request body (containing input chat messages).
+
+USAGE:
+    python sample_chat_completions_from_input_bytes.py
+
+    Set these two environment variables before running the sample:
+    1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
+        https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
+        where `your-deployment-name` is your unique AI Model deployment name, and
+        `your-azure-region` is the Azure region where your model is deployed.
+    2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
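+
+    The request body is read from the adjacent `example_chat.json` file. As a rough
+    sketch (see that file for the full payload), the JSON it holds has the shape:
+
+        {"messages": [{"role": "system", "content": "..."}, {"role": "user", "content": "..."}]}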
+""" +import io + +def sample_chat_completions_from_input_bytes(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + # Make a chat completion call, by directly providing the + # HTTP request body as IO[bytes], containing chat messages. + result = client.create(read_text_file("example_chat.json")) + + print(result.choices[0].message.content) + +def read_text_file(file_path: str) -> io.BytesIO: + """Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding.""" + try: + with open(file_path, 'r') as file: + return io.BytesIO(file.read().encode('utf-8')) + except FileNotFoundError: + print(f"File '{file_path}' not found.") + return None + +if __name__ == "__main__": + sample_chat_completions_from_input_bytes() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py new file mode 100644 index 000000000000..9262250a0f6d --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -0,0 +1,67 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completions response from + the service using a synchronous client, and directly providing the + JSON request body (containing input chat messages). + +USAGE: + python sample_chat_completions_from_input_json.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://..inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. +""" + +def sample_chat_completions_from_input_json(): + import os + from typing import MutableMapping, Any + from azure.ai.inference import ChatCompletionsClient + from azure.core.credentials import AzureKeyCredential + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + # Define the input chat messages as a MutableMapping + json_messages: MutableMapping[str, Any] = { + "messages": [ + { + "role": "system", + "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences." + }, + { + "role": "user", + "content": "What year was construction of the International Space Station mostly done?" + }, + { + "role": "assistant", + "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. 
During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station." + }, + { + "role": "user", + "content": "And what was the estimated cost to build it?" + } + ] + } + + # Make a chat completion call, by directly providing the + # HTTP request body as IO[bytes], containing chat messages. + result = client.create(json_messages) + + print(result.choices[0].message.content) + +if __name__ == "__main__": + sample_chat_completions_from_input_json() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py new file mode 100644 index 000000000000..2988b8df50d8 --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -0,0 +1,54 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completions response from + the service using a synchronous client. Two completion calls are made, + the second one containing the chat history from the first one. + +USAGE: + python sample_chat_completions_with_history.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://..inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. +""" + +def sample_chat_completions_with_history(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage, AssistantMessage + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + messages=[ + SystemMessage(content="You are an AI assistant that helps people find information. 
 Your replies are short, no more than two sentences."),
+        UserMessage(content="What year was construction of the international space station mostly done?"),
+    ]
+
+    result = client.create(messages=messages)
+    print(result.choices[0].message.content)
+
+    messages.append(AssistantMessage(content=result.choices[0].message.content))
+    messages.append(UserMessage(content="And what was the estimated cost to build it?"))
+
+    result = client.create(messages=messages)
+    print(result.choices[0].message.content)
+
+if __name__ == "__main__":
+    sample_chat_completions_with_history()

From 6aaa1c4da92cb706560db61c59d3f7f1d22eead0 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Tue, 7 May 2024 18:15:40 -0700
Subject: [PATCH 043/112] Draft sample for chat completion with tools

---
 sdk/ai/azure-ai-inference/README.md | 2 +-
 .../sample_chat_completions_with_tools.py | 148 ++++++++++++++++++
 2 files changed, 149 insertions(+), 1 deletion(-)
 create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 257c0283774c..cfa70cf2d438 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -259,7 +259,7 @@ import sys
 import logging

 # Acquire the logger for this client library. Use 'azure' to affect both
-# 'azure.core` and `azure.ai.vision.imageanalysis' libraries.
+# `azure.core` and `azure.ai.inference` libraries.
 logger = logging.getLogger("azure")

 # Set the desired logging level. logging.INFO or logging.DEBUG are good options.
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
new file mode 100644
index 000000000000..49b9190f056f
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
@@ -0,0 +1,148 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to do chat completions using a synchronous client,
+    with the assistance of tools. In this sample, we use a mock function tool to retrieve
+    flight information in order to answer a query about the next flight between two
+    cities.
+
+USAGE:
+    python sample_chat_completions_with_tools.py
+
+    Set these two environment variables before running the sample:
+    1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
+        https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
+        where `your-deployment-name` is your unique AI Model deployment name, and
+        `your-azure-region` is the Azure region where your model is deployed.
+    2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+
+def sample_chat_completions_with_tools():
+    import os
+    import json
+
+    # Enable unredacted logging, including full request and response payloads (delete me!)
+    import sys
+    import logging
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(logging.StreamHandler(stream=sys.stdout))
+
+    try:
+        endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
+        key = os.environ["CHAT_COMPLETIONS_KEY"]
+    except KeyError:
+        print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    from azure.ai.inference import ChatCompletionsClient
+    from azure.ai.inference.models import (SystemMessage, UserMessage, AssistantMessage,
+        ToolMessage, ChatCompletionsFunctionToolDefinition, FunctionDefinition,
+        CompletionsFinishReason, ChatCompletionsToolSelectionPreset)
+    from azure.core.credentials import AzureKeyCredential
+
+    # Create a chat completions client. Make sure you selected a model that supports tools.
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
+
+    # Define a function that retrieves flight information
+    def get_flight_info(origin_city: str, destination_city: str):
+        """
+        This is a mock function that returns information about the next
+        flight between two cities.
+
+        Parameters:
+        origin_city (str): The name of the city where the flight originates
+        destination_city (str): The destination city
+
+        Returns:
+        str: The airline name, flight number, date and time of the next flight between the cities
+        """
+        if origin_city == "Seattle" and destination_city == "Miami":
+            #return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."
+            return "{\"info\": \"Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM.\"}"
+        elif origin_city == "Seattle" and destination_city == "Orlando":
+            #return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."
+            return "{\"info\": \"American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM.\"}"
+        else:
+            #return "I don't have that information."
+            return "{\"info\": \"I don't have that information.\"}"
+
+    # Define a 'tool' that the model can use to retrieve flight information
+    flight_info = ChatCompletionsFunctionToolDefinition(
+        function = FunctionDefinition(
+            name="get_flight_info",
+            description="Returns information about the next flight between two cities. This includes the name of the airline, flight number and the date and time of the next flight",
+            parameters={
+                "type": "object",
+                "properties": {
+                    "origin_city": {
+                        "type": "string",
+                        "description": "The name of the city where the flight originates",
+                    },
+                    "destination_city": {
+                        "type": "string",
+                        "description": "The flight destination city",
+                    },
+                },
+                "required": ["origin_city", "destination_city"],
+            }
+        )
+    )
+
+    # Make a chat completions call asking for flight information, while providing a tool to handle the request
+    messages = [
+        SystemMessage(content="You are an assistant that helps users find flight information."),
+        UserMessage(content="What are the next flights from Seattle to Miami and from Seattle to Orlando?"),
+    ]
+
+    result = client.create(
+        messages=messages,
+        tools=[flight_info],
+        #tool_choice=ChatCompletionsToolSelectionPreset.NONE
+    )
+
+    # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model
+    while result.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS:
+
+        # Append the previous model response to the chat history
+        messages.append(
+            AssistantMessage(
+                tool_calls=result.choices[0].message.tool_calls
+            )
+        )
+
+        # Make new function call(s) as needed. If parallel function calling is supported by the model,
+        # we may have more than one tool call request.
+        for tool_call in result.choices[0].message.tool_calls:
+            function_name = tool_call.function.name
+            function_args = json.loads(tool_call.function.arguments.replace("\'", "\""))
+            tool_call_id = tool_call.id
+            print(f"Calling function `{function_name}` with arguments {function_args}")
+            callable_func = locals()[function_name]
+            function_response = callable_func(**function_args)
+            print(f"Function response is: {function_response}")
+
+            # Provide the tool response to the model, by appending it to the chat history
+            messages.append(
+                ToolMessage(
+                    tool_call_id=tool_call_id,
+                    content=function_response #json.dumps(function_response)
+                )
+            )
+
+        # With the additional tool information on hand, get another response from the model
+        result = client.create(
+            messages=messages,
+            tools=[flight_info],
+            #tool_choice=ChatCompletionsToolSelectionPreset.AUTO
+        )
+
+    # Print the final response
+    print(result.choices[0].message.content)
+
+if __name__ == "__main__":
+    sample_chat_completions_with_tools()

From 11a1c7870ef09742077bde997238b92bc70a848f Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Tue, 7 May 2024 18:20:54 -0700
Subject: [PATCH 044/112] Grab latest TypeSpec changes

---
 .../samples/sample_chat_completions_with_tools.py | 2 +-
 sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
index 49b9190f056f..9c8e0569efe1 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
@@ -102,7 +102,7 @@ def get_flight_info(origin_city: str, destination_city: str):
     result = client.create(
         messages=messages,
         tools=[flight_info],
-        #tool_choice=ChatCompletionsToolSelectionPreset.NONE
+        #tool_choice=ChatCompletionsToolSelectionPreset.NONE # Cohere model does not support
     )

     # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model
     while result.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS:
diff --git
a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index f912ad03e209..a9173a8c5b50 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 78e7193dec65e6ee806ed71337064a5f334966c4 +commit: 32a24a5702ba8ad817280c740ab3b485c1b34079 repo: Azure/azure-rest-api-specs additionalDirectories: From 5864275e6092515c8d3332e3eb2e0a404625773d Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 8 May 2024 11:48:20 -0700 Subject: [PATCH 045/112] Re-emit SDK to pick up TypeSpec tools changes. Fix result.id check to also include `Sanitized` --- .../azure/ai/inference/_model_base.py | 147 ++++++++++-------- .../ai/inference/_operations/_operations.py | 19 ++- .../azure/ai/inference/_patch.py | 1 + .../inference/aio/_operations/_operations.py | 19 ++- .../azure/ai/inference/aio/_patch.py | 1 + .../azure/ai/inference/models/_enums.py | 2 + .../azure/ai/inference/models/_models.py | 90 +++++------ .../azure/ai/inference/models/_patch.py | 6 +- .../sample_chat_completions_async.py | 1 + ...sample_chat_completions_streaming_async.py | 2 + .../async_samples/sample_embeddings_async.py | 1 + .../sample_image_generation_async.py | 1 + .../samples/sample_chat_completions.py | 1 + ...ample_chat_completions_from_input_bytes.py | 7 +- ...sample_chat_completions_from_input_json.py | 16 +- .../sample_chat_completions_streaming.py | 1 + .../sample_chat_completions_with_history.py | 8 +- .../sample_chat_completions_with_tools.py | 57 +++---- .../samples/sample_embeddings.py | 1 + .../samples/sample_image_generation.py | 1 + sdk/ai/azure-ai-inference/tests/conftest.py | 1 + .../tests/model_inference_test_base.py | 26 ++-- .../test_model_inference_async_client.py | 3 +- .../tests/test_model_inference_client.py | 1 - 24 files changed, 217 insertions(+), 196 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py index 1ddc071517d6..5cf70733404d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_model_base.py @@ -6,6 +6,7 @@ # -------------------------------------------------------------------------- # pylint: disable=protected-access, arguments-differ, signature-differs, broad-except +import copy import calendar import decimal import functools @@ -13,7 +14,6 @@ import logging import base64 import re -import copy import typing import enum import email.utils @@ -339,7 +339,7 @@ def _get_model(module_name: str, model_name: str): class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object def __init__(self, data: typing.Dict[str, typing.Any]) -> None: - self._data = copy.deepcopy(data) + self._data = data def __contains__(self, key: typing.Any) -> bool: return key in self._data @@ -378,16 +378,13 @@ def get(self, key: str, default: typing.Any = None) -> typing.Any: return default @typing.overload - def pop(self, key: str) -> typing.Any: - ... + def pop(self, key: str) -> typing.Any: ... @typing.overload - def pop(self, key: str, default: _T) -> _T: - ... + def pop(self, key: str, default: _T) -> _T: ... @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: - ... + def pop(self, key: str, default: typing.Any) -> typing.Any: ... 
def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: if default is _UNSET: @@ -404,12 +401,10 @@ def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: self._data.update(*args, **kwargs) @typing.overload - def setdefault(self, key: str, default: None = None) -> None: - ... + def setdefault(self, key: str, default: None = None) -> None: ... @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: - ... + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: if default is _UNSET: @@ -594,6 +589,64 @@ def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, R0912 annotation: typing.Any, module: typing.Optional[str], @@ -621,11 +674,6 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, if rf: rf._is_model = True - def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): - if _is_model(obj): - return obj - return _deserialize(model_deserializer, obj) - return functools.partial(_deserialize_model, annotation) # pyright: ignore except Exception: pass @@ -640,36 +688,27 @@ def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj # is it optional? 
try: if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore - if_obj_deserializer = _get_deserialize_callable_from_annotation( - next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore - ) - - def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): - if obj is None: - return obj - return _deserialize_with_callable(if_obj_deserializer, obj) - - return functools.partial(_deserialize_with_optional, if_obj_deserializer) + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) except AttributeError: pass + # is it union? if getattr(annotation, "__origin__", None) is typing.Union: # initial ordering is we make `string` the last deserialization option, because it is often them most generic deserializers = [ _get_deserialize_callable_from_annotation(arg, module, rf) - for arg in sorted( - annotation.__args__, key=lambda x: hasattr(x, "__name__") and x.__name__ == "str" # pyright: ignore - ) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore ] - def _deserialize_with_union(deserializers, obj): - for deserializer in deserializers: - try: - return _deserialize(deserializer, obj) - except DeserializationError: - pass - raise DeserializationError() - return functools.partial(_deserialize_with_union, deserializers) try: @@ -678,17 +717,10 @@ def _deserialize_with_union(deserializers, obj): annotation.__args__[1], module, rf # pyright: ignore ) - def _deserialize_dict( - value_deserializer: typing.Optional[typing.Callable], - obj: typing.Dict[typing.Any, typing.Any], - ): - if obj is None: - return obj - return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} - return functools.partial( _deserialize_dict, value_deserializer, + module, ) except (AttributeError, IndexError): pass @@ -696,35 +728,16 @@ def _deserialize_dict( if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore if len(annotation.__args__) > 1: # pyright: ignore - def _deserialize_multiple_sequence( - entry_deserializers: typing.List[typing.Optional[typing.Callable]], - obj, - ): - if obj is None: - return obj - return type(obj)( - _deserialize(deserializer, entry, module) - for entry, deserializer in zip(obj, entry_deserializers) - ) - entry_deserializers = [ _get_deserialize_callable_from_annotation(dt, module, rf) for dt in annotation.__args__ # pyright: ignore ] - return functools.partial(_deserialize_multiple_sequence, entry_deserializers) + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) deserializer = _get_deserialize_callable_from_annotation( annotation.__args__[0], module, rf # pyright: ignore ) - def _deserialize_sequence( - deserializer: typing.Optional[typing.Callable], - obj, - ): - if obj is None: - return obj - return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) - - return functools.partial(_deserialize_sequence, deserializer) + return functools.partial(_deserialize_sequence, 
deserializer, module) except (TypeError, IndexError, AttributeError, SyntaxError): pass diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 8a024d948a26..fdc2665fc6aa 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -172,6 +172,7 @@ def build_image_generation_get_model_info_request(**kwargs: Any) -> HttpRequest: class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): + @overload def create( self, @@ -263,7 +264,7 @@ def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -413,7 +414,7 @@ def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -488,7 +489,7 @@ def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -690,7 +691,7 @@ def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -869,6 +870,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): + @overload def create( self, @@ -941,7 +943,7 @@ def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -1017,7 +1019,7 @@ def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -1081,7 +1083,7 @@ def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. 
Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -1173,7 +1175,7 @@ def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -1311,6 +1313,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: class ImageGenerationClientOperationsMixin(ImageGenerationClientMixinABC): + @overload def create( self, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index dd3a968f7bef..37b4e404e564 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -37,6 +37,7 @@ _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False + class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index de1f4429877c..2bfd83fcf087 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -47,6 +47,7 @@ class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): + @overload async def create( self, @@ -138,7 +139,7 @@ async def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -288,7 +289,7 @@ async def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -363,7 +364,7 @@ async def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. "message": { @@ -565,7 +566,7 @@ async def create( { "finish_reason": "str", # The reason that this chat completions choice completed its generated. Required. Known values are: - "stop", "length", and "content_filter". + "stop", "length", "content_filter", and "tool_calls". "index": 0, # The ordered index associated with this chat completions choice. Required. 
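
# --- Aside: the docstring churn above tracks one real API change in this
# patch: finish_reason gained a "tool_calls" value. A hedged sketch of how a
# caller might branch on it; `result` is assumed to come from
# ChatCompletionsClient.create(...):
from azure.ai.inference.models import CompletionsFinishReason

choice = result.choices[0]
if choice.finish_reason == CompletionsFinishReason.TOOL_CALLS:
    # The model stopped to request tool output rather than to finish its text.
    for tool_call in choice.message.tool_calls:
        print(tool_call.function.name, tool_call.function.arguments)
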
"message": { @@ -744,6 +745,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): + @overload async def create( self, @@ -816,7 +818,7 @@ async def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -892,7 +894,7 @@ async def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -956,7 +958,7 @@ async def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. "total_tokens": 0 # Total number of tokens transacted in this @@ -1048,7 +1050,7 @@ async def create( "input_tokens": 0, # Number of tokens in the request prompt. Required. "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to"" ``input_tokens``. However, certain AI + the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. 
"total_tokens": 0 # Total number of tokens transacted in this @@ -1186,6 +1188,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: class ImageGenerationClientOperationsMixin(ImageGenerationClientMixinABC): + @overload async def create( self, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index a91fd612535d..21eaeb0b493d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -34,6 +34,7 @@ JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() + class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index a19dc51f3131..e3dd1c7fa88a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -70,6 +70,8 @@ class CompletionsFinishReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): CONTENT_FILTERED = "content_filter" """Completions generated a response that was identified as potentially sensitive per content moderation policies.""" + TOOL_CALLS = "tool_calls" + """Completion ended with the model calling a provided tool for output.""" class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 3fafe3a11a18..ee1ecba861a1 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -49,8 +49,7 @@ def __init__( self, *, role: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -74,6 +73,10 @@ class AssistantMessage(ChatRequestMessage, discriminator="assistant"): :vartype role: str or ~azure.ai.inference.models.ASSISTANT :ivar content: The content of the message. Required. :vartype content: str + :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to + subsequent input messages for the chat + completions request to resolve as configured. + :vartype tool_calls: list[~azure.ai.inference.models.ChatCompletionsToolCall] """ role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role") # type: ignore @@ -81,14 +84,18 @@ class AssistantMessage(ChatRequestMessage, discriminator="assistant"): Required. The role that provides responses to system-instructed, user-prompted input.""" content: str = rest_field() """The content of the message. Required.""" + tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field() + """The tool calls that must be resolved and have their outputs appended to subsequent input + messages for the chat + completions request to resolve as configured.""" @overload def __init__( self, *, content: str, - ): - ... + tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None, + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -112,7 +119,7 @@ class ChatChoice(_model_base.Model): :ivar index: The ordered index associated with this chat completions choice. Required. :vartype index: int :ivar finish_reason: The reason that this chat completions choice completed its generated. - Required. Known values are: "stop", "length", and "content_filter". + Required. 
Known values are: "stop", "length", "content_filter", and "tool_calls". :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason :ivar message: The chat message for a given chat completions prompt. Required. :vartype message: ~azure.ai.inference.models.ChatResponseMessage @@ -122,7 +129,7 @@ class ChatChoice(_model_base.Model): """The ordered index associated with this chat completions choice. Required.""" finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generated. Required. Known values - are: \"stop\", \"length\", and \"content_filter\".""" + are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\".""" message: "_models.ChatResponseMessage" = rest_field() """The chat message for a given chat completions prompt. Required.""" @@ -133,8 +140,7 @@ def __init__( index: int, finish_reason: Union[str, "_models.CompletionsFinishReason"], message: "_models.ChatResponseMessage", - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -158,7 +164,7 @@ class ChatChoiceUpdate(_model_base.Model): :ivar index: The ordered index associated with this chat completions choice. Required. :vartype index: int :ivar finish_reason: The reason that this chat completions choice completed its generated. - Required. Known values are: "stop", "length", and "content_filter". + Required. Known values are: "stop", "length", "content_filter", and "tool_calls". :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason :ivar delta: An update to the chat message for a given chat completions prompt. Required. :vartype delta: ~azure.ai.inference.models.ChatResponseMessage @@ -168,7 +174,7 @@ class ChatChoiceUpdate(_model_base.Model): """The ordered index associated with this chat completions choice. Required.""" finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generated. Required. Known values - are: \"stop\", \"length\", and \"content_filter\".""" + are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\".""" delta: "_models.ChatResponseMessage" = rest_field() """An update to the chat message for a given chat completions prompt. Required.""" @@ -179,8 +185,7 @@ def __init__( index: int, finish_reason: Union[str, "_models.CompletionsFinishReason"], delta: "_models.ChatResponseMessage", - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -247,8 +252,7 @@ def __init__( model: str, usage: "_models.CompletionsUsage", choices: List["_models.ChatChoice"], - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -289,8 +293,7 @@ def __init__( *, type: str, id: str, # pylint: disable=redefined-builtin - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -330,8 +333,7 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin function: "_models.FunctionCall", - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -366,8 +368,7 @@ def __init__( self, *, type: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -402,8 +403,7 @@ def __init__( self, *, function: "_models.FunctionDefinition", - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -487,8 +487,7 @@ def __init__( model: str, usage: "_models.CompletionsUsage", choices: List["_models.ChatChoiceUpdate"], - ): - ... + ): ... 
@overload def __init__(self, mapping: Mapping[str, Any]): @@ -534,8 +533,7 @@ def __init__( role: Union[str, "_models.ChatRole"], content: str, tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -587,8 +585,7 @@ def __init__( completion_tokens: int, prompt_tokens: int, total_tokens: int, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -631,8 +628,7 @@ def __init__( embedding: List[float], index: int, object: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -685,8 +681,7 @@ def __init__( usage: "_models.EmbeddingsUsage", object: str, model: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -710,7 +705,7 @@ class EmbeddingsUsage(_model_base.Model): :ivar input_tokens: Number of tokens in the request prompt. Required. :vartype input_tokens: int :ivar prompt_tokens: Number of tokens used for the prompt sent to the AI model. Typically - identical to\ ``input_tokens``. + identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type="query"). Required. :vartype prompt_tokens: int @@ -724,7 +719,7 @@ class EmbeddingsUsage(_model_base.Model): input_tokens: int = rest_field() """Number of tokens in the request prompt. Required.""" prompt_tokens: int = rest_field() - """Number of tokens used for the prompt sent to the AI model. Typically identical to\ + """Number of tokens used for the prompt sent to the AI model. Typically identical to ``input_tokens``. However, certain AI models may add extra tokens to the input hence the number can be higher. (for example when input_type=\"query\"). Required.""" @@ -739,8 +734,7 @@ def __init__( input_tokens: int, prompt_tokens: int, total_tokens: int, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -782,8 +776,7 @@ def __init__( *, name: str, arguments: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -828,8 +821,7 @@ def __init__( name: str, description: Optional[str] = None, parameters: Optional[Any] = None, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -864,8 +856,7 @@ def __init__( *, url: Optional[str] = None, b64_json: Optional[str] = None, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -912,8 +903,7 @@ def __init__( created: datetime.datetime, model: str, data: List["_models.ImageGenerationData"], - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -955,8 +945,7 @@ def __init__( model_type: Union[str, "_models.ModelType"], model_provider: str, model_name: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -994,8 +983,7 @@ def __init__( self, *, content: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -1037,8 +1025,7 @@ def __init__( *, content: str, tool_call_id: str, - ): - ... + ): ... @overload def __init__(self, mapping: Mapping[str, Any]): @@ -1076,8 +1063,7 @@ def __init__( self, *, content: str, - ): - ... + ): ... 
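
# --- Aside: the repeated `): ...` churn above appears to be a formatter
# collapsing the empty @overload bodies; behavior is unchanged. Every model
# keeps two constructors, keyword fields or a raw mapping. A sketch using
# FunctionCall, whose fields are plain strings:
from azure.ai.inference.models import FunctionCall

by_keyword = FunctionCall(name="get_flight_info", arguments='{"origin_city": "Seattle"}')
by_mapping = FunctionCall({"name": "get_flight_info", "arguments": '{"origin_city": "Seattle"}'})
assert by_keyword.name == by_mapping.name == "get_flight_info"
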
@overload def __init__(self, mapping: Mapping[str, Any]): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 59d312e25102..a74281746385 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -38,7 +38,7 @@ def __init__(self, response: Union[HttpResponse, AsyncHttpResponse]): self._is_async_iterator = isinstance(self._response, AsyncHttpResponse) self._queue: "queue.Queue[_models.ChatCompletionsUpdate]" = queue.Queue() self._incomplete_json = "" - self._done = False # Will be set to True when reading 'data: [DONE]' line + self._done = False # Will be set to True when reading 'data: [DONE]' line def __iter__(self): if self._is_async_iterator: @@ -126,9 +126,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: # and add it to the queue. self._queue.put( # pylint: disable=W0212 # Access to a protected member _deserialize of a client class - _models.ChatCompletionsUpdate._deserialize( - json.loads(line[len(self.SSE_DATA_EVENT_PREFIX) : -1]), [] - ) + _models.ChatCompletionsUpdate._deserialize(json.loads(line[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []) ) if self.ENABLE_CLASS_LOGS: diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index a33efa86a04e..c4b7d1ccbdcf 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -19,6 +19,7 @@ """ import asyncio + async def sample_chat_completions_async(): import os from azure.ai.inference.aio import ChatCompletionsClient diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 8921411d3c74..ed5f15a1f15e 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -19,6 +19,7 @@ """ import asyncio + async def sample_chat_completions_streaming_async(): import os from azure.ai.inference.aio import ChatCompletionsClient @@ -70,6 +71,7 @@ async def sample_chat_completions_streaming_async(): # Remember to always close the asynchronous client when you are done with it await client.close() + async def main(): await sample_chat_completions_streaming_async() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 634baec7f900..782c7c23cd2e 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -18,6 +18,7 @@ """ import asyncio + async def sample_embeddings_async(): import os from azure.ai.inference.aio import EmbeddingsClient diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py index b23c719ec9c0..bdc1adc72235 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ 
b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py @@ -18,6 +18,7 @@ """ import asyncio + async def sample_image_generation_async(): import os import base64 diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index 998911df0943..f239fdafeaed 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -18,6 +18,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index c463e92ab069..ccfaaf1281ad 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -20,6 +20,7 @@ """ import io + def sample_chat_completions_from_input_bytes(): import os @@ -43,14 +44,16 @@ def sample_chat_completions_from_input_bytes(): print(result.choices[0].message.content) + def read_text_file(file_path: str) -> io.BytesIO: """Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding.""" try: - with open(file_path, 'r') as file: - return io.BytesIO(file.read().encode('utf-8')) + with open(file_path, "r") as file: + return io.BytesIO(file.read().encode("utf-8")) except FileNotFoundError: print(f"File '{file_path}' not found.") return None + if __name__ == "__main__": sample_chat_completions_from_input_bytes() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index 9262250a0f6d..8b5f51d2a28d 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -19,6 +19,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_from_input_json(): import os from typing import MutableMapping, Any @@ -40,20 +41,14 @@ def sample_chat_completions_from_input_json(): "messages": [ { "role": "system", - "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences." - }, - { - "role": "user", - "content": "What year was construction of the International Space Station mostly done?" + "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", }, + {"role": "user", "content": "What year was construction of the International Space Station mostly done?"}, { "role": "assistant", - "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station." + "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", }, - { - "role": "user", - "content": "And what was the estimated cost to build it?" 
- } + {"role": "user", "content": "And what was the estimated cost to build it?"}, ] } @@ -63,5 +58,6 @@ def sample_chat_completions_from_input_json(): print(result.choices[0].message.content) + if __name__ == "__main__": sample_chat_completions_from_input_json() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 1a0d15668399..9ed9ed275d41 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -18,6 +18,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_streaming(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py index 2988b8df50d8..a375d86f78c9 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -19,6 +19,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_with_history(): import os @@ -36,8 +37,10 @@ def sample_chat_completions_with_history(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - messages=[ - SystemMessage(content="You are an AI assistant that helps people find information. Your replies are short, no more than two sentences."), + messages = [ + SystemMessage( + content="You are an AI assistant that helps people find information. Your replies are short, no more than two sentences." + ), UserMessage(content="What year was construction of the international space station mostly done?"), ] @@ -50,5 +53,6 @@ def sample_chat_completions_with_history(): result = client.create(messages=messages) print(result.choices[0].message.content) + if __name__ == "__main__": sample_chat_completions_with_history() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 9c8e0569efe1..7584fbf72175 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -20,6 +20,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_with_tools(): import os import json @@ -27,6 +28,7 @@ def sample_chat_completions_with_tools(): # Enable unredacted logging, including full request and response payloads (delete me!) 
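
# --- Aside: the samples above submit the same kind of request in different
# shapes. A condensed sketch of the two main forms, assuming `client` is a
# ChatCompletionsClient (a BytesIO body, as in
# sample_chat_completions_from_input_bytes, takes the same positional slot):
from azure.ai.inference.models import SystemMessage, UserMessage

result = client.create(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="How many feet are in a mile?"),
    ]
)
result = client.create({"messages": [{"role": "user", "content": "How many feet are in a mile?"}]})
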
import sys import logging + logger = logging.getLogger("azure") logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) @@ -40,9 +42,16 @@ def sample_chat_completions_with_tools(): exit() from azure.ai.inference import ChatCompletionsClient - from azure.ai.inference.models import (SystemMessage, UserMessage, AssistantMessage, - ToolMessage, ChatCompletionsFunctionToolDefinition, FunctionDefinition, - CompletionsFinishReason, ChatCompletionsToolSelectionPreset) + from azure.ai.inference.models import ( + SystemMessage, + UserMessage, + AssistantMessage, + ToolMessage, + ChatCompletionsFunctionToolDefinition, + FunctionDefinition, + CompletionsFinishReason, + ChatCompletionsToolSelectionPreset, + ) from azure.core.credentials import AzureKeyCredential # Create a chat completion client. Make sure you selected a model that supports tools. @@ -61,19 +70,19 @@ def get_flight_info(origin_city: str, destination_city: str): Returns: str: The airline name, fight number, date and time of the next flight between the cities """ - if (origin_city == "Seattle" and destination_city == "Miami"): - #return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM." - return "{\"info\": \"Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM.\"}" - elif (origin_city == "Seattle" and destination_city == "Orlando"): - #return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM." - return "{\"info\": \"American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM.\"}" + if origin_city == "Seattle" and destination_city == "Miami": + # return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM." + return '{"info": "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."}' + elif origin_city == "Seattle" and destination_city == "Orlando": + # return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM." + return '{"info": "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."}' else: - #return "I don't have that information." - return "{\"into\": \"I don't have that information.\"}" + # return "I don't have that information." + return '{"into": "I don\'t have that information."}' # Define a 'tool' that the model can use to retrieves flight information flight_info = ChatCompletionsFunctionToolDefinition( - function = FunctionDefinition( + function=FunctionDefinition( name="get_flight_info", description="Returns information about the next flight between two cities. 
This inclues the name of the airline, flight number and the date and time of the next flight", parameters={ @@ -89,12 +98,12 @@ def get_flight_info(origin_city: str, destination_city: str): }, }, "required": ["origin_city", "destination_city"], - } + }, ) ) # Make a chat completions call asking for flight information, while providing a tool to handle the request - messages=[ + messages = [ SystemMessage(content="You an assistant that helps users find flight information."), UserMessage(content="What are the next flights from Seattle to Miami and from Seattle to Orlando?"), ] @@ -102,24 +111,20 @@ def get_flight_info(origin_city: str, destination_city: str): result = client.create( messages=messages, tools=[flight_info], - #tool_choice=ChatCompletionsToolSelectionPreset.NONE # Cohere model does not support + # tool_choice=ChatCompletionsToolSelectionPreset.NONE # Cohere model does not support ) # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model - while result.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: + while result.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: # Append the previous model response to the chat history - messages.append( - AssistantMessage( - tool_calls=result.choices[0].message.tool_calls - ) - ) + messages.append(AssistantMessage(tool_calls=result.choices[0].message.tool_calls)) # Make new function call(s) as needed. If parallel function calling is supported by the model, # we may have more than one tool call request. for tool_call in result.choices[0].message.tool_calls: function_name = tool_call.function.name - function_args = json.loads(tool_call.function.arguments.replace("\'", "\"")) + function_args = json.loads(tool_call.function.arguments.replace("'", '"')) tool_call_id = tool_call.id print(f"Calling function `{function_name}` with arguments {function_args}") callable_func = locals()[function_name] @@ -128,21 +133,19 @@ def get_flight_info(origin_city: str, destination_city: str): # Provide the tool response to the model, by appending it to the chat history messages.append( - ToolMessage( - tool_call_id=tool_call_id, - content=function_response #json.dumps(function_response) - ) + ToolMessage(tool_call_id=tool_call_id, content=function_response) # json.dumps(function_response) ) # With the additional tools information on hand, get another response from the model result = client.create( messages=messages, tools=[flight_info], - #tool_choice=ChatCompletionsToolSelectionPreset.AUTO + # tool_choice=ChatCompletionsToolSelectionPreset.AUTO ) # Print the final response print(result.choices[0].message.content) + if __name__ == "__main__": sample_chat_completions_with_tools() diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 1d694188502f..66c112bb7692 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -18,6 +18,7 @@ 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_embeddings(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py index 3b4fded4451f..f86e3ee7031a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py @@ -18,6 +18,7 @@ 2) IMAGE_GENERATION_KEY - Your model key (a 32-character string). 
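
# --- Aside: the loop above resolves tool calls through locals()[function_name],
# which only works while the handler is defined in the same scope. A hedged
# alternative sketch with an explicit registry; `get_flight_info` is stubbed:
import json

def get_flight_info(origin_city: str, destination_city: str) -> str:
    return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."

TOOL_REGISTRY = {"get_flight_info": get_flight_info}

def dispatch(tool_call) -> str:
    # The sample first normalizes single quotes: arguments.replace("'", '"').
    args = json.loads(tool_call.function.arguments.replace("'", '"'))
    return TOOL_REGISTRY[tool_call.function.name](**args)
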
Keep it secret. """ + def sample_image_generation(): import os import base64 diff --git a/sdk/ai/azure-ai-inference/tests/conftest.py b/sdk/ai/azure-ai-inference/tests/conftest.py index 91e541e4d1bd..1ea8cf843682 100644 --- a/sdk/ai/azure-ai-inference/tests/conftest.py +++ b/sdk/ai/azure-ai-inference/tests/conftest.py @@ -6,6 +6,7 @@ import pytest from devtools_testutils import test_proxy + # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 7044efa23821..3b829b3b7bf1 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -44,6 +44,7 @@ embeddings_key="00000000000000000000000000000000", ) + # The test class name needs to start with "Test" to get collected by pytest class ModelClientTestBase(AzureRecordedTestCase): @@ -51,7 +52,7 @@ class ModelClientTestBase(AzureRecordedTestCase): PRINT_RESULT = True # Regular expression describing the pattern of a result ID (e.g. "183b56eb-8512-484d-be50-5d8df82301a2") - REGEX_RESULT_ID = re.compile(r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$') + REGEX_RESULT_ID = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^Sanitized$") # Methods to load credentials from environment variables def _load_chat_credentials(self, *, bad_key: bool, **kwargs): @@ -96,8 +97,7 @@ def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contai assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED assert result.choices[0].index == 0 - assert result.id is not None - assert len(result.id) == 36 + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) assert result.created is not None assert result.created != "" assert result.model is not None @@ -122,7 +122,7 @@ def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, assert update.choices[0].index == 0 assert update.id is not None assert len(update.id) == 36 - assert update.model is not None + assert update.model is not None assert update.model != "" if update.choices[0].delta.content != None: return update.choices[0].delta.content @@ -132,12 +132,12 @@ def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, @staticmethod def _validate_chat_completions_streaming_result(result: sdk.models.StreamingChatCompletions): count = 0 - content ="" + content = "" for update in result: content += ModelClientTestBase._validate_chat_completions_update(update, count == 0) count += 1 assert count > 2 - assert len(content) > 100 # Some arbitrary number + assert len(content) > 100 # Some arbitrary number # The last update should have a finish reason and usage assert update.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED assert update.usage.prompt_tokens > 0 @@ -154,7 +154,7 @@ async def _validate_async_chat_completions_streaming_result(result: sdk.models.S content += ModelClientTestBase._validate_chat_completions_update(update, count == 0) count += 1 assert count > 2 - assert len(content) > 100 # Some arbitrary number + assert len(content) > 100 # Some arbitrary number # The last update should have a finish reason and usage assert update.choices[0].finish_reason == 
sdk.models.CompletionsFinishReason.STOPPED assert update.usage.prompt_tokens > 0 @@ -180,7 +180,6 @@ def _print_chat_completions_result(result: sdk.models.ChatCompletions): print("\tusage.completion_tokens: {}".format(result.usage.completion_tokens)) print("\tusage.total_tokens: {}".format(result.usage.total_tokens)) - @staticmethod def _validate_embeddings_result(result: sdk.models.EmbeddingsResult): assert result is not None @@ -194,13 +193,12 @@ def _validate_embeddings_result(result: sdk.models.EmbeddingsResult): assert result.data[i].embedding[0] != 0.0 assert result.data[i].embedding[1023] != 0.0 assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) - #assert len(result.model) > 0 # At the time of writing this test, this JSON field existed but was empty + # assert len(result.model) > 0 # At the time of writing this test, this JSON field existed but was empty assert result.object == "list" # At the time of writing this test, input_tokens did not exist (I see completion tokens instead) - #assert result.usage.input_tokens > 0 - #assert result.usage.prompt_tokens > 0 - #assert result.total_tokens == result.usage.input_tokens + result.usage.prompt_tokens - + # assert result.usage.input_tokens > 0 + # assert result.usage.prompt_tokens > 0 + # assert result.total_tokens == result.usage.input_tokens + result.usage.prompt_tokens @staticmethod def _print_embeddings_result(result: sdk.models.EmbeddingsResult): @@ -214,6 +212,6 @@ def _print_embeddings_result(result: sdk.models.EmbeddingsResult): print(f"\tid: {result.id}") print(f"\tmodel: {result.model}") print(f"\tobject: {result.object}") - #print(f"\tusage.input_tokens: {result.usage.input_tokens}") # At the time of writing this test, this JSON field does not exist + # print(f"\tusage.input_tokens: {result.usage.input_tokens}") # At the time of writing this test, this JSON field does not exist print(f"\tusage.prompt_tokens: {result.usage.prompt_tokens}") print(f"\tusage.total_tokens: {result.usage.total_tokens}") diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index bd21eae6a6d0..a490a21d04a5 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -9,6 +9,7 @@ from devtools_testutils.aio import recorded_by_proxy_async from azure.core.exceptions import AzureError + # The test class name needs to start with "Test" to get collected by pytest class TestModelAsyncClient(ModelClientTestBase): @@ -23,7 +24,7 @@ class TestModelAsyncClient(ModelClientTestBase): async def test_async_chat_completions_error_free(self, **kwargs): messages = [ sdk.models.SystemMessage(content="You are a helpful assistant answering questions regarding length units."), - sdk.models.UserMessage(content="How many feet are in a mile?") + sdk.models.UserMessage(content="How many feet are in a mile?"), ] client = self._create_async_chat_client(**kwargs) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 7441d3cbb8d6..7676a2f40742 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -72,7 +72,6 @@ def test_chat_completion_with_auth_failure(self, **kwargs): client.close() assert exception_caught - @ServicePreparerChatCompletions() @recorded_by_proxy def 
test_embeddings_on_chat_completion_endpoint(self, **kwargs): From 9e181466e96646f7419c1093edab3d7aea15215a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 8 May 2024 16:47:10 -0700 Subject: [PATCH 046/112] New test for tool, new recordings --- sdk/ai/azure-ai-inference/assets.json | 2 +- sdk/ai/azure-ai-inference/tests/README.md | 8 +-- .../tests/model_inference_test_base.py | 30 +++++++++-- .../tests/test_model_inference_client.py | 52 ++++++++++++++++++- 4 files changed, 82 insertions(+), 10 deletions(-) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index 30cf7e8830ab..3054172500dd 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_c7e4a15b67" + "Tag": "python/ai/azure-ai-inference_b8b76ccaac" } diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index 4f051d1584e0..0a69a8d23684 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -6,9 +6,9 @@ The instructions below are for running tests locally, on a Windows machine, agai The live tests were written against the AI models mentioned below. You will need to deploy them in [Azure AI Studio](https://ai.azure.com/) and have the endpoint and key for each one of them. -- llama-2-13b (fro chat completion tests) -- TBD (for embedding tests) -- TBD (for image generation tests) +- `Mistral-Large` for chat completion tests +- `Cohere-embed-v3-english` for embedding tests +- `TBD` for image generation tests ## Setup @@ -47,6 +47,8 @@ Configure the test proxy to run live service tests without recordings: ``` set AZURE_TEST_RUN_LIVE=true set AZURE_SKIP_LIVE_RECORDING=true +set PROXY_URL=http://localhost:5000 +set AZURE_TEST_USE_CLI_AUTH=true ``` ## Run tests diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 3b829b3b7bf1..304e0c33d801 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -8,6 +8,7 @@ import azure.ai.inference as sdk import azure.ai.inference.aio as async_sdk import re +import json from os import path from typing import List, Optional, Union @@ -51,8 +52,9 @@ class ModelClientTestBase(AzureRecordedTestCase): # Set to True to print out all results to the console PRINT_RESULT = True - # Regular expression describing the pattern of a result ID (e.g. "183b56eb-8512-484d-be50-5d8df82301a2") - REGEX_RESULT_ID = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^Sanitized$") + # Regular expression describing the pattern of a result ID. 
Format allowed are: + # "183b56eb-8512-484d-be50-5d8df82301a2", "26ef25aa45424781865a2d38a4484274" and "Sanitized" + REGEX_RESULT_ID = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[0-9a-fA-F]{32}$|^Sanitized$") # Methods to load credentials from environment variables def _load_chat_credentials(self, *, bad_key: bool, **kwargs): @@ -96,7 +98,6 @@ def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contai assert result.choices[0].message.role == sdk.models.ChatRole.ASSISTANT assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED assert result.choices[0].index == 0 - assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) assert result.created is not None assert result.created != "" @@ -107,6 +108,26 @@ def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contai assert result.usage.completion_tokens > 0 assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + @staticmethod + def _validate_chat_completions_tool_result(result: sdk.models.ChatCompletions): + assert result.choices[0].message.content == None or result.choices[0].message.content == "" + assert result.choices[0].message.role == sdk.models.ChatRole.ASSISTANT + assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.TOOL_CALLS + assert result.choices[0].index == 0 + function_args = json.loads(result.choices[0].message.tool_calls[0].function.arguments.replace("'", '"')) + print(function_args) + assert function_args["city"].lower() == "seattle" + assert function_args["days"] == "2" + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) + assert result.created is not None + assert result.created != "" + assert result.model is not None + #assert result.model != "" + assert result.object == "chat.completion" + assert result.usage.prompt_tokens > 0 + assert result.usage.completion_tokens > 0 + assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + @staticmethod def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, first: bool) -> str: if first: @@ -121,7 +142,7 @@ def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, assert update.choices[0].delta.tool_calls == None assert update.choices[0].index == 0 assert update.id is not None - assert len(update.id) == 36 + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(update.id)) assert update.model is not None assert update.model != "" if update.choices[0].delta.content != None: @@ -169,6 +190,7 @@ def _print_chat_completions_result(result: sdk.models.ChatCompletions): print(" Chat Completions result:") for choice in result.choices: print(f"\tchoices[0].message.content: {choice.message.content}") + print(f"\tchoices[0].message.tool_calls: {choice.message.tool_calls}") print("\tchoices[0].message.role: {}".format(choice.message.role)) print("\tchoices[0].finish_reason: {}".format(choice.finish_reason)) print("\tchoices[0].index: {}".format(choice.index)) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 7676a2f40742..54e7d6193310 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -41,6 +41,54 @@ def test_chat_completions_streaming_error_free(self, **kwargs): 
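
# --- Aside: a quick self-check of the widened REGEX_RESULT_ID above, covering
# all three accepted shapes:
import re

REGEX_RESULT_ID = re.compile(
    r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
    r"|^[0-9a-fA-F]{32}$"
    r"|^Sanitized$"
)
assert REGEX_RESULT_ID.match("183b56eb-8512-484d-be50-5d8df82301a2")  # GUID
assert REGEX_RESULT_ID.match("26ef25aa45424781865a2d38a4484274")      # bare 32-hex id
assert REGEX_RESULT_ID.match("Sanitized")                             # test-proxy placeholder
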
self._validate_chat_completions_streaming_result(result) client.close() + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_chat_completions_with_tool_error_free(self, **kwargs): + forecast_tool=sdk.models.ChatCompletionsFunctionToolDefinition( + function=sdk.models.FunctionDefinition( + name="get_max_temperature", + description="A function that returns the forecasted maximum temperature IN a given city, a given few days from now, in Fahrenheit. It returns `unknown` if the forecast is not known.", + parameters={ + "type": "object", + "properties": { + "city": { + "type": "string", + "description": "The name of the city", + }, + "days": { + "type": "string", + "description": "The number of days from now, starting from 0, where 0 represents today, 1 represents tomorrow, etc.", + }, + }, + "required": ["city", "days"], + }, + ) + ) + client = self._create_chat_client(**kwargs) + messages=[ + sdk.models.SystemMessage(content="You are an assistant that helps users find weather information."), + sdk.models.UserMessage(content="what's the maximum temperature in Seattle two days from now?") + ] + result = client.create( + messages=messages, + tools=[forecast_tool], + ) + self._print_chat_completions_result(result) + self._validate_chat_completions_tool_result(result) + messages.append(sdk.models.AssistantMessage( + tool_calls=result.choices[0].message.tool_calls + )) + messages.append(sdk.models.ToolMessage( + content="62", + tool_call_id=result.choices[0].message.tool_calls[0].id, + )) + result = client.create( + messages=messages, + tools=[forecast_tool], + ) + self._validate_chat_completions_result(result, ["62"]) + client.close() + @ServicePreparerEmbeddings() @recorded_by_proxy def test_embeddings_error_free(self, **kwargs): @@ -83,7 +131,7 @@ def test_embeddings_on_chat_completion_endpoint(self, **kwargs): exception_caught = True print(e) assert hasattr(e, "status_code") - assert e.status_code == 404 - assert "not found" in e.message.lower() + assert e.status_code == 404 or e.status_code == 405 # `404 - not found` or `405 - method not allowed` + assert "not found" in e.message.lower() or "not allowed" in e.message.lower() client.close() assert exception_caught From 2c973564fbfa06c25cc84d50d32e436d28c8fa40 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 9 May 2024 09:46:22 -0700 Subject: [PATCH 047/112] use logger for detailed SSE streaming debug spew --- .../azure/ai/inference/models/_patch.py | 19 ++++++++++++------- .../sample_chat_completions_with_tools.py | 19 ++++++++++--------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index a74281746385..29bc5336c5d5 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -6,15 +6,16 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import json +import logging import queue -import time import re -import json from typing import List, Union, AsyncIterator, Iterator, cast from azure.core.rest import HttpResponse, AsyncHttpResponse from .. import models as _models +logger = logging.getLogger(__name__) class StreamingChatCompletions: """Represents an interator over ChatCompletionsUpdate objects. 
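
# --- Aside: with the print() calls above replaced by logger.debug(), flipping
# ENABLE_CLASS_LOGS to True routes the SSE spew through standard logging. The
# module logger lives under the "azure" namespace, so the same wiring the
# tools sample uses will surface it:
import logging
import sys

azure_logger = logging.getLogger("azure")
azure_logger.setLevel(logging.DEBUG)
azure_logger.addHandler(logging.StreamHandler(stream=sys.stdout))
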
It can be used for either synchronous or @@ -22,7 +23,7 @@ class StreamingChatCompletions: into chat completions updates, each one represented by a ChatCompletionsUpdate object. """ - # Enable console logs for debugging. For development only, will be removed before release. + # Enable detailed logs of SSE parsing. For development only, should be `False` by default. ENABLE_CLASS_LOGS = False # The prefix of each line in the SSE stream that contains a JSON string @@ -55,6 +56,8 @@ def __next__(self) -> _models.ChatCompletionsUpdate: return self._queue.get() def _read_next_block(self) -> bool: + if self.ENABLE_CLASS_LOGS: + logger.debug("[Reading next block]") try: # Use 'cast' to make 'pyright' error go away element = cast(Iterator[bytes], self._bytes_iterator).__next__() @@ -78,6 +81,8 @@ async def __anext__(self) -> _models.ChatCompletionsUpdate: return self._queue.get() async def _read_next_block_async(self) -> bool: + if self.ENABLE_CLASS_LOGS: + logger.debug("[Reading next block]") try: # Use 'cast' to make 'pyright' error go away element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__() @@ -97,7 +102,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: for index, line in enumerate(line_list): if self.ENABLE_CLASS_LOGS: - print(f"[original] {repr(line)}") + logger.debug(f"[Original line] {repr(line)}") if index == 0: line = self._incomplete_json + line @@ -108,7 +113,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: return False if self.ENABLE_CLASS_LOGS: - print(f"[modified] {repr(line)}") + logger.debug(f"[Modified line] {repr(line)}") if line == "\n": # Empty line, indicating flush output to client continue @@ -118,7 +123,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: if line.startswith(self.SSE_DATA_EVENT_DONE): if self.ENABLE_CLASS_LOGS: - print("done]") + logger.debug("[Done]") return True # If you reached here, the line should contain `data: {...}\n` @@ -130,7 +135,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: ) if self.ENABLE_CLASS_LOGS: - print("[added]") + logger.debug("[Added to queue]") return False diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 7584fbf72175..a03cd1db5e54 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -28,7 +28,6 @@ def sample_chat_completions_with_tools(): # Enable unredacted logging, including full request and response payloads (delete me!) import sys import logging - logger = logging.getLogger("azure") logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) @@ -51,6 +50,7 @@ def sample_chat_completions_with_tools(): FunctionDefinition, CompletionsFinishReason, ChatCompletionsToolSelectionPreset, + ChatCompletionsNamedToolSelection ) from azure.core.credentials import AzureKeyCredential @@ -71,14 +71,14 @@ def get_flight_info(origin_city: str, destination_city: str): str: The airline name, fight number, date and time of the next flight between the cities """ if origin_city == "Seattle" and destination_city == "Miami": - # return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM." 
- return '{"info": "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."}' + return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM." + #return '{"info": "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."}' elif origin_city == "Seattle" and destination_city == "Orlando": - # return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM." - return '{"info": "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."}' + return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM." + #return '{"info": "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."}' else: - # return "I don't have that information." - return '{"into": "I don\'t have that information."}' + return "I don't have that information." + #return '{"into": "I don\'t have that information."}' # Define a 'tool' that the model can use to retrieves flight information flight_info = ChatCompletionsFunctionToolDefinition( @@ -108,10 +108,11 @@ def get_flight_info(origin_city: str, destination_city: str): UserMessage(content="What are the next flights from Seattle to Miami and from Seattle to Orlando?"), ] + x : ChatCompletionsNamedToolSelection result = client.create( messages=messages, tools=[flight_info], - # tool_choice=ChatCompletionsToolSelectionPreset.NONE # Cohere model does not support + #tool_choice=ChatCompletionsNamedToolSelection(type="function") # Cohere model does not support ) # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model @@ -140,7 +141,7 @@ def get_flight_info(origin_city: str, destination_city: str): result = client.create( messages=messages, tools=[flight_info], - # tool_choice=ChatCompletionsToolSelectionPreset.AUTO + tool_choice=ChatCompletionsToolSelectionPreset.AUTO ) # Print the final response From 37d1e35c7f2f2d64dbdbaf9126504c181aaf90cf Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 9 May 2024 10:23:01 -0700 Subject: [PATCH 048/112] Don't build azure-ai-generative and azure-ai-resources packages, as they are deprecated --- sdk/ai/ci.yml | 26 ++++++++++++++------------ sdk/ai/{tests.yml => tests.yml.old} | 1 + 2 files changed, 15 insertions(+), 12 deletions(-) rename sdk/ai/{tests.yml => tests.yml.old} (68%) diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml index 0db94435f39b..d6dee468e9f0 100644 --- a/sdk/ai/ci.yml +++ b/sdk/ai/ci.yml @@ -29,25 +29,27 @@ extends: template: /eng/pipelines/templates/stages/archetype-sdk-client.yml parameters: ServiceDirectory: ai - TestTimeoutInMinutes: 75 - BuildDocs: true TestProxy: true + BuildDocs: true + # The below were set before when azure-ai-generative and azure-ai-resources packages were built: + # TestTimeoutInMinutes: 75 # This is a short term solution to create API review for python azure-ml package only when running pipeline manually # Long term solution should be to have different versions on main branch and release branch for python package so APIView can have different revisions for each version. 
# Tracking issue: https://github.com/Azure/azure-sdk-for-python/issues/29196 - GenerateApiReviewForManualOnly: true + # GenerateApiReviewForManualOnly: true # This custom matrix config should be dropped once: # * The Azure SDKs removes Python 3.7 from the test matrix # * Once all of azure-ai-generative's extra packages can be installed on Python3.12 - MatrixConfigs: - - Name: ai_ci_matrix - Path: eng/pipelines/templates/stages/platform-matrix-ai.json - Selection: sparse - GenerateVMJobs: true + # MatrixConfigs: + # - Name: ai_ci_matrix + # Path: eng/pipelines/templates/stages/platform-matrix-ai.json + # Selection: sparse + # GenerateVMJobs: true Artifacts: - - name: azure-ai-generative - safeName: azureaigenerative - - name: azure-ai-resources - safeName: azureairesources - name: azure-ai-inference safeName: azureaiinference + # These packages are deprecated: + #- name: azure-ai-generative + # safeName: azureaigenerative + #- name: azure-ai-resources + # safeName: azureairesources diff --git a/sdk/ai/tests.yml b/sdk/ai/tests.yml.old similarity index 68% rename from sdk/ai/tests.yml rename to sdk/ai/tests.yml.old index 960b65f98853..b51eb6aaf15d 100644 --- a/sdk/ai/tests.yml +++ b/sdk/ai/tests.yml.old @@ -1,3 +1,4 @@ +# This was the tests.yml file that was used when azure-ai-generative and azure-ai-resources packages were built. trigger: none # NOTE: Service live tests are NOT enabled. This file only enables the analyze stage currently. From e79c4b564e275a2e61b1e885663cd4ad78ded054 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 9 May 2024 11:45:36 -0700 Subject: [PATCH 049/112] Split streaming response class to two, one for sync, one for async --- .../azure/ai/inference/aio/_patch.py | 20 +-- .../azure/ai/inference/models/_patch.py | 145 ++++++++++-------- 2 files changed, 91 insertions(+), 74 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 21eaeb0b493d..ef8be533d33f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -45,12 +45,12 @@ async def create_streaming( model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.StreamingChatCompletions: + ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. When using this method, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions object to get content updates as they arrive. :param body: Required. @@ -64,7 +64,7 @@ async def create_streaming( Default value is "application/json". :paramtype content_type: str :return: ChatCompletions. 
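(For orientation while reading these `create_streaming` overloads: end-to-end consumption of the async stream looks like the sketch below. It assumes the `CHAT_COMPLETIONS_ENDPOINT` and `CHAT_COMPLETIONS_KEY` environment variables used throughout the samples, and is illustrative rather than part of the patch.)

```python
import asyncio
import os

from azure.ai.inference.aio import ChatCompletionsClient
from azure.ai.inference.models import UserMessage
from azure.core.credentials import AzureKeyCredential


async def main():
    client = ChatCompletionsClient(
        endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
    )
    # create_streaming() resolves to an AsyncStreamingChatCompletions,
    # consumed with `async for`; each update carries an incremental delta.
    result = await client.create_streaming(
        messages=[UserMessage(content="How many feet are in a mile?")]
    )
    async for update in result:
        if update.choices[0].delta.content:
            print(update.choices[0].delta.content, end="")
    await result.aclose()
    await client.close()


asyncio.run(main())
```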
The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.StreamingChatCompletions + :rtype: ~azure.ai.inference.models.AsyncStreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ @@ -89,12 +89,12 @@ async def create_streaming( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.StreamingChatCompletions: + ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. When using this method, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions object to get content updates as they arrive. :keyword messages: The collection of context messages associated with this chat completions @@ -182,12 +182,12 @@ async def create_streaming( model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.StreamingChatCompletions: + ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. When using this method, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions object to get content updates as they arrive. :param body: Required. @@ -226,12 +226,12 @@ async def create_streaming( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.StreamingChatCompletions: + ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. When using this method, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions object to get content updates as they arrive. :param body: Is either a JSON type or a IO[bytes] type. Required. @@ -373,7 +373,7 @@ async def create_streaming( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.StreamingChatCompletions(response) + return _models.AsyncStreamingChatCompletions(response) __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 29bc5336c5d5..f64f90729977 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -17,10 +17,10 @@ logger = logging.getLogger(__name__) -class StreamingChatCompletions: - """Represents an interator over ChatCompletionsUpdate objects. It can be used for either synchronous or - asynchronous iterations. 
The class deserializes the Server Sent Events (SSE) response stream - into chat completions updates, each one represented by a ChatCompletionsUpdate object. +class BaseStreamingChatCompletions: + """A base class for the sync and async streaming chat completions responses, holding any common code + to deserializes the Server Sent Events (SSE) response stream into chat completions updates, each one + represented by a ChatCompletionsUpdate object. """ # Enable detailed logs of SSE parsing. For development only, should be `False` by default. @@ -33,64 +33,11 @@ class StreamingChatCompletions: # The line indicating the end of the SSE stream SSE_DATA_EVENT_DONE = "data: [DONE]" - def __init__(self, response: Union[HttpResponse, AsyncHttpResponse]): - self._response = response - self._bytes_iterator: Union[AsyncIterator[bytes], Iterator[bytes]] = response.iter_bytes() - self._is_async_iterator = isinstance(self._response, AsyncHttpResponse) + def __init__(self): self._queue: "queue.Queue[_models.ChatCompletionsUpdate]" = queue.Queue() self._incomplete_json = "" self._done = False # Will be set to True when reading 'data: [DONE]' line - def __iter__(self): - if self._is_async_iterator: - raise ValueError("This method is not supported for async iterators") - return self - - def __next__(self) -> _models.ChatCompletionsUpdate: - if self._is_async_iterator: - raise ValueError("This method is not supported for async iterators") - while self._queue.empty() and not self._done: - self._done = self._read_next_block() - if self._queue.empty(): - raise StopIteration - return self._queue.get() - - def _read_next_block(self) -> bool: - if self.ENABLE_CLASS_LOGS: - logger.debug("[Reading next block]") - try: - # Use 'cast' to make 'pyright' error go away - element = cast(Iterator[bytes], self._bytes_iterator).__next__() - except StopIteration: - self.close() - return True - return self._deserialize_and_add_to_queue(element) - - def __aiter__(self): - if not self._is_async_iterator: - raise ValueError("This method is only supported for async iterators") - return self - - async def __anext__(self) -> _models.ChatCompletionsUpdate: - if not self._is_async_iterator: - raise ValueError("This method is only supported for async iterators") - while self._queue.empty() and not self._done: - self._done = await self._read_next_block_async() - if self._queue.empty(): - raise StopAsyncIteration - return self._queue.get() - - async def _read_next_block_async(self) -> bool: - if self.ENABLE_CLASS_LOGS: - logger.debug("[Reading next block]") - try: - # Use 'cast' to make 'pyright' error go away - element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__() - except StopAsyncIteration: - await self.aclose() - return True - return self._deserialize_and_add_to_queue(element) - def _deserialize_and_add_to_queue(self, element: bytes) -> bool: # Clear the queue of ChatCompletionsUpdate before processing the next block @@ -139,6 +86,39 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: return False + +class StreamingChatCompletions(BaseStreamingChatCompletions): + """Represents an interator over ChatCompletionsUpdate objects. It can be used for either synchronous or + asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream + into chat completions updates, each one represented by a ChatCompletionsUpdate object. 
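To make the buffering described above concrete: on the wire, each chat completions update arrives as an SSE `data: {...}` line, a network chunk may end in the middle of a JSON payload (hence the `_incomplete_json` carry-over), and the literal `data: [DONE]` line terminates the stream. A simplified, self-contained sketch of the same parsing idea (illustrative only, not the SDK's actual parser):

```python
import json
from typing import Any, Dict, Iterator


def parse_sse_data_lines(chunks: Iterator[bytes]) -> Iterator[Dict[str, Any]]:
    incomplete = ""  # Tail of a 'data:' line that was cut mid-JSON by chunking.
    for chunk in chunks:
        for line in chunk.decode("utf-8").splitlines():
            line = incomplete + line
            incomplete = ""
            if not line:
                continue  # Blank line: SSE event separator, nothing to emit.
            if line == "data: [DONE]":
                return  # End-of-stream sentinel.
            if line.startswith("data: "):
                try:
                    yield json.loads(line[len("data: "):])
                except json.JSONDecodeError:
                    incomplete = line  # Resumes with the next chunk's first line.
```

Each parsed dict is what `_deserialize_and_add_to_queue` wraps in a `ChatCompletionsUpdate` and pushes onto the queue for both the sync and async subclasses below.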
+ """ + + def __init__(self, response: HttpResponse): + super().__init__() + self._response = response + self._bytes_iterator: Iterator[bytes] = response.iter_bytes() + + def __iter__(self): + return self + + def __next__(self) -> _models.ChatCompletionsUpdate: + while self._queue.empty() and not self._done: + self._done = self._read_next_block() + if self._queue.empty(): + raise StopIteration + return self._queue.get() + + def _read_next_block(self) -> bool: + if self.ENABLE_CLASS_LOGS: + logger.debug("[Reading next block]") + try: + # Use 'cast' to make 'pyright' error go away + element = cast(Iterator[bytes], self._bytes_iterator).__next__() + except StopIteration: + self.close() + return True + return self._deserialize_and_add_to_queue(element) + def __enter__(self): return self @@ -146,17 +126,54 @@ def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.close() def close(self) -> None: - if isinstance(self._response, HttpResponse): - self._response.close() + self._response.close() + + +class AsyncStreamingChatCompletions(BaseStreamingChatCompletions): + """Represents an async interator over ChatCompletionsUpdate objects. It can be used for either synchronous or + asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream + into chat completions updates, each one represented by a ChatCompletionsUpdate object. + """ + + def __init__(self, response: AsyncHttpResponse): + super().__init__() + self._response = response + self._bytes_iterator: AsyncIterator[bytes] = response.iter_bytes() + + def __aiter__(self): + return self + + async def __anext__(self) -> _models.ChatCompletionsUpdate: + while self._queue.empty() and not self._done: + self._done = await self._read_next_block_async() + if self._queue.empty(): + raise StopAsyncIteration + return self._queue.get() + + async def _read_next_block_async(self) -> bool: + if self.ENABLE_CLASS_LOGS: + logger.debug("[Reading next block]") + try: + # Use 'cast' to make 'pyright' error go away + element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__() + except StopAsyncIteration: + await self.aclose() + return True + return self._deserialize_and_add_to_queue(element) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + self.close() async def aclose(self) -> None: - # `if`` statement added to avoid mypy error: Incompatible types in "await" (actual type "Optional[Coroutine[Any, Any, None]]", expected type "Awaitable[Any]") - if isinstance(self._response, AsyncHttpResponse): - await self._response.close() + await self._response.close() __all__: List[str] = [ - "StreamingChatCompletions" + "StreamingChatCompletions", + "AsyncStreamingChatCompletions" ] # Add all objects you want publicly available to users at this package level From 06e11f717c137b8c302dedcc9ce382024c0efcdb Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 9 May 2024 13:18:01 -0700 Subject: [PATCH 050/112] Update test timeout. 
Rename /templates/stages/platform-matrix-ai.json.old --- .../{platform-matrix-ai.json => platform-matrix-ai.json.old} | 0 sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py | 3 ++- sdk/ai/ci.yml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) rename eng/pipelines/templates/stages/{platform-matrix-ai.json => platform-matrix-ai.json.old} (100%) diff --git a/eng/pipelines/templates/stages/platform-matrix-ai.json b/eng/pipelines/templates/stages/platform-matrix-ai.json.old similarity index 100% rename from eng/pipelines/templates/stages/platform-matrix-ai.json rename to eng/pipelines/templates/stages/platform-matrix-ai.json.old diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index f64f90729977..8eed8e55200f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -6,6 +6,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import asyncio import json import logging import queue @@ -165,7 +166,7 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.close() + asyncio.run(self.close()) async def aclose(self) -> None: await self._response.close() diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml index d6dee468e9f0..5780d354da0a 100644 --- a/sdk/ai/ci.yml +++ b/sdk/ai/ci.yml @@ -31,8 +31,8 @@ extends: ServiceDirectory: ai TestProxy: true BuildDocs: true + TestTimeoutInMinutes: 60 # The below were set before when azure-ai-generative and azure-ai-resources packages were built: - # TestTimeoutInMinutes: 75 # This is a short term solution to create API review for python azure-ml package only when running pipeline manually # Long term solution should be to have different versions on main branch and release branch for python package so APIView can have different revisions for each version. 
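A caveat on the `asyncio.run(self.close())` bridge that `__exit__` gains above: `asyncio.run()` raises `RuntimeError` when an event loop is already running, which is exactly the situation inside async application code. The conventional shape is an async context manager that awaits the close on the caller's loop — a sketch using names matching the patch's `AsyncStreamingChatCompletions`, not the shipped code:

```python
class AsyncStreamingSketch:
    async def aclose(self) -> None:
        ...  # In the real class: await self._response.close()

    async def __aenter__(self) -> "AsyncStreamingSketch":
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        # Awaited on the already-running loop; no asyncio.run() bridge needed.
        await self.aclose()
```

Callers then write `async with` instead of `with`, mirroring how the sync class pairs `with` and `close()`.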
# Tracking issue: https://github.com/Azure/azure-sdk-for-python/issues/29196 From 610b4eb052cc7005c668ba6e6426ec5b7fbbfa42 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 9 May 2024 14:58:13 -0700 Subject: [PATCH 051/112] Mark azure-ai-generative and azure-ai-resources as in-active --- sdk/ai/azure-ai-generative/setup.py | 2 +- sdk/ai/azure-ai-resources/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-generative/setup.py b/sdk/ai/azure-ai-generative/setup.py index b767590d29c9..696e84a33431 100644 --- a/sdk/ai/azure-ai-generative/setup.py +++ b/sdk/ai/azure-ai-generative/setup.py @@ -42,7 +42,7 @@ url="https://github.com/Azure/azure-sdk-for-python", keywords="azure, azuresdk, azure sdk", classifiers=[ - "Development Status :: 4 - Beta", + "Development Status :: 7 - Inactive", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", diff --git a/sdk/ai/azure-ai-resources/setup.py b/sdk/ai/azure-ai-resources/setup.py index f688a10e2ba1..cb0abaa55318 100644 --- a/sdk/ai/azure-ai-resources/setup.py +++ b/sdk/ai/azure-ai-resources/setup.py @@ -42,7 +42,7 @@ url="https://github.com/Azure/azure-sdk-for-python", keywords="azure, azuresdk, azure sdk", classifiers=[ - "Development Status :: 4 - Beta", + "Development Status :: 7 - Inactive", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", From 9def3bbfd67df48a73c97527cebe557dbc65993b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 9 May 2024 18:07:49 -0700 Subject: [PATCH 052/112] Fix mypy and pylint errors --- .../azure/ai/inference/__init__.py | 2 +- .../ai/inference/_operations/_operations.py | 2 +- .../azure/ai/inference/aio/__init__.py | 2 +- .../inference/aio/_operations/_operations.py | 2 +- .../azure/ai/inference/models/_patch.py | 8 +-- ...ample_chat_completions_from_input_bytes.py | 8 +-- .../sample_chat_completions_with_tools.py | 49 +++++++++++-------- 7 files changed, 38 insertions(+), 35 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py index 52fb809c15f3..340418218163 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
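That regeneration warning is why every hand-written change in this series lives in a `_patch.py` module: the emitted files are disposable, and (as the import swap just below shows) the generated `__init__.py` re-exports whatever `_patch.py` publishes. A minimal sketch of the customization hook, its shape inferred from the `_patch.py` files in this series:

```python
# _patch.py -- survives regeneration; the generated __init__.py imports from here.
from typing import List

from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated


class ChatCompletionsClient(ChatCompletionsClientGenerated):
    """Layers hand-written members (e.g. create_streaming) onto the generated client."""


__all__: List[str] = ["ChatCompletionsClient"]  # What the package surface exposes.
```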
# -------------------------------------------------------------------------- -from ._client import ChatCompletionsClient +from ._patch import ChatCompletionsClient from ._client import EmbeddingsClient from ._client import ImageGenerationClient from ._version import VERSION diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index fdc2665fc6aa..dc75e0630c8b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -527,7 +527,7 @@ def create( """ @distributed_trace - def create( + def create( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index 34ce566598a6..94ec65ac14df 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._client import ChatCompletionsClient +from ._patch import ChatCompletionsClient from ._client import EmbeddingsClient from ._client import ImageGenerationClient diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 2bfd83fcf087..6150068dddb3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -402,7 +402,7 @@ async def create( """ @distributed_trace_async - async def create( + async def create( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 8eed8e55200f..80d3e8c50691 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -12,7 +12,7 @@ import queue import re -from typing import List, Union, AsyncIterator, Iterator, cast +from typing import List, AsyncIterator, Iterator, cast from azure.core.rest import HttpResponse, AsyncHttpResponse from .. 
import models as _models @@ -50,7 +50,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: for index, line in enumerate(line_list): if self.ENABLE_CLASS_LOGS: - logger.debug(f"[Original line] {repr(line)}") + logger.debug("[Original line] %s", repr(line)) if index == 0: line = self._incomplete_json + line @@ -61,7 +61,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: return False if self.ENABLE_CLASS_LOGS: - logger.debug(f"[Modified line] {repr(line)}") + logger.debug("[Modified line] %s", repr(line)) if line == "\n": # Empty line, indicating flush output to client continue @@ -166,7 +166,7 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: - asyncio.run(self.close()) + asyncio.run(self.aclose()) async def aclose(self) -> None: await self._response.close() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index ccfaaf1281ad..5eb173b14f8a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -47,12 +47,8 @@ def sample_chat_completions_from_input_bytes(): def read_text_file(file_path: str) -> io.BytesIO: """Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding.""" - try: - with open(file_path, "r") as file: - return io.BytesIO(file.read().encode("utf-8")) - except FileNotFoundError: - print(f"File '{file_path}' not found.") - return None + with open(file_path, "r") as file: + return io.BytesIO(file.read().encode("utf-8")) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index a03cd1db5e54..88760a4c18d0 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -3,6 +3,9 @@ # Licensed under the MIT License. # ------------------------------------ """ +NOTE: + This sample is still work in progress... + DESCRIPTION: This sample demonstrates how to do chat completions using a synchronous client, with the assistance of tools. 
In this sample, we use a mock function tool to retrieve @@ -42,15 +45,16 @@ def sample_chat_completions_with_tools(): from azure.ai.inference import ChatCompletionsClient from azure.ai.inference.models import ( - SystemMessage, - UserMessage, AssistantMessage, - ToolMessage, + ChatCompletionsFunctionToolCall, ChatCompletionsFunctionToolDefinition, - FunctionDefinition, - CompletionsFinishReason, + ChatCompletionsNamedToolSelection, ChatCompletionsToolSelectionPreset, - ChatCompletionsNamedToolSelection + CompletionsFinishReason, + FunctionDefinition, + SystemMessage, + ToolMessage, + UserMessage, ) from azure.core.credentials import AzureKeyCredential @@ -108,7 +112,6 @@ def get_flight_info(origin_city: str, destination_city: str): UserMessage(content="What are the next flights from Seattle to Miami and from Seattle to Orlando?"), ] - x : ChatCompletionsNamedToolSelection result = client.create( messages=messages, tools=[flight_info], @@ -119,23 +122,27 @@ def get_flight_info(origin_city: str, destination_city: str): while result.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: # Append the previous model response to the chat history - messages.append(AssistantMessage(tool_calls=result.choices[0].message.tool_calls)) + if result.choices[0].message.tool_calls is not None: + # TODO: Remove the need to set content="" + messages.append(AssistantMessage(content="", tool_calls=result.choices[0].message.tool_calls)) # Make new function call(s) as needed. If parallel function calling is supported by the model, # we may have more than one tool call request. - for tool_call in result.choices[0].message.tool_calls: - function_name = tool_call.function.name - function_args = json.loads(tool_call.function.arguments.replace("'", '"')) - tool_call_id = tool_call.id - print(f"Calling function `{function_name}` with arguments {function_args}") - callable_func = locals()[function_name] - function_response = callable_func(**function_args) - print(f"Function response is: {function_response}") - - # Provide the tool response to the model, by appending it to the chat history - messages.append( - ToolMessage(tool_call_id=tool_call_id, content=function_response) # json.dumps(function_response) - ) + if result.choices[0].message.tool_calls is not None: + for tool_call in result.choices[0].message.tool_calls: + if hasattr(tool_call, "function"): + function_name = tool_call.function.name + function_args = json.loads(tool_call.function.arguments.replace("'", '"')) + tool_call_id = tool_call.id + print(f"Calling function `{function_name}` with arguments {function_args}") + callable_func = locals()[function_name] + function_response = callable_func(**function_args) + print(f"Function response is: {function_response}") + + # Provide the tool response to the model, by appending it to the chat history + messages.append( + ToolMessage(tool_call_id=tool_call_id, content=function_response) # json.dumps(function_response) + ) # With the additional tools information on hand, get another response from the model result = client.create( From fb7612b2249b9a5d9a042f96e2cff7950c651203 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 10 May 2024 11:07:22 -0700 Subject: [PATCH 053/112] Sample for getting model info --- sdk/ai/azure-ai-inference/README.md | 32 ++++++++++-- .../ai/inference/_operations/_operations.py | 30 ++++++------ .../inference/aio/_operations/_operations.py | 30 ++++++------ .../azure/ai/inference/models/__init__.py | 4 +- 
.../azure/ai/inference/models/_enums.py | 14 +++--- .../azure/ai/inference/models/_models.py | 12 ++--- sdk/ai/azure-ai-inference/samples/README.md | 3 +- .../samples/sample_get_model_info.py | 49 +++++++++++++++++++ 8 files changed, 126 insertions(+), 48 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/sample_get_model_info.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index cfa70cf2d438..ebef7aaedbc7 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -99,6 +99,7 @@ In the following sections you will find simple examples of: * [Streaming chat completions](#streaming-chat-completions-example) * [Embeddings](#embeddings-example) * [Image geneartion](#image-generation-example) +* [Get model information](#get-model-information-example) The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients). Only mandatory input settings are shown for simplicity. @@ -154,7 +155,10 @@ result = client.create_streaming( ) for update in result: - print(update.choices[0].delta.content, end="") + if update.choices[0].delta.content: + print(update.choices[0].delta.content, end="") + +result.close() ``` @@ -210,8 +214,30 @@ client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential( result = client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") -with open(f"image.png", "wb") as image: - image.write(result.data[0].b64_json.decode("base64")) +if result.data[0].b64_json is not None: + with open(f"image.png", "wb") as image: + image.write(base64.b64decode(result.data[0].b64_json)) +``` + + + +### Get model information example + +Each one of the clients supports a `get_model_info` method that can be used to retreive infomation about the AI model. This example shows how to get model information from the `ChatCompletionsClient`, but similarly can be done with the other clients. + + + +```python +from azure.ai.inference import ChatCompletionsClient +from azure.core.credentials import AzureKeyCredential + +client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + +model_info = client.get_model_info() + +print(f"Model name: {model_info.model_name}") +print(f"Model provider name: {model_info.model_provider_name}") +print(f"Model type: {model_info.model_type}") ``` diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index dc75e0630c8b..96a7653345c3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -803,12 +803,12 @@ def create( # pylint: disable=too-many-locals return deserialized # type: ignore @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. - :return: ModelInformation. The ModelInformation is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInformation + :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -833,7 +833,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) _request = build_chat_completions_get_model_info_request( api_version=self._config.api_version, @@ -861,7 +861,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ModelInformation, response.json()) + deserialized = _deserialize(_models.ModelInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1246,12 +1246,12 @@ def create( return deserialized # type: ignore @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. - :return: ModelInformation. The ModelInformation is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInformation + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -1276,7 +1276,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) _request = build_embeddings_get_model_info_request( api_version=self._config.api_version, @@ -1304,7 +1304,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ModelInformation, response.json()) + deserialized = _deserialize(_models.ModelInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1674,12 +1674,12 @@ def create( return deserialized # type: ignore @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. - :return: ModelInformation. The ModelInformation is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInformation + :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -1704,7 +1704,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) _request = build_image_generation_get_model_info_request( api_version=self._config.api_version, @@ -1732,7 +1732,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ModelInformation, response.json()) + deserialized = _deserialize(_models.ModelInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 6150068dddb3..9957b8d7f792 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -678,12 +678,12 @@ async def create( # pylint: disable=too-many-locals return deserialized # type: ignore @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. - :return: ModelInformation. The ModelInformation is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInformation + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -708,7 +708,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) _request = build_chat_completions_get_model_info_request( api_version=self._config.api_version, @@ -736,7 +736,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ModelInformation, response.json()) + deserialized = _deserialize(_models.ModelInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1121,12 +1121,12 @@ async def create( return deserialized # type: ignore @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. - :return: ModelInformation. The ModelInformation is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInformation + :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -1151,7 +1151,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) _request = build_embeddings_get_model_info_request( api_version=self._config.api_version, @@ -1179,7 +1179,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ModelInformation, response.json()) + deserialized = _deserialize(_models.ModelInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1549,12 +1549,12 @@ async def create( return deserialized # type: ignore @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. - :return: ModelInformation. The ModelInformation is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInformation + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -1579,7 +1579,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInformation] = kwargs.pop("cls", None) + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) _request = build_image_generation_get_model_info_request( api_version=self._config.api_version, @@ -1607,7 +1607,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInformation: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ModelInformation, response.json()) + deserialized = _deserialize(_models.ModelInfo, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 1636653f8d42..f5785318c237 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -26,7 +26,7 @@ from ._models import FunctionDefinition from ._models import ImageGenerationData from ._models import ImageGenerations -from ._models import ModelInformation +from ._models import ModelInfo from ._models import SystemMessage from ._models import ToolMessage from ._models import UserMessage @@ -65,7 +65,7 @@ "FunctionDefinition", "ImageGenerationData", "ImageGenerations", - "ModelInformation", + "ModelInfo", "SystemMessage", "ToolMessage", "UserMessage", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index e3dd1c7fa88a..9f4ca0d1bef1 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -111,11 +111,13 @@ class ModelType(str, Enum, metaclass=CaseInsensitiveEnumMeta): EMBEDDINGS = "embeddings" 
"""Embeddings.""" - CUSTOM = "custom" - """Custom model.""" - CHAT = "chat" - """Chat completions""" + IMAGE_GENERATION = "image_generation" + """Image generation.""" TEXT_GENERATION = "text_generation" """Text generation""" - IMAGE_GENERATION = "image_generation" - """Image generation""" + IMAGE_EMBEDDINGS = "image_embeddings" + """Image embeddings.""" + AUDIO_GENERATION = "audio_generation" + """Audio generation""" + CHAT = "chat" + """Chat completions""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index ee1ecba861a1..85245430270f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -916,7 +916,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class ModelInformation(_model_base.Model): +class ModelInfo(_model_base.Model): """Represents some basic information about the AI model. All required parameters must be populated in order to send to server. @@ -931,12 +931,12 @@ class ModelInformation(_model_base.Model): """ model_type: Union[str, "_models.ModelType"] = rest_field() - """The type of the AI model. Required. Known values are: \"embeddings\", \"custom\", \"chat\", - \"text_generation\", and \"image_generation\".""" - model_provider: str = rest_field() - """The model provider. Required.""" + """The type of the AI model. A Unique identifier for the profile. Required. Known values are: \"embeddings\", \"image_generation\", \"text_generation\", + \"image_embeddings\", \"audio_generation\", and \"chat\".""" + model_provider_name: str = rest_field() + """The model provider name. For example: `Microsoft Research`. Required.""" model_name: str = rest_field() - """The name of the AI model. Required.""" + """The name of the AI model. For example: `Phi21`. Required.""" @overload def __init__( diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 47822341e33e..09252ee27ade 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -23,9 +23,10 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_with_history.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, which the second completion using chat history from the first. | |[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. 
| |[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | +|[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how do use a tool (function) in chat completions, for an AI model that supports tools | |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | |[sample_image_generation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_generation.py) | Generate an image from a prompt using a synchronous client. | - +|[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. Similarly can be done with all other clients. | ## Asynchronous client samples |**File Name**|**Description**| diff --git a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py new file mode 100644 index 000000000000..f7aca4e2be47 --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py @@ -0,0 +1,49 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get information about the AI model, using the + synchronous chat completions client. Similarly can be done with the other + clients. + +USAGE: + python sample_get_model_info + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://..inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
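(A side note before the sample body: the generated docstrings describe the returned `ModelInfo` as "compatible with MutableMapping", so the attribute access printed below has a dict-style equivalent. A hedged illustration, assuming a constructed client as in the sample:)

```python
model_info = client.get_model_info()
print(model_info.model_name)       # attribute-style access
print(model_info["model_name"])    # same value, mapping-style access
print(dict(model_info))            # e.g. {'model_type': 'chat', ...}
```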
+""" + + +def sample_get_model_info(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + # [START get_model_info] + from azure.ai.inference import ChatCompletionsClient + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + model_info = client.get_model_info() + + print(f"Model name: {model_info.model_name}") + print(f"Model provider name: {model_info.model_provider_name}") + print(f"Model type: {model_info.model_type}") + # [END get_model_info] + + +if __name__ == "__main__": + sample_get_model_info() From cd30a481693f5b004d37a39dd3931583a0ac1bcf Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 10 May 2024 16:02:09 -0700 Subject: [PATCH 054/112] Remove image generation --- sdk/ai/azure-ai-inference/README.md | 28 -------- .../azure/ai/inference/_patch.py | 19 +++++ sdk/ai/azure-ai-inference/samples/README.md | 3 +- .../sample_image_generation_async.py | 70 ------------------- .../samples/sample_image_generation.py | 49 ------------- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 6 files changed, 21 insertions(+), 150 deletions(-) delete mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py delete mode 100644 sdk/ai/azure-ai-inference/samples/sample_image_generation.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index ebef7aaedbc7..7f8a0e5d2d53 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -85,12 +85,6 @@ TODO: Add overview and link to explain embeddings. Embeddings operations target the URL route `/v1/embeddings` on the provided endpoint. -### Image Generation - -TODO: Add overview and link to explain image generation. - -Image generation operations target the URL route `/images/generations` on the provided endpoint. - ## Examples In the following sections you will find simple examples of: @@ -98,7 +92,6 @@ In the following sections you will find simple examples of: * [Chat completions](#chat-completions-example) * [Streaming chat completions](#streaming-chat-completions-example) * [Embeddings](#embeddings-example) -* [Image geneartion](#image-generation-example) * [Get model information](#get-model-information-example) The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients). Only mandatory input settings are shown for simplicity. @@ -200,27 +193,6 @@ data[2]: length=1024, [0.04196167, 0.029083252, ..., -0.0027484894, 0.0073127747 To generate embeddings for additional phrases, simply call `client.create` multiple times using the same `client`. -### Image generation example - -This example demonstrates how to generate an image of size 1024x768 from a text prompt, and save the resulting image to a `image.png` file. 
- - - -```python -from azure.ai.inference import ImageGenerationClient -from azure.core.credentials import AzureKeyCredential - -client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - -result = client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") - -if result.data[0].b64_json is not None: - with open(f"image.png", "wb") as image: - image.write(base64.b64decode(result.data[0].b64_json)) -``` - - - ### Get model information example Each one of the clients supports a `get_model_info` method that can be used to retreive infomation about the AI model. This example shows how to get model information from the `ChatCompletionsClient`, but similarly can be done with the other clients. diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 37b4e404e564..564fbeb641a6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -12,6 +12,7 @@ from io import IOBase from typing import Any, Dict, Union, IO, List, Optional, overload from azure.core.pipeline import PipelineResponse +from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict from azure.core.exceptions import ( @@ -27,6 +28,7 @@ from ._serialization import Serializer from ._operations._operations import build_chat_completions_create_request from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated +from ._client import EmbeddingsClient if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -38,6 +40,22 @@ _SERIALIZER.client_side_validation = False +class ClientGenerator: + @staticmethod + def from_endpoint(endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... + model_info = client.get_model_info() + print(model_info) + if model_info.model_type == None or model_info.model_type == "": + raise ValueError("The AI model information is missing a value for `model type`. 
Cannot create an appropriate client.") + elif model_info.model_type == _models.ModelType.CHAT: + return client + elif model_info.model_type == _models.ModelType.EMBEDDINGS: + return EmbeddingsClient(endpoint, credential, **kwargs) + else: + raise ValueError(f"No client available to support AI model type {model_info.model_type}") + + class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload @@ -380,6 +398,7 @@ def create_streaming( __all__: List[str] = [ + "ClientGenerator", "ChatCompletionsClient" ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 09252ee27ade..9b8c3bbc19c0 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -25,8 +25,8 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | |[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how do use a tool (function) in chat completions, for an AI model that supports tools | |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | -|[sample_image_generation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_generation.py) | Generate an image from a prompt using a synchronous client. | |[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. Similarly can be done with all other clients. | + ## Asynchronous client samples |**File Name**|**Description**| @@ -34,7 +34,6 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | -|[sample_image_generation_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py) | Generate an image from a prompt using an asynchronous client. 
| ## Prerequisites diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py deleted file mode 100644 index bdc1adc72235..000000000000 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_generation_async.py +++ /dev/null @@ -1,70 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to generate an image from a prompt using an asynchronous client. - -USAGE: - python sample_image_generation_async.py - - Set these two environment variables before running the sample: - 1) IMAGE_GENERATION_ENDPOINT - Your endpoint URL, in the form - https://..inference.ai.azure.com - where `your-deployment-name` is your unique AI Model deployment name, and - `your-azure-region` is the Azure region where your model is deployed. - 2) IMAGE_GENERATION_KEY - Your model key (a 32-character string). Keep it secret. -""" -import asyncio - - -async def sample_image_generation_async(): - import os - import base64 - from azure.ai.inference.aio import ImageGenerationClient - from azure.core.credentials import AzureKeyCredential - - # Read the values of your model endpoint and key from environment variables - try: - endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"] - key = os.environ["IMAGE_GENERATION_KEY"] - except KeyError: - print("Missing environment variable 'IMAGE_GENERATION_ENDPOINT' or 'IMAGE_GENERATION_KEY'") - print("Set them before running this sample.") - exit() - - # Create an Model for synchronous operations - client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential("key")) - - # Generate an image from text prompt. This will be an asynchronously (non-blocking) call. - future = asyncio.ensure_future( - client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") - ) - - # Loop until the operation is done - while not future.done(): - await asyncio.sleep(0.1) - print("Waiting...") - - # Get the result - result = future.result() - await client.close() - - # Save generated image to file and print other results the the console - print("Image generation result:") - for index, item in enumerate(result.data): - if item.b64_json is not None: - with open(f"image_{index}.png", "wb") as image: - image.write(base64.b64decode(item.b64_json)) - print(f"id: {result.id}") - print(f"model: {result.model}") - print(f"created: {result.created}") - - -async def main(): - await sample_image_generation_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py b/sdk/ai/azure-ai-inference/samples/sample_image_generation.py deleted file mode 100644 index f86e3ee7031a..000000000000 --- a/sdk/ai/azure-ai-inference/samples/sample_image_generation.py +++ /dev/null @@ -1,49 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to generate an image from a prompt - using a synchronous client. 
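The async image-generation sample deleted above polled `asyncio.ensure_future(...)` in a `while not future.done(): await asyncio.sleep(0.1)` loop. Wherever that pattern resurfaces in other async samples, awaiting the coroutine directly is the idiomatic equivalent; the event loop keeps servicing other tasks while the request is in flight. A sketch with a hypothetical async `client`:

```python
async def generate(client):
    # Replaces ensure_future() plus the sleep/poll loop: awaiting suspends this
    # task until the service call completes, with no busy-waiting.
    return await client.create(prompt="A painting of a beautiful sunset over a mountain lake.")
```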
- -USAGE: - python sample_image_generation.py - - Set these two environment variables before running the sample: - 1) IMAGE_GENERATION_ENDPOINT - Your endpoint URL, in the form - https://..inference.ai.azure.com - where `your-deployment-name` is your unique AI Model deployment name, and - `your-azure-region` is the Azure region where your model is deployed. - 2) IMAGE_GENERATION_KEY - Your model key (a 32-character string). Keep it secret. -""" - - -def sample_image_generation(): - import os - import base64 - - try: - endpoint = os.environ["IMAGE_GENERATION_ENDPOINT"] - key = os.environ["IMAGE_GENERATION_KEY"] - except KeyError: - print("Missing environment variable 'IMAGE_GENERATION_ENDPOINT' or 'IMAGE_GENERATION_KEY'") - print("Set them before running this sample.") - exit() - - # [START image_generation] - from azure.ai.inference import ImageGenerationClient - from azure.core.credentials import AzureKeyCredential - - client = ImageGenerationClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - - result = client.create(prompt="A painting of a beautiful sunset over a mountain lake.", size="1024x768") - - if result.data[0].b64_json is not None: - with open(f"image.png", "wb") as image: - image.write(base64.b64decode(result.data[0].b64_json)) - # [END image_generation] - - -if __name__ == "__main__": - sample_image_generation() diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index a9173a8c5b50..308cb8aa6e8c 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 32a24a5702ba8ad817280c740ab3b485c1b34079 +commit: 43e5511f7c8543a88811d9218d9e7bf301c22646 repo: Azure/azure-rest-api-specs additionalDirectories: From a6ae4075b41ef632c1044ff601c6d4a9fb66a905 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 10 May 2024 16:15:50 -0700 Subject: [PATCH 055/112] Re-emit from TypeSpec without Image Generation --- .../azure/ai/inference/__init__.py | 4 +- .../azure/ai/inference/_client.py | 88 +-- .../azure/ai/inference/_configuration.py | 47 -- .../ai/inference/_operations/__init__.py | 2 - .../ai/inference/_operations/_operations.py | 505 +----------------- .../azure/ai/inference/_patch.py | 20 +- .../azure/ai/inference/_vendor.py | 15 +- .../azure/ai/inference/aio/__init__.py | 4 +- .../azure/ai/inference/aio/_client.py | 90 +--- .../azure/ai/inference/aio/_configuration.py | 47 -- .../ai/inference/aio/_operations/__init__.py | 2 - .../inference/aio/_operations/_operations.py | 464 +--------------- .../azure/ai/inference/aio/_vendor.py | 15 +- .../azure/ai/inference/models/__init__.py | 8 - .../azure/ai/inference/models/_enums.py | 25 +- .../azure/ai/inference/models/_models.py | 111 +--- .../azure/ai/inference/models/_patch.py | 3 +- .../sample_chat_completions_with_tools.py | 19 +- .../tests/model_inference_test_base.py | 6 +- .../tests/test_model_inference_client.py | 22 +- 20 files changed, 105 insertions(+), 1392 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py index 340418218163..057fbcaefaee 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py @@ -6,9 +6,8 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._patch import ChatCompletionsClient +from ._client import ChatCompletionsClient from ._client import EmbeddingsClient -from ._client import ImageGenerationClient from ._version import VERSION __version__ = VERSION @@ -23,7 +22,6 @@ __all__ = [ "ChatCompletionsClient", "EmbeddingsClient", - "ImageGenerationClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py index 7eb652f09aa9..941c03c014d7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py @@ -14,16 +14,8 @@ from azure.core.pipeline import policies from azure.core.rest import HttpRequest, HttpResponse -from ._configuration import ( - ChatCompletionsClientConfiguration, - EmbeddingsClientConfiguration, - ImageGenerationClientConfiguration, -) -from ._operations import ( - ChatCompletionsClientOperationsMixin, - EmbeddingsClientOperationsMixin, - ImageGenerationClientOperationsMixin, -) +from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration +from ._operations import ChatCompletionsClientOperationsMixin, EmbeddingsClientOperationsMixin from ._serialization import Deserializer, Serializer @@ -177,79 +169,3 @@ def __enter__(self) -> "EmbeddingsClient": def __exit__(self, *exc_details: Any) -> None: self._client.__exit__(*exc_details) - - -class ImageGenerationClient(ImageGenerationClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword - """ImageGenerationClient. - - :param endpoint: Service host. Required. - :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: - _endpoint = "{endpoint}" - self._config = ImageGenerationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: - """Runs the network request through the client's chained policies. 
- - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client.send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> "ImageGenerationClient": - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 4d27040de96d..608bf26c541c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -106,50 +106,3 @@ def _configure(self, **kwargs: Any) -> None: self.authentication_policy = policies.AzureKeyCredentialPolicy( self.credential, "Authorization", prefix="Bearer", **kwargs ) - - -class ImageGenerationClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long - """Configuration for ImageGenerationClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: Service host. Required. - :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-04-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AzureKeyCredentialPolicy( - self.credential, "Authorization", prefix="Bearer", **kwargs - ) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py index b5b194f3cca4..d0e8dcc776a6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py @@ -8,7 +8,6 @@ from ._operations import ChatCompletionsClientOperationsMixin from ._operations import EmbeddingsClientOperationsMixin -from ._operations import ImageGenerationClientOperationsMixin from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import @@ -17,7 +16,6 @@ __all__ = [ "ChatCompletionsClientOperationsMixin", "EmbeddingsClientOperationsMixin", - "ImageGenerationClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 96a7653345c3..142fc67800d7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -27,7 +27,7 @@ from .. 
import models as _models from .._model_base import SdkJSONEncoder, _deserialize from .._serialization import Serializer -from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageGenerationClientMixinABC +from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -128,49 +128,6 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_image_generation_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/images/generations" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if model_deployment is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_image_generation_get_model_info_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/info" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload @@ -527,7 +484,7 @@ def create( """ @distributed_trace - def create( # pylint: disable=too-many-locals + def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -816,10 +773,13 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # response body for status code(s): 200 response == { - "model_name": "str", # The name of the AI model. Required. - "model_provider": "str", # The model provider. Required. - "model_type": "str" # The type of the AI model. Required. Known values are: - "embeddings", "custom", "chat", "text_generation", and "image_generation". + "model_name": "str", # The name of the AI model. For example: ``Phi21``. + Required. + "model_provider_name": "str", # The model provider name. For example: + ``Microsoft Research``. Required. + "model_type": "str" # The type of the AI model. A Unique identifier for the + profile. Required. Known values are: "embeddings", "image_generation", + "text_generation", "image_embeddings", "audio_generation", and "chat". 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -881,7 +841,7 @@ def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :param body: Required. :type body: JSON @@ -964,7 +924,7 @@ def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] @@ -1038,7 +998,7 @@ def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :param body: Required. :type body: IO[bytes] @@ -1104,7 +1064,7 @@ def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -1259,10 +1219,13 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # response body for status code(s): 200 response == { - "model_name": "str", # The name of the AI model. Required. - "model_provider": "str", # The model provider. Required. - "model_type": "str" # The type of the AI model. Required. Known values are: - "embeddings", "custom", "chat", "text_generation", and "image_generation". + "model_name": "str", # The name of the AI model. For example: ``Phi21``. + Required. + "model_provider_name": "str", # The model provider name. For example: + ``Microsoft Research``. Required. + "model_type": "str" # The type of the AI model. A Unique identifier for the + profile. Required. Known values are: "embeddings", "image_generation", + "text_generation", "image_embeddings", "audio_generation", and "chat". } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -1310,431 +1273,3 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - - -class ImageGenerationClientOperationsMixin(ImageGenerationClientMixinABC): - - @overload - def create( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "prompt": "str", # A description of the desired images. Required. 
- "size": "str", # The desired dimension in pixels of the generated images, in - the format ":code:``x:code:``". For example: "1024x1024", - "1792x1024". Required. - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "quality": "str", # Optional. The desired image generation quality level to - use. Known values are: "standard" and "hd". - "response_format": "str", # Optional. The format in which image generation - response items should be presented. Known values are: "url" and "b64_json". - "seed": 0 # Optional. If specified, the system will make a best effort to - sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed.". - } - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. - } - """ - - @overload - def create( - self, - *, - prompt: str, - size: str, - model_deployment: Optional[str] = None, - content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, - quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, - response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, - seed: Optional[int] = None, - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :keyword prompt: A description of the desired images. Required. - :paramtype prompt: str - :keyword size: The desired dimension in pixels of the generated images, in the format - ":code:``x:code:``". - For example: "1024x1024", "1792x1024". Required. - :paramtype size: str - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword quality: The desired image generation quality level to use. Known values are: - "standard" and "hd". Default value is None. 
- :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality - :keyword response_format: The format in which image generation response items should be - presented. Known values are: "url" and "b64_json". Default value is None. - :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat - :keyword seed: If specified, the system will make a best effort to sample deterministically - such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed.". - Default value is None. - :paramtype seed: int - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. - } - """ - - @overload - def create( - self, - body: IO[bytes], - *, - model_deployment: Optional[str] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. 
- } - """ - - @distributed_trace - def create( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - prompt: str = _Unset, - size: str = _Unset, - model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, - quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, - response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, - seed: Optional[int] = None, - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword prompt: A description of the desired images. Required. - :paramtype prompt: str - :keyword size: The desired dimension in pixels of the generated images, in the format - ":code:``x:code:``". - For example: "1024x1024", "1792x1024". Required. - :paramtype size: str - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword quality: The desired image generation quality level to use. Known values are: - "standard" and "hd". Default value is None. - :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality - :keyword response_format: The format in which image generation response items should be - presented. Known values are: "url" and "b64_json". Default value is None. - :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat - :keyword seed: If specified, the system will make a best effort to sample deterministically - such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed.". - Default value is None. - :paramtype seed: int - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "prompt": "str", # A description of the desired images. Required. - "size": "str", # The desired dimension in pixels of the generated images, in - the format ":code:``x:code:``". For example: "1024x1024", - "1792x1024". Required. - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "quality": "str", # Optional. The desired image generation quality level to - use. Known values are: "standard" and "hd". - "response_format": "str", # Optional. The format in which image generation - response items should be presented. Known values are: "url" and "b64_json". 
- "seed": 0 # Optional. If specified, the system will make a best effort to - sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed.". - } - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ImageGenerations] = kwargs.pop("cls", None) - - if body is _Unset: - if prompt is _Unset: - raise TypeError("missing required argument: prompt") - if size is _Unset: - raise TypeError("missing required argument: size") - body = { - "extras": extras, - "prompt": prompt, - "quality": quality, - "response_format": response_format, - "seed": seed, - "size": size, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_image_generation_create_request( - model_deployment=model_deployment, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ImageGenerations, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: - # pylint: disable=line-too-long - """Returns information about the AI model. - - :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "model_name": "str", # The name of the AI model. Required. - "model_provider": "str", # The model provider. Required. - "model_type": "str" # The type of the AI model. Required. Known values are: - "embeddings", "custom", "chat", "text_generation", and "image_generation". - } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) - - _request = build_image_generation_get_model_info_request( - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ModelInfo, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 564fbeb641a6..6fbf8dc2a29c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -42,12 +42,16 @@ class ClientGenerator: @staticmethod - def from_endpoint(endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient]: - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... + def from_endpoint( + endpoint: str, credential: AzureKeyCredential, **kwargs: Any + ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() print(model_info) if model_info.model_type == None or model_info.model_type == "": - raise ValueError("The AI model information is missing a value for `model type`. Cannot create an appropriate client.") + raise ValueError( + "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
+ ) elif model_info.model_type == _models.ModelType.CHAT: return client elif model_info.model_type == _models.ModelType.EMBEDDINGS: @@ -65,7 +69,7 @@ def create_streaming( *, model_deployment: Optional[str] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -109,7 +113,7 @@ def create_streaming( Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] ] = None, seed: Optional[int] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -202,7 +206,7 @@ def create_streaming( *, model_deployment: Optional[str] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -246,7 +250,7 @@ def create_streaming( Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] ] = None, seed: Optional[int] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.StreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -399,7 +403,7 @@ def create_streaming( __all__: List[str] = [ "ClientGenerator", - "ChatCompletionsClient" + "ChatCompletionsClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py index 9e95c0a2f86b..16b7610458ec 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py @@ -8,11 +8,7 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import ( - ChatCompletionsClientConfiguration, - EmbeddingsClientConfiguration, - ImageGenerationClientConfiguration, -) +from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -37,12 +33,3 @@ class EmbeddingsClientMixinABC(ABC): _config: EmbeddingsClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" - - -class ImageGenerationClientMixinABC(ABC): - """DO NOT use this class. It is for internal typing use only.""" - - _client: "PipelineClient" - _config: ImageGenerationClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index 94ec65ac14df..0335784ac1ea 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -6,9 +6,8 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._patch import ChatCompletionsClient +from ._client import ChatCompletionsClient from ._client import EmbeddingsClient -from ._client import ImageGenerationClient try: from ._patch import __all__ as _patch_all @@ -20,7 +19,6 @@ __all__ = [ "ChatCompletionsClient", "EmbeddingsClient", - "ImageGenerationClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py index a3ff8c0ee133..810a39e5ddb2 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py @@ -15,16 +15,8 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._serialization import Deserializer, Serializer -from ._configuration import ( - ChatCompletionsClientConfiguration, - EmbeddingsClientConfiguration, - ImageGenerationClientConfiguration, -) -from ._operations import ( - ChatCompletionsClientOperationsMixin, - EmbeddingsClientOperationsMixin, - ImageGenerationClientOperationsMixin, -) +from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration +from ._operations import ChatCompletionsClientOperationsMixin, EmbeddingsClientOperationsMixin class ChatCompletionsClient(ChatCompletionsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword @@ -181,81 +173,3 @@ async def __aenter__(self) -> "EmbeddingsClient": async def __aexit__(self, *exc_details: Any) -> None: await self._client.__aexit__(*exc_details) - - -class ImageGenerationClient(ImageGenerationClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword - """ImageGenerationClient. - - :param endpoint: Service host. Required. - :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported - behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: - _endpoint = "{endpoint}" - self._config = ImageGenerationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - def send_request( - self, request: HttpRequest, *, stream: bool = False, **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. 
- - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client.send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "ImageGenerationClient": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py index 89309fa151e8..41c1596e1410 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -106,50 +106,3 @@ def _configure(self, **kwargs: Any) -> None: self.authentication_policy = policies.AzureKeyCredentialPolicy( self.credential, "Authorization", prefix="Bearer", **kwargs ) - - -class ImageGenerationClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long - """Configuration for ImageGenerationClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: Service host. Required. - :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str - """ - - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-04-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AzureKeyCredentialPolicy( - self.credential, "Authorization", prefix="Bearer", **kwargs - ) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py index b5b194f3cca4..d0e8dcc776a6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py @@ -8,7 +8,6 @@ from ._operations import ChatCompletionsClientOperationsMixin from ._operations import EmbeddingsClientOperationsMixin -from ._operations import ImageGenerationClientOperationsMixin from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import @@ -17,7 +16,6 @@ __all__ = [ "ChatCompletionsClientOperationsMixin", "EmbeddingsClientOperationsMixin", - "ImageGenerationClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 9957b8d7f792..3fb182863a1d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -31,10 +31,8 @@ build_chat_completions_get_model_info_request, build_embeddings_create_request, build_embeddings_get_model_info_request, - build_image_generation_create_request, - build_image_generation_get_model_info_request, ) -from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageGenerationClientMixinABC +from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -402,7 +400,7 @@ async def create( """ @distributed_trace_async - async def create( # pylint: 
disable=too-many-locals + async def create( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -691,10 +689,13 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # response body for status code(s): 200 response == { - "model_name": "str", # The name of the AI model. Required. - "model_provider": "str", # The model provider. Required. - "model_type": "str" # The type of the AI model. Required. Known values are: - "embeddings", "custom", "chat", "text_generation", and "image_generation". + "model_name": "str", # The name of the AI model. For example: ``Phi21``. + Required. + "model_provider_name": "str", # The model provider name. For example: + ``Microsoft Research``. Required. + "model_type": "str" # The type of the AI model. A Unique identifier for the + profile. Required. Known values are: "embeddings", "image_generation", + "text_generation", "image_embeddings", "audio_generation", and "chat". } """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -756,7 +757,7 @@ async def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :param body: Required. :type body: JSON @@ -839,7 +840,7 @@ async def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. :paramtype input: list[str] @@ -913,7 +914,7 @@ async def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :param body: Required. :type body: IO[bytes] @@ -979,7 +980,7 @@ async def create( **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given prompt. + """Return the embeddings for a given text prompt. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -1134,10 +1135,13 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # response body for status code(s): 200 response == { - "model_name": "str", # The name of the AI model. Required. - "model_provider": "str", # The model provider. Required. - "model_type": "str" # The type of the AI model. Required. Known values are: - "embeddings", "custom", "chat", "text_generation", and "image_generation". + "model_name": "str", # The name of the AI model. For example: ``Phi21``. + Required. + "model_provider_name": "str", # The model provider name. For example: + ``Microsoft Research``. Required. + "model_type": "str" # The type of the AI model. A Unique identifier for the + profile. Required. Known values are: "embeddings", "image_generation", + "text_generation", "image_embeddings", "audio_generation", and "chat". 
} """ error_map: MutableMapping[int, Type[HttpResponseError]] = { @@ -1185,431 +1189,3 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - - -class ImageGenerationClientOperationsMixin(ImageGenerationClientMixinABC): - - @overload - async def create( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "prompt": "str", # A description of the desired images. Required. - "size": "str", # The desired dimension in pixels of the generated images, in - the format ":code:``x:code:``". For example: "1024x1024", - "1792x1024". Required. - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "quality": "str", # Optional. The desired image generation quality level to - use. Known values are: "standard" and "hd". - "response_format": "str", # Optional. The format in which image generation - response items should be presented. Known values are: "url" and "b64_json". - "seed": 0 # Optional. If specified, the system will make a best effort to - sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed.". - } - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. 
- } - """ - - @overload - async def create( - self, - *, - prompt: str, - size: str, - model_deployment: Optional[str] = None, - content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, - quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, - response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, - seed: Optional[int] = None, - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :keyword prompt: A description of the desired images. Required. - :paramtype prompt: str - :keyword size: The desired dimension in pixels of the generated images, in the format - ":code:``x:code:``". - For example: "1024x1024", "1792x1024". Required. - :paramtype size: str - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword quality: The desired image generation quality level to use. Known values are: - "standard" and "hd". Default value is None. - :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality - :keyword response_format: The format in which image generation response items should be - presented. Known values are: "url" and "b64_json". Default value is None. - :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat - :keyword seed: If specified, the system will make a best effort to sample deterministically - such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed.". - Default value is None. - :paramtype seed: int - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. 
- } - """ - - @overload - async def create( - self, - body: IO[bytes], - *, - model_deployment: Optional[str] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. - } - """ - - @distributed_trace_async - async def create( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - prompt: str = _Unset, - size: str = _Unset, - model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, - quality: Optional[Union[str, _models.ImageGenerationQuality]] = None, - response_format: Optional[Union[str, _models.ImageGenerationResponseFormat]] = None, - seed: Optional[int] = None, - **kwargs: Any - ) -> _models.ImageGenerations: - # pylint: disable=line-too-long - """Creates images given a prompt. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword prompt: A description of the desired images. Required. - :paramtype prompt: str - :keyword size: The desired dimension in pixels of the generated images, in the format - ":code:``x:code:``". - For example: "1024x1024", "1792x1024". Required. - :paramtype size: str - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword quality: The desired image generation quality level to use. Known values are: - "standard" and "hd". Default value is None. 
- :paramtype quality: str or ~azure.ai.inference.models.ImageGenerationQuality - :keyword response_format: The format in which image generation response items should be - presented. Known values are: "url" and "b64_json". Default value is None. - :paramtype response_format: str or ~azure.ai.inference.models.ImageGenerationResponseFormat - :keyword seed: If specified, the system will make a best effort to sample deterministically - such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed.". - Default value is None. - :paramtype seed: int - :return: ImageGenerations. The ImageGenerations is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ImageGenerations - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "prompt": "str", # A description of the desired images. Required. - "size": "str", # The desired dimension in pixels of the generated images, in - the format ":code:``x:code:``". For example: "1024x1024", - "1792x1024". Required. - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "quality": "str", # Optional. The desired image generation quality level to - use. Known values are: "standard" and "hd". - "response_format": "str", # Optional. The format in which image generation - response items should be presented. Known values are: "url" and "b64_json". - "seed": 0 # Optional. If specified, the system will make a best effort to - sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed.". - } - - # response body for status code(s): 200 - response == { - "created": "2020-02-20 00:00:00", # A timestamp representing when this - operation was started. Represented as seconds since the beginning of the Unix - epoch of 00:00 on 1 Jan 1970. Required. - "data": [ - { - "b64_json": "str", # Optional. The complete data for an - image, represented as a base64-encoded string. - "url": "str" # Optional. The URL that provides temporary - access to download the generated image. - } - ], - "id": "str", # A unique identifier associated with this image generation - response. Required. - "model": "str" # The model used for the image generation. Required. 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ImageGenerations] = kwargs.pop("cls", None) - - if body is _Unset: - if prompt is _Unset: - raise TypeError("missing required argument: prompt") - if size is _Unset: - raise TypeError("missing required argument: size") - body = { - "extras": extras, - "prompt": prompt, - "quality": quality, - "response_format": response_format, - "seed": seed, - "size": size, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_image_generation_create_request( - model_deployment=model_deployment, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ImageGenerations, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: - # pylint: disable=line-too-long - """Returns information about the AI model. - - :return: ModelInfo. The ModelInfo is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "model_name": "str", # The name of the AI model. Required. - "model_provider": "str", # The model provider. Required. - "model_type": "str" # The type of the AI model. Required. Known values are: - "embeddings", "custom", "chat", "text_generation", and "image_generation". 
- } - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) - - _request = build_image_generation_get_model_info_request( - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - await response.read() # Load the body in memory and close the socket - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ModelInfo, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py index 1e0074ce0da0..3159e318c3b6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py @@ -8,11 +8,7 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import ( - ChatCompletionsClientConfiguration, - EmbeddingsClientConfiguration, - ImageGenerationClientConfiguration, -) +from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -37,12 +33,3 @@ class EmbeddingsClientMixinABC(ABC): _config: EmbeddingsClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" - - -class ImageGenerationClientMixinABC(ABC): - """DO NOT use this class. 
It is for internal typing use only.""" - - _client: "AsyncPipelineClient" - _config: ImageGenerationClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index f5785318c237..a83308edc53b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -24,8 +24,6 @@ from ._models import EmbeddingsUsage from ._models import FunctionCall from ._models import FunctionDefinition -from ._models import ImageGenerationData -from ._models import ImageGenerations from ._models import ModelInfo from ._models import SystemMessage from ._models import ToolMessage @@ -37,8 +35,6 @@ from ._enums import ChatRole from ._enums import CompletionsFinishReason from ._enums import EmbeddingInputType -from ._enums import ImageGenerationQuality -from ._enums import ImageGenerationResponseFormat from ._enums import ModelType from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import @@ -63,8 +59,6 @@ "EmbeddingsUsage", "FunctionCall", "FunctionDefinition", - "ImageGenerationData", - "ImageGenerations", "ModelInfo", "SystemMessage", "ToolMessage", @@ -75,8 +69,6 @@ "ChatRole", "CompletionsFinishReason", "EmbeddingInputType", - "ImageGenerationQuality", - "ImageGenerationResponseFormat", "ModelType", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 9f4ca0d1bef1..c0a9ed0785d1 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -85,38 +85,17 @@ class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """to do""" -class ImageGenerationQuality(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """An image generation configuration that specifies how the model should prioritize quality, cost, - and speed. 
- """ - - STANDARD = "standard" - """Requests image generation with standard, balanced characteristics of quality, cost, and speed.""" - HD = "hd" - """Requests image generation with higher quality, higher cost and lower speed relative to - standard.""" - - -class ImageGenerationResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The format in which the generated images are returned.""" - - URL = "url" - """Image generation response items should provide a URL from which the image may be retrieved.""" - BASE64 = "b64_json" - """Image generation response items should provide image data as a base64-encoded string.""" - - class ModelType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The type of AI model.""" EMBEDDINGS = "embeddings" """Embeddings.""" IMAGE_GENERATION = "image_generation" - """Image generation.""" + """Image generation""" TEXT_GENERATION = "text_generation" """Text generation""" IMAGE_EMBEDDINGS = "image_embeddings" - """Image embeddings.""" + """Image embeddings""" AUDIO_GENERATION = "audio_generation" """Audio generation""" CHAT = "chat" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 85245430270f..0c0b3c5769c5 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -834,117 +834,38 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class ImageGenerationData(_model_base.Model): - """A representation of a single generated image, provided as either base64-encoded data or as a - URL from which the image - may be retrieved. - - :ivar url: The URL that provides temporary access to download the generated image. - :vartype url: str - :ivar b64_json: The complete data for an image, represented as a base64-encoded string. - :vartype b64_json: str - """ - - url: Optional[str] = rest_field() - """The URL that provides temporary access to download the generated image.""" - b64_json: Optional[str] = rest_field() - """The complete data for an image, represented as a base64-encoded string.""" - - @overload - def __init__( - self, - *, - url: Optional[str] = None, - b64_json: Optional[str] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ImageGenerations(_model_base.Model): - """The result of a successful image generation operation. - - All required parameters must be populated in order to send to server. - - :ivar id: A unique identifier associated with this image generation response. Required. - :vartype id: str - :ivar created: A timestamp representing when this operation was started. - Represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - :vartype created: ~datetime.datetime - :ivar model: The model used for the image generation. Required. - :vartype model: str - :ivar data: The images generated by the operation. Required. - :vartype data: list[~azure.ai.inference.models.ImageGenerationData] - """ - - id: str = rest_field() - """A unique identifier associated with this image generation response. 
Required.""" - created: datetime.datetime = rest_field(format="unix-timestamp") - """A timestamp representing when this operation was started. - Represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" - model: str = rest_field() - """The model used for the image generation. Required.""" - data: List["_models.ImageGenerationData"] = rest_field() - """The images generated by the operation. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created: datetime.datetime, - model: str, - data: List["_models.ImageGenerationData"], - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class ModelInfo(_model_base.Model): """Represents some basic information about the AI model. All required parameters must be populated in order to send to server. - :ivar model_type: The type of the AI model. Required. Known values are: "embeddings", "custom", - "chat", "text_generation", and "image_generation". - :vartype model_type: str or ~azure.ai.inference.models.ModelType - :ivar model_provider: The model provider. Required. - :vartype model_provider: str - :ivar model_name: The name of the AI model. Required. + :ivar model_name: The name of the AI model. For example: ``Phi21``. Required. :vartype model_name: str + :ivar model_type: The type of the AI model. A Unique identifier for the profile. Required. + Known values are: "embeddings", "image_generation", "text_generation", "image_embeddings", + "audio_generation", and "chat". + :vartype model_type: str or ~azure.ai.inference.models.ModelType + :ivar model_provider_name: The model provider name. For example: ``Microsoft Research``. + Required. + :vartype model_provider_name: str """ + model_name: str = rest_field() + """The name of the AI model. For example: ``Phi21``. Required.""" model_type: Union[str, "_models.ModelType"] = rest_field() - """The type of the AI model. A Unique identifier for the profile. Required. Known values are: \"embeddings\", \"image_generation\", \"text_generation\", - \"image_embeddings\", \"audio_generation\", and \"chat\".""" + """The type of the AI model. A Unique identifier for the profile. Required. Known values are: + \"embeddings\", \"image_generation\", \"text_generation\", \"image_embeddings\", + \"audio_generation\", and \"chat\".""" model_provider_name: str = rest_field() - """The model provider name. For example: `Microsoft Research`. Required.""" - model_name: str = rest_field() - """The name of the AI model. For example: `Phi21`. Required.""" + """The model provider name. For example: ``Microsoft Research``. Required.""" @overload def __init__( self, *, - model_type: Union[str, "_models.ModelType"], - model_provider: str, model_name: str, + model_type: Union[str, "_models.ModelType"], + model_provider_name: str, ): ... 
@overload
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
index 80d3e8c50691..6e876d00253b 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
@@ -18,6 +18,7 @@
 logger = logging.getLogger(__name__)
 
+
 class BaseStreamingChatCompletions:
     """A base class for the sync and async streaming chat completions responses, holding any common code
     to deserialize the Server Sent Events (SSE) response stream into chat completions updates, each one
@@ -174,7 +175,7 @@ async def aclose(self) -> None:
 
 __all__: List[str] = [
     "StreamingChatCompletions",
-    "AsyncStreamingChatCompletions"
+    "AsyncStreamingChatCompletions",
 ]  # Add all objects you want publicly available to users at this package level
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
index 88760a4c18d0..6669df784b8e 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py
@@ -31,6 +31,7 @@ def sample_chat_completions_with_tools():
     # Enable unredacted logging, including full request and response payloads (delete me!)
     import sys
     import logging
+
     logger = logging.getLogger("azure")
     logger.setLevel(logging.DEBUG)
     logger.addHandler(logging.StreamHandler(stream=sys.stdout))
@@ -76,13 +77,13 @@ def get_flight_info(origin_city: str, destination_city: str):
         """
         if origin_city == "Seattle" and destination_city == "Miami":
             return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."
-            #return '{"info": "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."}'
+            # return '{"info": "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."}'
         elif origin_city == "Seattle" and destination_city == "Orlando":
             return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."
-            #return '{"info": "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."}'
+            # return '{"info": "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."}'
         else:
             return "I don't have that information."
-            #return '{"into": "I don\'t have that information."}'
+            # return '{"into": "I don\'t have that information."}'
 
     # Define a 'tool' that the model can use to retrieve flight information
     flight_info = ChatCompletionsFunctionToolDefinition(
@@ -115,7 +116,7 @@ def get_flight_info(origin_city: str, destination_city: str):
     result = client.create(
         messages=messages,
         tools=[flight_info],
-        #tool_choice=ChatCompletionsNamedToolSelection(type="function") # Cohere model does not support
+        # tool_choice=ChatCompletionsNamedToolSelection(type="function") # Cohere model does not support
     )
 
     # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model
@@ -124,7 +125,7 @@ def get_flight_info(origin_city: str, destination_city: str):
 
         # Append the previous model response to the chat history
         if result.choices[0].message.tool_calls is not None:
             # TODO: Remove the need to set content=""
-            messages.append(AssistantMessage(content="", tool_calls=result.choices[0].message.tool_calls)) 
+            messages.append(AssistantMessage(content="", tool_calls=result.choices[0].message.tool_calls))
 
         # Make new function call(s) as needed. If parallel function calling is supported by the model,
         # we may have more than one tool call request.
@@ -141,14 +142,14 @@ def get_flight_info(origin_city: str, destination_city: str):
 
             # Provide the tool response to the model, by appending it to the chat history
             messages.append(
-                ToolMessage(tool_call_id=tool_call_id, content=function_response)  # json.dumps(function_response)
+                ToolMessage(
+                    tool_call_id=tool_call_id, content=function_response
+                )  # json.dumps(function_response)
             )
 
         # With the additional tools information on hand, get another response from the model
         result = client.create(
-            messages=messages,
-            tools=[flight_info],
-            tool_choice=ChatCompletionsToolSelectionPreset.AUTO
+            messages=messages, tools=[flight_info], tool_choice=ChatCompletionsToolSelectionPreset.AUTO
         )
 
     # Print the final response
diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
index 304e0c33d801..aa4b03277dca 100644
--- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
+++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
@@ -54,7 +54,9 @@ class ModelClientTestBase(AzureRecordedTestCase):
     # Regular expression describing the pattern of a result ID.
Formats allowed are:
    # "183b56eb-8512-484d-be50-5d8df82301a2", "26ef25aa45424781865a2d38a4484274" and "Sanitized"
-    REGEX_RESULT_ID = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[0-9a-fA-F]{32}$|^Sanitized$")
+    REGEX_RESULT_ID = re.compile(
+        r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[0-9a-fA-F]{32}$|^Sanitized$"
+    )
 
     # Methods to load credentials from environment variables
     def _load_chat_credentials(self, *, bad_key: bool, **kwargs):
@@ -122,7 +124,7 @@ def _validate_chat_completions_tool_result(result: sdk.models.ChatCompletions):
         assert result.created is not None
         assert result.created != ""
         assert result.model is not None
-        #assert result.model != ""
+        # assert result.model != ""
         assert result.object == "chat.completion"
         assert result.usage.prompt_tokens > 0
         assert result.usage.completion_tokens > 0
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
index 54e7d6193310..6f1d439c7cf1 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
@@ -44,7 +44,7 @@ def test_chat_completions_streaming_error_free(self, **kwargs):
     @ServicePreparerChatCompletions()
     @recorded_by_proxy
     def test_chat_completions_with_tool_error_free(self, **kwargs):
-        forecast_tool=sdk.models.ChatCompletionsFunctionToolDefinition(
+        forecast_tool = sdk.models.ChatCompletionsFunctionToolDefinition(
             function=sdk.models.FunctionDefinition(
                 name="get_max_temperature",
                 description="A function that returns the forecasted maximum temperature in a given city, a given few days from now, in Fahrenheit. It returns `unknown` if the forecast is not known.",
@@ -65,9 +65,9 @@ def test_chat_completions_with_tool_error_free(self, **kwargs):
             )
         )
         client = self._create_chat_client(**kwargs)
-        messages=[
+        messages = [
             sdk.models.SystemMessage(content="You are an assistant that helps users find weather information."),
-            sdk.models.UserMessage(content="what's the maximum temperature in Seattle two days from now?")
+            sdk.models.UserMessage(content="what's the maximum temperature in Seattle two days from now?"),
         ]
         result = client.create(
             messages=messages,
             tools=[forecast_tool],
         )
         self._print_chat_completions_result(result)
         self._validate_chat_completions_tool_result(result)
-        messages.append(sdk.models.AssistantMessage(
-            tool_calls=result.choices[0].message.tool_calls
-        ))
-        messages.append(sdk.models.ToolMessage(
-            content="62",
-            tool_call_id=result.choices[0].message.tool_calls[0].id,
-        ))
+        messages.append(sdk.models.AssistantMessage(tool_calls=result.choices[0].message.tool_calls))
+        messages.append(
+            sdk.models.ToolMessage(
+                content="62",
+                tool_call_id=result.choices[0].message.tool_calls[0].id,
+            )
+        )
         result = client.create(
             messages=messages,
             tools=[forecast_tool],
         )
@@ -131,7 +131,7 @@ def test_embeddings_on_chat_completion_endpoint(self, **kwargs):
             exception_caught = True
             print(e)
             assert hasattr(e, "status_code")
-            assert e.status_code == 404 or e.status_code == 405 # `404 - not found` or `405 - method not allowed`
+            assert e.status_code == 404 or e.status_code == 405  # `404 - not found` or `405 - method not allowed`
             assert "not found" in e.message.lower() or "not allowed" in e.message.lower()
         client.close()
         assert exception_caught
From a1ebd6b252eb13c1a07091ab82b287d3c8ff7fc0 Mon Sep 17 00:00:00 2001
From:
Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Mon, 13 May 2024 15:39:35 -0700
Subject: [PATCH 056/112] Update auth

---
 sdk/ai/azure-ai-inference/README.md           | 15 ++++--
 .../azure/ai/inference/_client.py             | 22 +++++---
 .../azure/ai/inference/_configuration.py      | 46 +++++++++++-----
 .../azure/ai/inference/aio/_client.py         | 26 ++++++---
 .../azure/ai/inference/aio/_configuration.py  | 50 ++++++++++++-----
 ...ple_chat_completions_with_entra_id_auth.py | 51 +++++++++++++++++++
 sdk/ai/azure-ai-inference/tsp-location.yaml   |  2 +-
 7 files changed, 167 insertions(+), 45 deletions(-)
 create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 7f8a0e5d2d53..80d3b68163e8 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -1,14 +1,13 @@
 # Azure model inference client library for Python

-The client Library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and selfhosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes services calls using REST AP version `2024-04-01-preview` specificed here (TODO: insert link). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+The client library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and self-hosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-04-01-preview` [specified here](https://review.learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-api?branch=release-build-azure-ai-studio&tabs=azure-studio). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).

 Use the model inference client library to:

 * Authenticate against the service
 * Get information about the model
-* Get chat completions
-* Get embeddings
-* Generate an image from a text prompt
+* Do chat completions
+* Get text embeddings

 Note that for inference using OpenAI models hosted on Azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client.
@@ -32,10 +31,18 @@ Note that for inference using OpenAI models hosted on Azure you should be using

 ### Install the package

+To install the Azure AI Inference package, use the following command:
+
 ```bash
 pip install azure-ai-inference
 ```

+To update an existing installation of the package, use:
+
+```bash
+pip install --upgrade azure-ai-inference
+```
+
 ### Create and authenticate clients

The package includes three clients: `ChatCompletionsClient`, `EmbeddingsClient`, and `ImageGenerationClient`. They are all created in a similar manner.
For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`: diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py index 941c03c014d7..fd03d8f22f7d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from copy import deepcopy -from typing import Any +from typing import Any, TYPE_CHECKING, Union from azure.core import PipelineClient from azure.core.credentials import AzureKeyCredential @@ -18,21 +18,27 @@ from ._operations import ChatCompletionsClientOperationsMixin, EmbeddingsClientOperationsMixin from ._serialization import Deserializer, Serializer +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + class ChatCompletionsClient(ChatCompletionsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword """ChatCompletionsClient. :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: _endpoint = "{endpoint}" self._config = ChatCompletionsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) @@ -100,15 +106,17 @@ class EmbeddingsClient(EmbeddingsClientOperationsMixin): # pylint: disable=clie :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: _endpoint = "{endpoint}" self._config = EmbeddingsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 608bf26c541c..1a988d66b7af 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -6,13 +6,17 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any +from typing import Any, TYPE_CHECKING, Union from azure.core.credentials import AzureKeyCredential from azure.core.pipeline import policies from ._version import VERSION +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long """Configuration for ChatCompletionsClient. @@ -22,15 +26,17 @@ class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-a :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") if endpoint is None: @@ -41,10 +47,18 @@ def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) self.endpoint = endpoint self.credential = credential self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -56,9 +70,7 @@ def _configure(self, **kwargs: Any) -> None: self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AzureKeyCredentialPolicy( - self.credential, "Authorization", prefix="Bearer", **kwargs - ) + self.authentication_policy = self._infer_policy(**kwargs) class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long @@ -69,15 +81,17 @@ class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attrib :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") if endpoint is None: @@ -88,10 +102,18 @@ def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) self.endpoint = endpoint self.credential = credential self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -103,6 +125,4 @@ def _configure(self, **kwargs: Any) -> None: self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AzureKeyCredentialPolicy( - self.credential, "Authorization", prefix="Bearer", **kwargs - ) + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py index 810a39e5ddb2..75a178d76839 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from copy import deepcopy -from typing import Any, Awaitable +from typing import Any, Awaitable, TYPE_CHECKING, Union from azure.core import AsyncPipelineClient from azure.core.credentials import AzureKeyCredential @@ -18,21 +18,29 @@ from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration from ._operations import ChatCompletionsClientOperationsMixin, EmbeddingsClientOperationsMixin +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + class ChatCompletionsClient(ChatCompletionsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword """ChatCompletionsClient. :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. 
Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: _endpoint = "{endpoint}" self._config = ChatCompletionsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) @@ -102,15 +110,19 @@ class EmbeddingsClient(EmbeddingsClientOperationsMixin): # pylint: disable=clie :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: _endpoint = "{endpoint}" self._config = EmbeddingsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py index 41c1596e1410..912c1f7e8f3a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -6,13 +6,17 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any +from typing import Any, TYPE_CHECKING, Union from azure.core.credentials import AzureKeyCredential from azure.core.pipeline import policies from .._version import VERSION +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long """Configuration for ChatCompletionsClient. @@ -22,15 +26,19 @@ class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-a :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None: + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: api_version: str = kwargs.pop("api_version", "2024-04-01-preview") if endpoint is None: @@ -41,10 +49,18 @@ def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) self.endpoint = endpoint self.credential = credential self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -56,9 +72,7 @@ def _configure(self, **kwargs: Any) -> None: self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AzureKeyCredentialPolicy( - self.credential, "Authorization", prefix="Bearer", **kwargs - ) + self.authentication_policy = self._infer_policy(**kwargs) class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long @@ -69,15 +83,19 @@ class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attrib :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.AzureKeyCredential + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is "2024-04-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str
     """

-    def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any) -> None:
+    def __init__(
+        self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any
+    ) -> None:
         api_version: str = kwargs.pop("api_version", "2024-04-01-preview")

         if endpoint is None:
@@ -88,10 +106,18 @@ def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any)
         self.endpoint = endpoint
         self.credential = credential
         self.api_version = api_version
+        self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
         kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION))
         self.polling_interval = kwargs.get("polling_interval", 30)
         self._configure(**kwargs)

+    def _infer_policy(self, **kwargs):
+        if isinstance(self.credential, AzureKeyCredential):
+            return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs)
+        if hasattr(self.credential, "get_token"):
+            return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
+        raise TypeError(f"Unsupported credential: {self.credential}")
+
     def _configure(self, **kwargs: Any) -> None:
         self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
         self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
@@ -103,6 +129,4 @@ def _configure(self, **kwargs: Any) -> None:
         self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
         self.authentication_policy = kwargs.get("authentication_policy")
         if self.credential and not self.authentication_policy:
-            self.authentication_policy = policies.AzureKeyCredentialPolicy(
-                self.credential, "Authorization", prefix="Bearer", **kwargs
-            )
+            self.authentication_policy = self._infer_policy(**kwargs)
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py
new file mode 100644
index 000000000000..e48bd86d0b4b
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py
@@ -0,0 +1,51 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get a chat completions response from
+    the service using a synchronous client, with Entra ID authentication.
+
+USAGE:
+    python sample_chat_completions_with_entra_id_auth.py
+
+    Set this environment variable before running the sample:
+    1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
+        https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
+        where `your-deployment-name` is your unique AI Model deployment name, and
+        `your-azure-region` is the Azure region where your model is deployed.
+""" + + +def sample_chat_completions_with_entra_id_auth(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.identity import DefaultAzureCredential + + # See https://learn.microsoft.com/python/api/overview/azure/identity-readme#defaultazurecredential + default_azure_credential = DefaultAzureCredential() + + client = ChatCompletionsClient(endpoint=endpoint, credential=default_azure_credential) + + result = client.create( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) + + print(result.choices[0].message.content) + + +if __name__ == "__main__": + sample_chat_completions_with_entra_id_auth() diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 308cb8aa6e8c..f892797e6619 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 43e5511f7c8543a88811d9218d9e7bf301c22646 +commit: 3dc0445d8fddfcf0b1ee278e6f61dd4f9b3b2634 repo: Azure/azure-rest-api-specs additionalDirectories: From f0b773cb3106e5947e4852f36a799d47a1525069 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 14 May 2024 08:59:13 -0700 Subject: [PATCH 057/112] Add get_model_info tests --- sdk/ai/azure-ai-inference/assets.json | 2 +- .../tests/model_inference_test_base.py | 17 +++++++++++++++++ .../tests/test_model_inference_async_client.py | 9 +++++++++ .../tests/test_model_inference_client.py | 9 +++++++++ 4 files changed, 36 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index 3054172500dd..61e6451fbcff 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_b8b76ccaac" + "Tag": "python/ai/azure-ai-inference_54d6a21e45" } diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index aa4b03277dca..d6d0d3499036 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -94,6 +94,23 @@ def _create_embeddings_client_with_chat_completions_credentials(self, **kwargs) credential = AzureKeyCredential(key) return sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + @staticmethod + def _print_model_info_result(model_info: sdk.models.ModelInfo): + if ModelClientTestBase.PRINT_RESULT: + print(" Model info result:") + print("\tmodel_name: {}".format(model_info.model_name)) + print("\tmodel_type: {}".format(model_info.model_type)) + print("\tmodel_provider_name: {}".format(model_info.model_provider_name)) + + @staticmethod + def _validate_model_info_result(model_info: sdk.models.ModelInfo): + assert model_info.model_name is not None + assert len(model_info.model_name) > 0 + assert 
model_info.model_provider_name is not None + assert len(model_info.model_provider_name) > 0 + assert model_info.model_type is not None + assert model_info.model_type == "completion" # This should be sdk.models.ModelType.CHAT_COMPLETION once the model is fixed + @staticmethod def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contains: List[str]): assert any(item in result.choices[0].message.content for item in contains) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index a490a21d04a5..2cab56c394a7 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -19,6 +19,15 @@ class TestModelAsyncClient(ModelClientTestBase): # # ********************************************************************************** + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_get_model_info_error_free(self, **kwargs): + client = self._create_async_chat_client(**kwargs) + result = await client.get_model_info() + self._print_model_info_result(result) + self._validate_model_info_result(result) + await client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy_async async def test_async_chat_completions_error_free(self, **kwargs): diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 6f1d439c7cf1..b825271e58c0 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -19,6 +19,15 @@ class TestModelClient(ModelClientTestBase): # # ********************************************************************************** + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_get_model_info_error_free(self, **kwargs): + client = self._create_chat_client(**kwargs) + result = client.get_model_info() + self._print_model_info_result(result) + self._validate_model_info_result(result) + client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy def test_chat_completions_error_free(self, **kwargs): From deb3d1652f592bb5fae3807279e1124e0d8b8cc2 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 14 May 2024 10:21:22 -0700 Subject: [PATCH 058/112] Add sample for ClientGenerator --- sdk/ai/azure-ai-inference/README.md | 30 +++++++++++ .../azure/ai/inference/_patch.py | 6 ++- sdk/ai/azure-ai-inference/samples/README.md | 1 + ..._chat_completions_with_client_generator.py | 53 +++++++++++++++++++ 4 files changed, 88 insertions(+), 2 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 80d3b68163e8..9c5e9f9a3551 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -92,6 +92,10 @@ TODO: Add overview and link to explain embeddings. Embeddings operations target the URL route `/v1/embeddings` on the provided endpoint. 
+### Client generator + +TODO + ## Examples In the following sections you will find simple examples of: @@ -100,6 +104,7 @@ In the following sections you will find simple examples of: * [Streaming chat completions](#streaming-chat-completions-example) * [Embeddings](#embeddings-example) * [Get model information](#get-model-information-example) +* [Create a client using the ClientGenerator](#create-a-client-using-the-clientgenerator) The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients). Only mandatory input settings are shown for simplicity. @@ -221,6 +226,31 @@ print(f"Model type: {model_info.model_type}") +### Create a client using the ClientGenerator + +Instead of creating a specific client directly (`ChatCompletionsClient`, `EmbeddingsClient`) you can use the `ClientGenerator.from_endpoint` method to create the appropriate client associated with the provided endpoint URL. In this example we use it to create a `ChatCompletionsClient`: + + + +```python +from azure.ai.inference import ClientGenerator +from azure.ai.inference.models import SystemMessage, UserMessage +from azure.core.credentials import AzureKeyCredential + +client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key)) + +result = client.create( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] +) + +print(result.choices[0].message.content) +``` + + + ## Troubleshooting ### Exceptions diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 6fbf8dc2a29c..bf79cc6d612e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -7,6 +7,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ import json +import logging import sys from io import IOBase @@ -39,6 +40,7 @@ _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False +logger = logging.getLogger(__name__) class ClientGenerator: @staticmethod @@ -47,7 +49,7 @@ def from_endpoint( ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient]: client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() - print(model_info) + logger.info("model_info=%s", model_info) if model_info.model_type == None or model_info.model_type == "": raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
@@ -57,7 +59,7 @@ def from_endpoint(
         elif model_info.model_type == _models.ModelType.EMBEDDINGS:
             return EmbeddingsClient(endpoint, credential, **kwargs)
         else:
-            raise ValueError(f"No client available to support AI model type {model_info.model_type}")
+            raise ValueError(f"No client available to support AI model type `{model_info.model_type}`")
 
 
 class ChatCompletionsClient(ChatCompletionsClientGenerated):
diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md
index 9b8c3bbc19c0..8f8d3c7371ed 100644
--- a/sdk/ai/azure-ai-inference/samples/README.md
+++ b/sdk/ai/azure-ai-inference/samples/README.md
@@ -24,6 +24,7 @@ The concepts are similar, you can easily modify any of the samples to your needs
 |[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. |
 |[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` |
 |[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools |
+|[sample_chat_completions_with_client_generator.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py) | Shows how to use `ClientGenerator.from_endpoint()` to create the appropriate client based on the provided endpoint URL. In this example, it creates a `ChatCompletionsClient`. |
 |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. |
 |[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. Similarly can be done with all other clients. |
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py
new file mode 100644
index 000000000000..b85d659cb80d
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py
@@ -0,0 +1,53 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get a chat completions response from
+    the service using a synchronous client that was obtained from a
+    `ClientGenerator.from_endpoint` call.
+
+USAGE:
+    python sample_chat_completions_with_client_generator.py
+
+    Set these two environment variables before running the sample:
+    1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
+        https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
+        where `your-deployment-name` is your unique AI Model deployment name, and
+        `your-azure-region` is the Azure region where your model is deployed.
+    2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+
+
+def sample_chat_completions_with_client_generator():
+    import os
+
+    try:
+        endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
+        key = os.environ["CHAT_COMPLETIONS_KEY"]
+    except KeyError:
+        print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    # [START chat_completions_with_client_generator]
+    from azure.ai.inference import ClientGenerator
+    from azure.ai.inference.models import SystemMessage, UserMessage
+    from azure.core.credentials import AzureKeyCredential
+
+    client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+    result = client.create(
+        messages=[
+            SystemMessage(content="You are a helpful assistant."),
+            UserMessage(content="How many feet are in a mile?"),
+        ]
+    )
+
+    print(result.choices[0].message.content)
+    # [END chat_completions_with_client_generator]
+
+
+if __name__ == "__main__":
+    sample_chat_completions_with_client_generator()

From c6b3bc25e9ff998e99b5bb12b5c2ccb76d2fbb3d Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Tue, 14 May 2024 11:14:37 -0700
Subject: [PATCH 059/112] Remove /v1

---
 sdk/ai/azure-ai-inference/README.md               | 7 ++++---
 .../azure/ai/inference/_operations/_operations.py | 4 ++--
 .../tests/test_model_inference_async_client.py    | 2 +-
 .../tests/test_model_inference_client.py          | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 9c5e9f9a3551..d74179097758 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -84,13 +84,13 @@ client = ChatCompletionsClient(
 
 TODO: Add overview and link to explain chat completions.
 
-Chat completion operations target the URL route `/v1/chat/completions` on the provided endpoint.
+Chat completion operations target the URL route `/chat/completions` on the provided endpoint.
 
 ### Embeddings
 
 TODO: Add overview and link to explain embeddings.
 
-Embeddings operations target the URL route `/v1/embeddings` on the provided endpoint.
+Embeddings operations target the URL route `/embeddings` on the provided endpoint.
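With the `/v1` prefix removed, a quick way to confirm what an endpoint serves is to issue a raw call through the client's `send_request` passthrough. A sketch, assuming a relative URL is resolved against the client's endpoint, and targeting the `/info` route that the `get_model_info` operation uses:

```python
from azure.core.rest import HttpRequest

# `client` is any client created as in the samples above; the pipeline adds
# authentication headers and the endpoint base URL.
request = HttpRequest("GET", "/info", params={"api-version": "2024-04-01-preview"})
response = client.send_request(request)
response.raise_for_status()
print(response.json())
```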
 
 ### Client generator
 
@@ -277,7 +277,8 @@ Operation returned an invalid status 'Unauthorized'
 Content: {"status": "Invalid auth token"}
 ```
 
-Or for example when you call `get_embeddings` on a model that does not support the `/v1/embeddings` route:
+Or, for example, when you create an `EmbeddingsClient` and call `create` on the client, but the endpoint does not
+support the `/embeddings` route:
 
 ```text
 Status code: 424 (Failed Dependency)
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
index 142fc67800d7..d6d4e6ec671f 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py
@@ -51,7 +51,7 @@ def build_chat_completions_create_request(*, model_deployment: Optional[str] = N
     accept = _headers.pop("Accept", "application/json")
 
     # Construct URL
-    _url = "/v1/chat/completions"
+    _url = "/chat/completions"
 
     # Construct parameters
     _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
@@ -94,7 +94,7 @@ def build_embeddings_create_request(*, model_deployment: Optional[str] = None, *
     accept = _headers.pop("Accept", "application/json")
 
     # Construct URL
-    _url = "/v1/embeddings"
+    _url = "/embeddings"
 
     # Construct parameters
     _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
index 2cab56c394a7..a06cd1281892 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
@@ -55,7 +55,7 @@ async def test_async_chat_completions_streaming_error_free(self, **kwargs):
         result = await client.create_streaming(
             messages=[
                 sdk.models.SystemMessage(content="You are a helpful assistant."),
-                sdk.models.UserMessage(content="Give me 5 good reasons why I should exercise every day."),
+                sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."),
             ]
         )
         await self._validate_async_chat_completions_streaming_result(result)
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
index b825271e58c0..4acddfb132a3 100644
--- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
+++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py
@@ -44,7 +44,7 @@ def test_chat_completions_streaming_error_free(self, **kwargs):
         result = client.create_streaming(
             messages=[
                 sdk.models.SystemMessage(content="You are a helpful assistant."),
-                sdk.models.UserMessage(content="Give me 5 good reasons why I should exercise every day."),
+                sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."),
             ]
         )
         self._validate_chat_completions_streaming_result(result)

From 042c06a6e10648157d0ec3116666d3a1bb781d50 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Tue, 14 May 2024 11:16:34 -0700
Subject: [PATCH 060/112] Use new test recording assets without /v1

---
 sdk/ai/azure-ai-inference/assets.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json
index 61e6451fbcff..1433e07f47b8 100644
--- a/sdk/ai/azure-ai-inference/assets.json
+++
b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_54d6a21e45" + "Tag": "python/ai/azure-ai-inference_f6e39baf60" } From 56450c9c840efc28a2cfe6f42078bea7f91817fd Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 14 May 2024 11:21:46 -0700 Subject: [PATCH 061/112] Pick up TypeSpec with /v1 removed from route --- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index f892797e6619..24ec38780c3f 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 3dc0445d8fddfcf0b1ee278e6f61dd4f9b3b2634 +commit: bbd51a40f4dab41ba235a7317b1697ac65dc9e6d repo: Azure/azure-rest-api-specs additionalDirectories: From a36fbee4b214af21a760260c9f815c8fbb9f8465 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 14 May 2024 16:39:08 -0700 Subject: [PATCH 062/112] Support Image Embeddings and version 2024-05 --- sdk/ai/azure-ai-inference/README.md | 63 +- .../azure/ai/inference/__init__.py | 2 + .../azure/ai/inference/_client.py | 94 ++- .../azure/ai/inference/_configuration.py | 63 +- .../ai/inference/_operations/__init__.py | 2 + .../ai/inference/_operations/_operations.py | 655 +++++++++++++++++- .../azure/ai/inference/_patch.py | 1 + .../azure/ai/inference/_vendor.py | 15 +- .../azure/ai/inference/aio/__init__.py | 2 + .../azure/ai/inference/aio/_client.py | 98 ++- .../azure/ai/inference/aio/_configuration.py | 65 +- .../ai/inference/aio/_operations/__init__.py | 2 + .../inference/aio/_operations/_operations.py | 606 +++++++++++++++- .../azure/ai/inference/aio/_vendor.py | 15 +- .../azure/ai/inference/models/__init__.py | 4 + .../azure/ai/inference/models/_enums.py | 19 + .../azure/ai/inference/models/_models.py | 37 + sdk/ai/azure-ai-inference/samples/README.md | 1 + .../samples/async_samples/sample1.png | Bin 0 -> 307178 bytes .../samples/async_samples/sample2.png | Bin 0 -> 264509 bytes .../async_samples/sample_image_embeddings.py | 77 ++ sdk/ai/azure-ai-inference/samples/sample1.png | Bin 0 -> 307178 bytes sdk/ai/azure-ai-inference/samples/sample2.png | Bin 0 -> 264509 bytes .../samples/sample_image_embeddings.py | 61 ++ .../tests/model_inference_test_base.py | 4 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 26 files changed, 1832 insertions(+), 56 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample1.png create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample2.png create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings.py create mode 100644 sdk/ai/azure-ai-inference/samples/sample1.png create mode 100644 sdk/ai/azure-ai-inference/samples/sample2.png create mode 100644 sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index d74179097758..3b747fe37d13 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,6 +1,6 @@ # Azure model inference client library for Python -The client Library allows you to do inference using AI models you 
deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and selfhosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes services calls using REST AP version `2024-04-01-preview` [specificed here](https://review.learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-api?branch=release-build-azure-ai-studio&tabs=azure-studio). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+The client library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and self-hosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-05-01-preview` [specified here](https://review.learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-api?branch=release-build-azure-ai-studio&tabs=azure-studio). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
 
 Use the model inference client library to:
 
@@ -80,18 +80,31 @@ client = ChatCompletionsClient(
 
 ## Key concepts
 
+### AI Model information
+
+TODO: Add overview and link to explain AI model info
+
+The operation to get AI model information targets the URL route `/info` on the provided endpoint.
+
+
 ### Chat Completions
 
 TODO: Add overview and link to explain chat completions.
 
 Chat completion operations target the URL route `/chat/completions` on the provided endpoint.
 
-### Embeddings
+### Text Embeddings
 
 TODO: Add overview and link to explain embeddings.
 
 Embeddings operations target the URL route `/embeddings` on the provided endpoint.
 
+### Image Embeddings
+
+TODO: Add overview and link to explain image embeddings.
+
+Image embeddings operations target the URL route `/images/embeddings` on the provided endpoint.
+
 ### Client generator
 
 TODO
@@ -102,7 +115,8 @@ In the following sections you will find simple examples of:
 
 * [Chat completions](#chat-completions-example)
 * [Streaming chat completions](#streaming-chat-completions-example)
-* [Embeddings](#embeddings-example)
+* [Text Embeddings](#text-embeddings-example)
+* [Image Embeddings](#image-embeddings-example)
 * [Get model information](#get-model-information-example)
 * [Create a client using the ClientGenerator](#create-a-client-using-the-clientgenerator)
 
 The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients). Only mandatory input settings are shown for simplicity.
@@ -172,7 +186,7 @@ The printed result of course depends on the model, but you should see the answer
 
 To generate completions for additional messages, simply call `client.create_streaming` multiple times using the same `client`.
 
-### Embeddings example
+### Text Embeddings example
 
 This example demonstrates how to get embeddings.
 
@@ -205,6 +219,47 @@ data[2]: length=1024, [0.04196167, 0.029083252, ..., -0.0027484894, 0.0073127747
 
 To generate embeddings for additional phrases, simply call `client.create` multiple times using the same `client`.
 
+### Image Embeddings example
+
+This example demonstrates how to get image embeddings.
+
+
+
+```python
+import base64
+
+from azure.ai.inference import ImageEmbeddingsClient
+from azure.ai.inference.models import EmbeddingInput
+from azure.core.credentials import AzureKeyCredential
+
+with open("sample1.png", "rb") as f:
+    image1: str = base64.b64encode(f.read()).decode('utf-8')
+with open("sample2.png", "rb") as f:
+    image2: str = base64.b64encode(f.read()).decode('utf-8')
+
+client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+result = client.create(input=[
+    EmbeddingInput(image=image1),
+    EmbeddingInput(image=image2)
+])
+
+for item in result.data:
+    length = len(item.embedding)
+    print(
+        f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
+        f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
+    )
+```
+
+
+
+The printed result of course depends on the model, but you should see something like this:
+
+```txt
+TBD
+```
+
+To generate embeddings for additional images, simply call `client.create` multiple times using the same `client`.
+
 ### Get model information example
 
 Each one of the clients supports a `get_model_info` method that can be used to retrieve information about the AI model. This example shows how to get model information from the `ChatCompletionsClient`, but similarly can be done with the other clients.
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py
index 057fbcaefaee..898076e89409 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py
@@ -8,6 +8,7 @@
 
 from ._client import ChatCompletionsClient
 from ._client import EmbeddingsClient
+from ._client import ImageEmbeddingsClient
 from ._version import VERSION
 
 __version__ = VERSION
@@ -22,6 +23,7 @@
 __all__ = [
     "ChatCompletionsClient",
     "EmbeddingsClient",
+    "ImageEmbeddingsClient",
 ]
 __all__.extend([p for p in _patch_all if p not in __all__])
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py
index fd03d8f22f7d..f717136114ce 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_client.py
@@ -14,8 +14,16 @@
 from azure.core.pipeline import policies
 from azure.core.rest import HttpRequest, HttpResponse
 
-from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration
-from ._operations import ChatCompletionsClientOperationsMixin, EmbeddingsClientOperationsMixin
+from ._configuration import (
+    ChatCompletionsClientConfiguration,
+    EmbeddingsClientConfiguration,
+    ImageEmbeddingsClientConfiguration,
+)
+from ._operations import (
+    ChatCompletionsClientOperationsMixin,
+    EmbeddingsClientOperationsMixin,
+    ImageEmbeddingsClientOperationsMixin,
+)
 from ._serialization import Deserializer, Serializer
 
 if TYPE_CHECKING:
@@ -33,7 +41,7 @@ class ChatCompletionsClient(ChatCompletionsClientOperationsMixin):  # pylint: di
     :type credential: ~azure.core.credentials.AzureKeyCredential or
      ~azure.core.credentials.TokenCredential
     :keyword api_version: The API version to use for this operation. Default value is
-     "2024-04-01-preview". Note that overriding this default value may result in unsupported
+     "2024-05-01-preview". Note that overriding this default value may result in unsupported
      behavior.
:paramtype api_version: str """ @@ -111,7 +119,7 @@ class EmbeddingsClient(EmbeddingsClientOperationsMixin): # pylint: disable=clie :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -177,3 +185,81 @@ def __enter__(self) -> "EmbeddingsClient": def __exit__(self, *exc_details: Any) -> None: self._client.__exit__(*exc_details) + + +class ImageEmbeddingsClient(ImageEmbeddingsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ImageEmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = ImageEmbeddingsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "ImageEmbeddingsClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 1a988d66b7af..2aab1cca6b60 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -31,13 +31,13 @@ class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-a :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + api_version: str = kwargs.pop("api_version", "2024-05-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -86,13 +86,68 @@ class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attrib :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + api_version: str = kwargs.pop("api_version", "2024-05-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) + + +class ImageEmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attributes,name-too-long + """Configuration for ImageEmbeddingsClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2024-05-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py index d0e8dcc776a6..d3ebd561f739 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/__init__.py @@ -8,6 +8,7 @@ from ._operations import ChatCompletionsClientOperationsMixin from ._operations import EmbeddingsClientOperationsMixin +from ._operations import ImageEmbeddingsClientOperationsMixin from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import @@ -16,6 +17,7 @@ __all__ = [ "ChatCompletionsClientOperationsMixin", "EmbeddingsClientOperationsMixin", + "ImageEmbeddingsClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index d6d4e6ec671f..ee75606ca0ff 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -27,7 +27,7 @@ from .. import models as _models from .._model_base import SdkJSONEncoder, _deserialize from .._serialization import Serializer -from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC +from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageEmbeddingsClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -47,7 +47,7 @@ def build_chat_completions_create_request(*, model_deployment: Optional[str] = N _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-05-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -70,7 +70,7 @@ def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-05-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -90,7 +90,7 @@ def build_embeddings_create_request(*, model_deployment: Optional[str] = None, * _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-05-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -113,7 +113,50 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> 
HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-04-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-05-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/info" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_image_embeddings_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-05-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/images/embeddings" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if model_deployment is not None: + _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_image_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-05-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -863,9 +906,19 @@ def create( # JSON input template you can fill out and use as your body input. body = { "input": [ - "str" # Input texts to get embeddings for, encoded as a an array of - strings. Required. + "str" # Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array of strings or + array of token arrays. Required. ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". "extras": { "str": "str" # Optional. Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be @@ -873,8 +926,9 @@ def create( service handles these extra parameters depends on the value of the ``extra-parameters`` HTTP request header. }, - "input_type": "str" # Optional. 
Specifies the input type to use for - embedding search. Known values are: "text", "query", and "document". + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". } # response body for status code(s): 200 @@ -920,13 +974,17 @@ def create( model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for a given text prompt. - :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. @@ -943,7 +1001,20 @@ def create( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] - :keyword input_type: Specifies the input type to use for embedding search. Known values are: + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping @@ -1060,6 +1131,8 @@ def create( input: List[str] = _Unset, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: @@ -1068,7 +1141,9 @@ def create( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. 
Relevant only to Model-as-a-Platform (MaaP) deployments. @@ -1082,7 +1157,20 @@ def create( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] - :keyword input_type: Specifies the input type to use for embedding search. Known values are: + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping @@ -1095,9 +1183,19 @@ def create( # JSON input template you can fill out and use as your body input. body = { "input": [ - "str" # Input texts to get embeddings for, encoded as a an array of - strings. Required. + "str" # Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array of strings or + array of token arrays. Required. ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". "extras": { "str": "str" # Optional. Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be @@ -1105,8 +1203,9 @@ def create( service handles these extra parameters depends on the value of the ``extra-parameters`` HTTP request header. }, - "input_type": "str" # Optional. Specifies the input type to use for - embedding search. Known values are: "text", "query", and "document". + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". 
} # response body for status code(s): 200 @@ -1160,7 +1259,13 @@ def create( if body is _Unset: if input is _Unset: raise TypeError("missing required argument: input") - body = {"extras": extras, "input": input, "input_type": input_type} + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "extras": extras, + "input": input, + "input_type": input_type, + } body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -1273,3 +1378,517 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + +class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): + + @overload + def create( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "input": [ + { + "image": "str", # The input image, in PNG format. Required. + "text": "str" # Optional. Optional. The text input to feed + into the model (like DINO, CLIP). Returns a 422 error if the model + doesn't support the value or parameter. + } + ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. 
+ ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + def create( + self, + *, + input: List[_models.EmbeddingInput], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. 
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + def create( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. 
+ "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @distributed_trace + def create( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + input: List[_models.EmbeddingInput] = _Unset, + model_deployment: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "input": [ + { + "image": "str", # The input image, in PNG format. Required. + "text": "str" # Optional. Optional. The text input to feed + into the model (like DINO, CLIP). Returns a 422 error if the model + doesn't support the value or parameter. + } + ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. 
+ } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) + + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "extras": extras, + "input": input, + "input_type": input_type, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_image_embeddings_create_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. For example: ``Phi21``. + Required. + "model_provider_name": "str", # The model provider name. For example: + ``Microsoft Research``. Required. + "model_type": "str" # The type of the AI model. A Unique identifier for the + profile. Required. Known values are: "embeddings", "image_generation", + "text_generation", "image_embeddings", "audio_generation", and "chat". 
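+ # (these known values mirror the ModelType enum exposed in
+ # azure.ai.inference.models)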
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) + + _request = build_image_embeddings_get_model_info_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInfo, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index bf79cc6d612e..31b7ce6329f1 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -42,6 +42,7 @@ logger = logging.getLogger(__name__) + class ClientGenerator: @staticmethod def from_endpoint( diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py index 16b7610458ec..8ea240fb008b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_vendor.py @@ -8,7 +8,11 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration +from ._configuration import ( + ChatCompletionsClientConfiguration, + EmbeddingsClientConfiguration, + ImageEmbeddingsClientConfiguration, +) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -33,3 +37,12 @@ class EmbeddingsClientMixinABC(ABC): _config: EmbeddingsClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" + + +class ImageEmbeddingsClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "PipelineClient" + _config: ImageEmbeddingsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index 0335784ac1ea..e9e1b0469645 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -8,6 +8,7 @@ from ._client import ChatCompletionsClient from ._client import EmbeddingsClient +from ._client import ImageEmbeddingsClient try: from ._patch import __all__ as _patch_all @@ -19,6 +20,7 @@ __all__ = [ "ChatCompletionsClient", "EmbeddingsClient", + "ImageEmbeddingsClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py index 75a178d76839..fad042e5fcee 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_client.py @@ -15,8 +15,16 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._serialization import Deserializer, Serializer -from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration -from ._operations import ChatCompletionsClientOperationsMixin, EmbeddingsClientOperationsMixin +from ._configuration import ( + ChatCompletionsClientConfiguration, + EmbeddingsClientConfiguration, + ImageEmbeddingsClientConfiguration, +) +from ._operations import ( + ChatCompletionsClientOperationsMixin, + EmbeddingsClientOperationsMixin, + ImageEmbeddingsClientOperationsMixin, +) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -33,7 +41,7 @@ class ChatCompletionsClient(ChatCompletionsClientOperationsMixin): # pylint: di :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -115,7 +123,7 @@ class EmbeddingsClient(EmbeddingsClientOperationsMixin): # pylint: disable=clie :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -185,3 +193,85 @@ async def __aenter__(self) -> "EmbeddingsClient": async def __aexit__(self, *exc_details: Any) -> None: await self._client.__aexit__(*exc_details) + + +class ImageEmbeddingsClient(ImageEmbeddingsClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword + """ImageEmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. 
+ :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{endpoint}" + self._config = ImageEmbeddingsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "ImageEmbeddingsClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py index 912c1f7e8f3a..34483b59956d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -31,7 +31,7 @@ class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-a :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". 
Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -39,7 +39,7 @@ class ChatCompletionsClientConfiguration: # pylint: disable=too-many-instance-a def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + api_version: str = kwargs.pop("api_version", "2024-05-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -88,7 +88,7 @@ class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attrib :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-04-01-preview". Note that overriding this default value may result in unsupported + "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ @@ -96,7 +96,64 @@ class EmbeddingsClientConfiguration: # pylint: disable=too-many-instance-attrib def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2024-04-01-preview") + api_version: str = kwargs.pop("api_version", "2024-05-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) + + +class ImageEmbeddingsClientConfiguration: # pylint: 
disable=too-many-instance-attributes,name-too-long + """Configuration for ImageEmbeddingsClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-05-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py index d0e8dcc776a6..d3ebd561f739 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/__init__.py @@ -8,6 +8,7 @@ from ._operations import ChatCompletionsClientOperationsMixin from ._operations import EmbeddingsClientOperationsMixin +from ._operations import ImageEmbeddingsClientOperationsMixin from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import @@ -16,6 +17,7 @@ __all__ = [ "ChatCompletionsClientOperationsMixin", "EmbeddingsClientOperationsMixin", + "ImageEmbeddingsClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 3fb182863a1d..27e40a349796 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -31,8 +31,10 @@ build_chat_completions_get_model_info_request, build_embeddings_create_request, build_embeddings_get_model_info_request, + build_image_embeddings_create_request, + build_image_embeddings_get_model_info_request, ) -from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC +from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageEmbeddingsClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -779,9 +781,19 @@ async def create( # JSON input template you can fill out and use as your body input. body = { "input": [ - "str" # Input texts to get embeddings for, encoded as a an array of - strings. Required. + "str" # Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array of strings or + array of token arrays. Required. ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. 
Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". "extras": { "str": "str" # Optional. Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be @@ -789,8 +801,9 @@ async def create( service handles these extra parameters depends on the value of the ``extra-parameters`` HTTP request header. }, - "input_type": "str" # Optional. Specifies the input type to use for - embedding search. Known values are: "text", "query", and "document". + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". } # response body for status code(s): 200 @@ -836,13 +849,17 @@ async def create( model_deployment: Optional[str] = None, content_type: str = "application/json", extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for a given text prompt. - :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. @@ -859,7 +876,20 @@ async def create( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] - :keyword input_type: Specifies the input type to use for embedding search. Known values are: + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping @@ -976,6 +1006,8 @@ async def create( input: List[str] = _Unset, model_deployment: Optional[str] = None, extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: @@ -984,7 +1016,9 @@ async def create( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword input: Input texts to get embeddings for, encoded as a an array of strings. Required. + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. @@ -998,7 +1032,20 @@ async def create( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] - :keyword input_type: Specifies the input type to use for embedding search. Known values are: + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". Default value is None. :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping @@ -1011,9 +1058,19 @@ async def create( # JSON input template you can fill out and use as your body input. body = { "input": [ - "str" # Input texts to get embeddings for, encoded as a an array of - strings. Required. + "str" # Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array of strings or + array of token arrays. Required. ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". "extras": { "str": "str" # Optional. Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. 
They will be @@ -1021,8 +1078,9 @@ async def create( service handles these extra parameters depends on the value of the ``extra-parameters`` HTTP request header. }, - "input_type": "str" # Optional. Specifies the input type to use for - embedding search. Known values are: "text", "query", and "document". + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". } # response body for status code(s): 200 @@ -1076,7 +1134,13 @@ async def create( if body is _Unset: if input is _Unset: raise TypeError("missing required argument: input") - body = {"extras": extras, "input": input, "input_type": input_type} + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "extras": extras, + "input": input, + "input_type": input_type, + } body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -1189,3 +1253,517 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + +class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): + + @overload + async def create( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "input": [ + { + "image": "str", # The input image, in PNG format. Required. + "text": "str" # Optional. Optional. The text input to feed + into the model (like DINO, CLIP). Returns a 422 error if the model + doesn't support the value or parameter. + } + ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. 
How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + async def create( + self, + *, + input: List[_models.EmbeddingInput], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. 
Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @overload + async def create( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. 
+ These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + + @distributed_trace_async + async def create( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + input: List[_models.EmbeddingInput] = _Unset, + model_deployment: Optional[str] = None, + extras: Optional[Dict[str, str]] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings + should have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. 
+ :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "input": [ + { + "image": "str", # The input image, in PNG format. Required. + "text": "str" # Optional. Optional. The text input to feed + into the model (like DINO, CLIP). Returns a 422 error if the model + doesn't support the value or parameter. + } + ], + "dimensions": 0, # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. + "encoding_format": "str", # Optional. Optional. The number of dimensions the + resulting output embeddings should have. Passing null causes the model to use its + default value. Returns a 422 error if the model doesn't support the value or + parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and + "uint8". + "extras": { + "str": "str" # Optional. Extra parameters (in the form of string + key-value pairs) that are not in the standard request payload. They will be + passed to the service as-is in the root of the JSON request payload. How the + service handles these extra parameters depends on the value of the + ``extra-parameters`` HTTP request header. + }, + "input_type": "str" # Optional. Optional. The type of the input. Returns a + 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". + } + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0, # Index of the prompt to which the EmbeddingItem + corresponds. Required. + "object": "str" # The object type of this embeddings item. + Will always be ``embedding``. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "object": "str", # The object type of the embeddings result. Will always be + ``list``. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. 
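+ # (values are shown as floats here; other layouts presumably apply when
+ # a supported "encoding_format" is requested in the body above)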
+ } + } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) + + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "extras": extras, + "input": input, + "input_type": input_type, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_image_embeddings_create_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "model_name": "str", # The name of the AI model. For example: ``Phi21``. + Required. + "model_provider_name": "str", # The model provider name. For example: + ``Microsoft Research``. Required. + "model_type": "str" # The type of the AI model. A Unique identifier for the + profile. Required. Known values are: "embeddings", "image_generation", + "text_generation", "image_embeddings", "audio_generation", and "chat". 
+ } + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) + + _request = build_image_embeddings_get_model_info_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ModelInfo, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py index 3159e318c3b6..dd91e1ea130f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_vendor.py @@ -8,7 +8,11 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import ChatCompletionsClientConfiguration, EmbeddingsClientConfiguration +from ._configuration import ( + ChatCompletionsClientConfiguration, + EmbeddingsClientConfiguration, + ImageEmbeddingsClientConfiguration, +) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -33,3 +37,12 @@ class EmbeddingsClientMixinABC(ABC): _config: EmbeddingsClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" + + +class ImageEmbeddingsClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: ImageEmbeddingsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index a83308edc53b..50530f45bf51 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -19,6 +19,7 @@ from ._models import ChatRequestMessage from ._models import ChatResponseMessage from ._models import CompletionsUsage +from ._models import EmbeddingInput from ._models import EmbeddingItem from ._models import EmbeddingsResult from ._models import EmbeddingsUsage @@ -34,6 +35,7 @@ from ._enums import ChatCompletionsToolSelectionPreset from ._enums import ChatRole from ._enums import CompletionsFinishReason +from ._enums import EmbeddingEncodingFormat from ._enums import EmbeddingInputType from ._enums import ModelType from ._patch import __all__ as _patch_all @@ -54,6 +56,7 @@ "ChatRequestMessage", "ChatResponseMessage", "CompletionsUsage", + "EmbeddingInput", "EmbeddingItem", "EmbeddingsResult", "EmbeddingsUsage", @@ -68,6 +71,7 @@ "ChatCompletionsToolSelectionPreset", "ChatRole", "CompletionsFinishReason", + "EmbeddingEncodingFormat", "EmbeddingInputType", "ModelType", ] diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index c0a9ed0785d1..ffa1e646fa73 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -74,6 +74,25 @@ class CompletionsFinishReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Completion ended with the model calling a provided tool for output.""" +class EmbeddingEncodingFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The format of the embeddings result. + Returns a 422 error if the model doesn't support the value or parameter. + """ + + BASE64 = "base64" + """Base64""" + BINARY = "binary" + """Binary""" + FLOAT = "float" + """Floating point""" + INT8 = "int8" + """Signed 8-bit integer""" + UBINARY = "ubinary" + """ubinary""" + UINT8 = "uint8" + """Unsigned 8-bit integer""" + + class EmbeddingInputType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Represents the input types used for embedding search.""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 0c0b3c5769c5..42c9d74fb4c1 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -598,6 +598,43 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class EmbeddingInput(_model_base.Model): + """Represents an image with optional text. + + All required parameters must be populated in order to send to server. + + :ivar image: The input image, in PNG format. Required. + :vartype image: str + :ivar text: Optional. The text input to feed into the model (like DINO, CLIP). + Returns a 422 error if the model doesn't support the value or parameter. + :vartype text: str + """ + + image: str = rest_field() + """The input image, in PNG format. Required.""" + text: Optional[str] = rest_field() + """Optional. The text input to feed into the model (like DINO, CLIP). 
+ Returns a 422 error if the model doesn't support the value or parameter."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ image: str,
+ text: Optional[str] = None,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
class EmbeddingItem(_model_base.Model):
"""Representation of a single embeddings relatedness comparison.
diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md
index 8f8d3c7371ed..aec54a1832db 100644
--- a/sdk/ai/azure-ai-inference/samples/README.md
+++ b/sdk/ai/azure-ai-inference/samples/README.md
@@ -26,6 +26,7 @@ The concepts are similar, you can easily modify any of the samples to your needs
|[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools |
|[sample_chat_completions_with_client_generator.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py) | Shows how to use the `ClientGenerator.from_endpoint()` to create the appropriate client based on the provided endpoint URL. In this example, it creates a `ChatCompletionsClient`. |
|[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. |
+|[sample_image_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py) | One image embeddings operation, on two input images, using a synchronous client. |
|[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. Similarly can be done with all other clients. |

## Asynchronous client samples

diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample1.png b/sdk/ai/azure-ai-inference/samples/async_samples/sample1.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba18b500872f53981c93aede494a07b59239d9fc
GIT binary patch
literal 307178
[base85-encoded binary data for sample1.png omitted]
zcRludP{-h1&#qz^86|VT2%}g!lcwZPbS|N&0&N70)5~8dJG|DwDi4uCjlKe{oboRY zn@$0IGD>q?L$2>a1C?QE+8kQ;08^nPDPyAo0V2OEzZiF%lHs@;89E>nvl)>)*Y$D4 z+8Ah^CJje_064^{F;YK3FCA!=yc%>a4A=LmA?7TE1Vt z;5X;7LT_!O0|@9e3VcQ;EoIEqx@kXHto#c)w%Z021Wi-9F1_fGa|fN8mE$_IIXJwg zuDg8H)d8cA*YN(uwZ^=|2S#3ku+FGXue=d2_KfX-15%d-#Q|tSA8o#CL1*8IP|-1+LLCa}I8v@~ zHq>opYlj@H=G*e}+H^BxM{aaDHJUDq_0fso2cLsJ5I~R=Jk?oculX?`Ubn?h1oEdoX~${%_Y_ojiuN7f9{EsBbM7fZ5m~E#BvP0kkVkDE?tU; z4dx$^GQAs8bTzp^v?~oPLy5uBNFbj&HCTEYWFb=}cjO+R!rdi6e<~8V75_^`kgX z8873jya1=cUo&r!4KumOsxrA=nWcu7rp$2&#mz0jlUXtY0cIH6qq8CJ%u@CN0hiQV zW^|6eH33=nmIm-roi($%GgD!I{O+sLTe}VY7)!%-holqWObi4>k&+U3yK@~x((nOZ zOPenR4{$BU&2)|5ss;)&EkY#!u8P;;osVH z3{;4Wkd4@7zeZR_%3u@`p|wq9I07Go#XSIxsegyA>*!ZpJ4cz zb`)caH+>9nYD7?0g^_XIYC!Mluw0^WrWqlOGN)&{)hPf#Tu-^(`sRCS<$R%x0ws|J z3dD{6^~6sw?#D6)Sb6|jpJ>|kOM(cgc)wf@#TzI9Mp&@X>(4qJZLJx{mPmKEF>26Y zIe#dw!M=~)1jPrTXchz+iB?fBV; zFUvKCd%4=-feSp1EGL|wpG6pTten5=hF7nzh{C5Zg?R)WnG*j~(KN*8mzQNUIgO0X z6?pE`yA)4NVOQDG8PvGLD;O;IRSE=%`{)bVNQxWpxq~q?$~SAH)8QH(oK*@VOU^AG zE^Vre9h7#A>?t_g-5Po>bD!-r?Th z3k_q|)1b2stHR=%o=Xs0jSb@#IQ>jn(^FS2%&d&F=G!&VloWULl16-DV6r`Iu3wr{)Aq)%i$#xWY9_eNAY zOwz-s$)Uq0Bm~;i))BeR;$k8oi~!&1TuD#=DSLt(!woJ-2iHP1X395cQI6>9mMC;~ zH7&2Nw&m4SdRsF3wDuMv;TEUn?ia?=Dm zs>7s=>NJeT7y{-w#2`!8z!p3RP+4<ZD7*cK4o!;$@3=9Vg?r2E!!+Upl~Y!HkZW8r#?s zZ~O2Ud$L~O%q$t(lK$lt!I+r|4gGo!#<@Q3(sjjeWy(M zWH7=P0WeC2aJ*@O{#{>}fO?=R1K%_MwGq8-;Iyo{cIUb!bJs~SWmeaP^tPoGqeS@o zc=5gVu?$c+GZYqSja6P1mq8-Vp@-n5l-@C>z0SqTblEx)+lrqWHW_)?uL2AfsZ>Ek zsS|y{+CEpoah(sOfkjVY?>^$i#3snUEox%;^$-|5*fXq)Rz}N=#`{?YnOfV38=(SuT=o)kl zDv`rdphiercQpmpgR&r%Tb!*ZX*`~mt0$cUl-OkdI04BBPDM($ji<364_ZZPvjOS$(MlB z?+v(vh@J6mH@h<5?8{=KGsw7&+Tf>S+#*ZLTBEtg;55sS{&jSL6r9Iuk?TxmfVK&KX99YpreQxz0W5 zm&s%h4cCD>ZcR~FC(0!VBQOnR%}Qak;Fa01)B-SPzmq4{3mtBkOo(xdQvjQei_2KU z5%NytEZ>(z$)=d6i(`2;+a(%q0@#C&YwrTGnJbzByMkY|>nn20+rY#WaIFcd*k)ac z(YJ+((>XkZw;(`3*G$}aM9^~D1E~9jgqq9k|C4ehWq6hWG3t27%u}Mjc(WV6h4>Z&c)1$ z23MIB;|#Di7^d|cBZM<&3f^(i0Mf@l4J2>Z@6`zF3{vZWVbHmDJ-8ZQ3&oHJ<(oEB zXzl#?=bX&+cyH*#*s?6hJ<{Lwd@{w#2-uO}F1AMc@#gvz&@vx??%=NuwzVVL0l!y{ z_ytU2?9jwMlMRzT>5pif`v+i9A3Rcjo!dUPLe7{Bq zY{T(G&SMj#hm&AF$zU;3^*}o#uB^-w+!t;t$8s^lY*PyL6vN^}l-TWx)AdK(Z_7&l9m0b>JQ zjX;tTgm#j#)pqPXXk`kng2oO{RfRa<Oc<&!M)Y!!AyRae>h7n^B_waL7k1+^agOZSktvO?m`ZfA^Dnm z6`UZeVCMk!4fEU}AE}my&Kg$7?yH3nwy16hpUhM^_N|5#pa>!SzgZ~tz(1g?G!_{9 zeVj$WAW<9tBJWJ0*FzSND^sWjP>C3kYty5q4v#v{kjVAG?3PhaP0!PZsa*>PI|Cd* zbXb1RaqOlG3!QLAAmqhqyF9v_l)>bTd-l*(oivi&;0zj}Fy_!!{IF7+JtuImkM5XT zO@-r>?3(4plsT+7(8db%ahdc0Qg|7;q#G%_zoUQo5>Pb7ZMF~jK~|)-m?U6pfGiMO z;P6;iu@ju3fJ-x{r=v1C8xR2^E67W=v}Tt2$eZ&9%mII+Wu?nC;aW!m&jDriYc(fw zpKfr%bO3jeRhKiAzlUWs|EXqs{%6(r25fL=kHBw+OF8s{$OW>4kit)OU#!DH&X}e_^E+J0ymsE4R)6RXn-SV-9;Bn%(}d~U_?w`%3q8=pWDCo!+GqH)t#wzot7g3 zDfH4Y#OTp(Gq|sA3xM)lUDClzL7X%uXwd;v4#kZGf{vTd%(S>;XGlQfG!Fo_;jS4H z@jCS-X@b->_j(}qT>{o!MUT~sxXcI3OrBCFcKmitwmQiL&ga!`9 z*%$)CdenOH(p)fTW}V!$6CjSFS@4xL93mpCthp+RAeeLjO#$BdNQ2ZGKqrNRq0~$P z@1(I2A`Q9Ui}fuY0(k5ZIsyx8A7HIrqB=f z47rh)m>@uEh(1i9Si5j>ahBpoojrMXyKj^$oYrf%u5rC}K4;9!VomFE z0>aEW&ItWT7ez|88rjPai%X?WQlA(TqwD4JNR)J_k&^Nb|Ia4fa&d`n;Jg~~*ZLfs zH{`#AT-@Pctygo#yrzBl8D*8H@K7U#>@U}MWj@C#oGv9`b#QT@?@rJ^E>8+x&=zQ* zt;X23^8FU92quSEd|nM2pE7`pveLpz<26rq8OC|Sl~A(?GU{JNgb zEe_ThsME-o1#^CvqBCU5@tIBB5*WBlsUdTRl))dPuDAwfrYfCD+rLe!)miF5?vlmj zLjzj7Rg)c`#!}g5)HsVZ{#9>eLINblc|f)fcUy+c7|keRk8W`QT*}pIS`?Q8x(>93 zC*G~YB%o8DU`n1F*gzwtpLSSs19I z?Gbsm*(y}bT<*?3* zS!DqZ(uI+@f~_NFA)5w3nf0Gs_U+oD%O!fwNy#y93nGhRAJL1E6q^ zDdskS=I@G~`*+ha1BCnht62a+!j!z}Wr6ZqN5G5%G8depeKSBBEQTGUKC(bj#pHwK zYMD$)QfplA*d>@UuVd%7AER6gw%j{xe*sL|^6Uh`BwM=pu8lsZFB 
zv!1w&7Tum@2F1fUbI^Ehgn{LbX0}H)8!V|w}%+seotNzDMgm;A9`@TJTg5lAUh5Vx%L=NQ@hsvh&OSf@|!v>Z|#%z z)iuLNx^%y~;CP~*Bfs)SNAP;KE3X-UKpVLj!$-3;>SPanbUr$K=?QsqEG|=;>hHOv zMZlmy$Pa^*;FEHu(LkQ|IBJG;2i15=^)##V~G(HA2OCI%SP%(oAOZ>Sj4Ig&8`vw zNS;V{=&wQr;iUaai=iFUJ z>VUT!{d-sbJ8%y)F)($CMtX|2NXjvxppO&u`9Asq}*EYLHEGK=K^{X7}d)JCbNIw}>z{ad;4 zycjbG2D!1Xw5x5}?acgW|Ck?RmmjgqOfU9~U%NOAI%M)Tb`n`hz~y4R5l5(^1WIWK zBw_hXLQ0IPT@s%LVFKng;80Tz5X0%xXkAa0;(9UwC_*|gB4A*qVvSPtFd!xEIA|{A zTA8b1${HvJ(jH`!3TT0clSzsQgJqRw4~Dm!5LCmc!XP+$G8NWQb`{C70Y*tTIc}vM z0oKdCGg`+e>22Q~G*%E8;<~0MK$xC3Q;~uzlu$fDpUMhv+;o$UOp2HIOwdGDG8aaZ zEQZyXZk_0KG*asn^$6$<#=Lipm$`(j%;PS5RRME^QQGO0xGrzeTnYqv>q&axO;TLW zuxTZCjf`Zlv~eAaQ8S0z6awkh8o?@rb|!FWeD#<;yk{{tH9td;RtOm!UqMDWanUHWQt9?(6#*6 z7<;eIn3;w?ZCa=%P`kt`lhRf#>d%^K`HC^4sK)I!bW&thc35_+Xf|atV z1FnoUn+^2up`O0U+gPJW> zkqhXL6C}oHQ$gU2)2P&a+=A@^Fgww~7GMEX2_P0YUgaA0#qm{F+`>0GWBvg59vpL< z<;~5aJo{i>KKM|_(`A%K8342c=%%q#{yT1I()darbaBsh>TtG^f+ylgEpC<6Vw1z9 ze`sgjO%Ai-bL;3Hot>1&k0u!8F$6c|%g9|KGpK+baWL1_u*$#Snf&49!7oSV=s5-& zM-L_uI=z29);T~P=qG54%o%KW@RaNCI^-IAsUx4WZmW^vXDa8x-fXWz2M;(ewj0^z z`#4TauGz6xz+l7qGalL&TdcN+Lf|ev2a~GI81-6AXD08&gRy8{n=jU#AlGIZWvmXI z7;hHSaYn~!HwC|0_RyK8hd4nR>ne}VMrR{I6LMsqW_!&LxwASRnw&6b46A{n+e8yog7eCx4x0L?X6sPeXmui_mvpNE zZOBJoDW2Vlua1EJqf^zGhY#@Ej6lvYaL&wEhsHozy|fK^0!QKfZCNZWW?j^bD0rg0 zuU4~QvCgDoqkcP9Wyma3UgE8!_aDxla;JW%Pekw%#SzN_CdL0Hlo5l5RUq>PYoaXv zmh=z5x^LIdBx?4*#+zia5GPwh_}sJsY+}%+dyyiDU@LpqBO^F4QYQmKD;zPz7)cdM zLi43V3b1&i#yW~hgy7!GY^OsJ6axQwkHBa2Zh=dx_+hX_k(jRgdB{XkJM=9hjVwH{ zC53cRVAIDK_md7f)5inU<7{S&oLj`qC`76B5{>YsKkX~Dt_Ny#72Ga>(T7kIyxemD z0CA6DulrRM35*4_wlFS-i$HTvCTqDffVL8i(N@;Ud559W0J=6CrcDKTK8SMom1$x= zgWh#r9!49@Ay}r22IEtdgXj!Ev%qaRTSQUZh9=#YsmwN7rUjiK=#_1J_gjA~T+s-n zn1iTeF`boL3{L|u*YzLiUQmK~%Yy1qrF3f!bT_@hOGjXh;ht@v9^zms&%;zsho>lU zRYV;VjjV-f9uSi{39i$Fb}Y40-P5>EvMvv$Xc9 z$ooF^0hg!ka`B{JI)edipk=&OS8EO2&bD$X2br=~#lp=~jJu$wk;H69E2XgxHH2Z# z&Jo=+ae7$3AaV!wRx>HC)0+m}X*DxmaDm^m>uG<C*U?%ByYQYFb#Y+8qKaF4XSfd*lEjDy?o6Q zBOkep>J~F~((kRglEU&%1&QOKPVr%a#PC3;)~L5S7s|49P?m3Ry;fQ9BWSE4XFPWV zFOu>AmdX+NVf|Ht6O6Ai zZc?)$&Su`|ymRCF7P2Isq!5pJIqx`F&`bKel&cZm(}#68V(NS`OR!+uwW!Z~5>bG^ zgE($FS2^>9mvgbECxv@pYCWGoxwd#&EVBVR58)&*gVgilzId>9;1mQoJuSgcrkYN6 z|Equ__?ni}A@Gm|BPI(&1RIrhV*wRWO`SMx@%uLDnd%|-17DzFm8g~RjBVRq5YP3?&b3{=ggxLn?xijJdpiPCi zJxnj%RImjLD~PrvLY;4{%7xj0c8og)U*gu6UYp9H5Z*FIgarfQDPj{8-=kQY7KU&i zU7}bRdT8SspZk?HT&6qq$Wdo7Kmh^{A{0;pAT%N{VSxTICz+m0_^&!-dfLuY?@`$Kxpt590WICeIOyxB)v>$1!((dQ zBDQgRS&+jH;SuzZXYPB>x-v3gRJ&S0SHNFEcHM9~UBt-{V5QmVz-Ss{hv2&|tfA0I znp)p%b;i(EM#&d>oqdzg%loZt}^;WM2yC=ISygyE>O3{v=Mzee*! 
z*~snbXjmRkMlnuiI&M6zk9)EuhWnEeQEV_*^fn<%2u}K^Z^Y1ZC9%@yIGeR`no|zd&u17$YWnI z97Iscm1B_JM#nAzxXa>hj}a4K5ty?9eVhK4Rzzb)QPM0~gvce%2nMJv_&H`8g$0Za zIX0!r#HFpDYZ!#tL~=$`e5SF}!1ml{j$QW=LpluJUQg&-)Jp;vYcEDm<-A$zNac7Z6(1>qe+&h1H+AA05gEAZs()O)J z5czRiu9)SLodvQwCDK{rR+lK4EZt;T=-35!7z0Ms`Dzpp&tO911jiU%P@lwTBh|YD zhKu2;>ogI4&Vgh)lK#WUcyb-=&n_AxW(y!|hrDSE)I0>n!JMw|wJ z8G4OU8Jpx2o8+-PG-{7rA#WalwgVfp`v_-3r`Wb^V6|Ank5wEF+l<4eAT$hwptHdQ z{EeQ_j~Je!wm9_$bK<-d6$`;2Xc*apEi@Lq^HrB0l=Wz3K2u=b)|~^*Fo`g-F<`uX90|IqbcAeiGU2IxIcG+d@r(8KZA4xjv1U%Tlg4sJo-+mmFbJ3F za*d8@mcqS)zAf7Hpf0bm^|}||VSBFwfhOXs%dp63cAa4aEWWc*jk+9AR$U_4xjX{x z#GUF?#W7UbWvBzI>3YF4go612ZVfM;SsY>ElhL^oNx=?GP|>A~UXI{vq+*Hx&W>-n zh5;uvEEsW(1gH@ip00g4kwi-VE;AL^3XL*AuyCzMFRY94Omyy;)0F$y5eCNI;=3+U zhMrCeH+1c!ZWzUcQ4Wc0pCEb!-H5do#&L~v0bJbIZUK$a;~J24o72ieZ^3~si{48WVx&r&et!ahnho(wZS=Mx3u|L#*&P>me6$fH7TG_B)ggf9!G z^C{rGf%bGB89KxO8#Qkcd}XZLyHKj!=W6i$T@QXnpo(lBdJ2W z$8@eEV-^8|#Yw!6AWgLczi|#@1L#8sLFc400a;Q!6&m=_hnJ_!^I~GEdzk*_I6g*W zR{mP7SYO_Y!_1g0Mr3x#Gkk(qnFrq)rMcZR^(Fy86yH0YgY=SL&|c+t8BAr@VY1K2 zKdK%*RbIRDSKwJ^dFEzL6oIi6Om&3tMc^f%=vEBqti*e0hq~il`dcf*jZlbkJln!F zzq~T4G)rgQh=wwSb(x>8dv!t_mu+YqjyQs9z2g(`vcm>Bj3chz2K1E$YekM_ppynz z3z)aJEokWhNlTn@vjFngwa_keK)#r(lWwXL5=uJquL*4wpPWO0>?A!89vb;2OQZ3^ zKxV5T?XK3=4V{$pQ}}q^CgQ^|AWH{$=$5TSW9W}SeZc8B%0Jo@4zXGJ)u3RXkSKwOsL~o;Ip8Sx?QQ!N|V)7pIUpX>iRjwP`22XOunK zRbTEcbmW68xn5bR&oJ2Dg4-BBPZQN>G5gA2*X7|+iiNcY;C-5MLMj^|Z zMT@9&euDuGo#+G_8E2;o*YjU^>HM=)2d_Rt+WX}IIE0zcL1I2O>$9W^NyhelZr&;cO; zkgi!$mMBpe@ut!T1<&U$6?lJ~8pjn}Qf_sGYbHqNOd2*41sfXj^5Po$KNwd&XV7{? z5-Hn-7-$4aqQ=Xr-j~H&eI}x3mx^P15K#E%TD^d7#jPN!0t$b=Eo?|C6b2>zh#n3DoX`)*F*xiI^dnj;q3w zBRd1m8W#kX;ckg2Rzy}yl++4g$E+t6PgmLlmPeLu5mcVwhRVo&29rj4f{TSeqEjAL18>zizNKHY9!3p^ z=z^8%arn?BbxS?)FEcPMM|dDojy*-z1x_6Tmyk?rTLkA^>~Din(vkJNcAe`X#?B~p zIPOCeZ~#6_*A#XUDVVib+<0CB17~bzY&1xwdR@2FKg6hEo-$P081_%ly4K~_S$ z_HL0%J1%#1H}-#yUM-I|U$;IDy@7RpT*8g+5>IxT6X zY*uHcfgU+F`_cAidoANX0mIBJTJV?biy1%UX*OHsp-1xA49w-(B*k?egc3ld?;Npe z1ZGwIX3o48v}OA`;$~|Y)4c%|PdqGY;o(_efJLI}*xBj0OirOU4vb4w(RIg(K)Fnn zQ#C_x4T|3xIf8Q<7iMiXQ>R`T2^o=nmrhO7>u4Ipq>#>u@ShpU^$Hnf+%AjJRCsCM z8f)bSpB`*s4q}ig|I9~2OB3_Ak*W%#lxdfIrNbq z9`02mVbBAtR_{+w-MVc7?1P~;3P$eu{lv)GF(bRy&uBC2vC;%jp$W1;|6+oHgS#aZ z7VW7)N3SztF6yutSDo-IyD)xFDHM}jnq_3@I=qE0aT?5)Y2TQjf6Mn+H2AXzAL7U$ zqs~owxa8n>0!l16G-lia8XqQ|z;Ab^uw}CJfijYmAF3f} zX0pbn4I<769AU2gFs>5;4k9YhiPrGKos@=IL#|Rr7-G~FDl4?zQGl^R5V5;uf$~{2 zl?=m4jaW5kDc*1nh>?fKxGtQ76TrbfLUIal`lKR;&V0BJ&`wbp*O~Ix?i|q^pRAN3 z4{3=5kQAOE%#9vS6M&RijvG4VOlL-+Va-vz2xf0+F&JJobkP$%Y3faC$kWWBhegW>A5P5B$8GG_G4RC3O1 zz>+T)I7b>zohx|fAuo{Ib(^JR2d!)#__0z~G283SX3Mjj@@eEGLcWA2~s27&B2h0Bik@PlelizzFoO zC#xxM7Q&jkwb0Ydmd@G`;i%cP75bkoWniBtp}CY$gC}%e!w9`01oOv%LHDeAFtV0L z6}-Vtb&e@{fR#=#b`ZVwfilpZ@k3qasoYe&c3H^aLY`Y7CN_%wI=$kfj=QpM=GEm3 zk5C@;`xrTQE8vjRkHw+tl358%XvUOrp_9r^17{&MMetU>JKj6!tKQ@&ZuBY3T~Y>m z`8MKCVHmQY)4&~Ik429-$MmD2R!izKl=G)u`h%FzNlfGiUw9~4s_)T>#)*d6DBb|n zLTn!dMH^m7``+YP5|dGVCV>Ue+Cwu5Iv*hk*dX7;jaCcii!rqrQx_WebV1Kw(So`& zC1*RNo(KHc;OHS2VkD;Xo(>MO(!XMfZ-ZfM4194bXT^Sk<-WUFl=EsE4O2uscY7ot zWRB{0cnZIS5cSX{3xC&^+jm1_l_(qqlF&IPo6cpnhby=)k@Yk`V_+(Q5mrs#-=c6v z?hq1M2k;=U-kC~fv=b%dZ+gJcRVTvFqVPtCaWM4mO*be68dImkFd(kPw5>3S$=66v z26GK>BN7iRP+;QaaKCTcw9w!VrOS+s+WAT+V*~N?7(=Fzlo{_WO6PX1Ex_W|trgd} z6{mX0+zW9nd{R|RwV8I(aQI+}0S0rRk?>74?Yd^N7%7}X$hk|y?g2moS+)w&z6$Jo z1jUD?4(Y1_VyHMAa_t@%p;=uLQAv&D1YTP+<>u$K#dSj>f6x8)F|6`pyK;Z~ZJFK7 zGiCu&5L{w_>ruS&R2o|})6EHk9vYI=J}h@UQzCaO;N1@9;VecKjv;H;2v-XA=u>Z7 z&(xH*Pk~$S7UgEXAWAURu8h?qVu!A|)3}HHsBEr7d5WD8@&dm2Ru1ZZQloCh3_a${ znrTpoHJn+35PX=Ik&tZ-b+)oLjc`LpkC2^=^o;M|mojO@;jybk(4nvE+n!RVoY!o^ 
zKSUbx-oN2Fqm7|R76%#GVZ7!WB3X=_5yOM`Y{ZoHLjXqb_56eLA^pce!5Ls~I!qct z9}8IzNkT_-AQ(;s2_IZz)h|vQe1^@8L;gA@bbxs$f&y>^Mk4|Qj`opdokeBZ%#)E0 z&YO;|@)JV>-2g*CyuU{y_omm#NT+SO+?LmKmroY7aVXeL#pnojI&QAnZ{X*4 zMRdwN)x$Mw&%NTv==xC$pj5lLqUdp=qqYUJfLdGZ$wGCzh zonwQ!Ns&Bd4O&Swe&aigI}O&XYIPhvE?WaHE-^gDsLinMMk6f^Doq`OMTnsnx>T2r z>4&cx*rK{s8~tM(ew8#jFC>zCYcy}R+XJw|PxTggkCR9vI_~nxnuMg~k>~yGiuw6{ z#UY(QWn5Z&7?uacnYF2p!L7AM)&ppp5svUlUC;AS)OqF%g`u5#Rk5LFfc+_^K+7nJ zPdAdtbY9bF6+nD8u&EgVNCMW#RZte_0Z+Y74KxGL^YUD8y|)ZZWkyjXn2N7)m*`0k z@nNC~8SL+IC=^1ei?HETc+y)xYtOWWpt)Y`Si)Q`V?gn|dw7*@oiMegUZf&gU=Va+ zc%md67Yi$VX0KaOHY<#fOLY)N0K7%9P0M>8fJbur)GUU}91#v=QGlUo+NgckJ+24{ z-m?3cdk7@*lLI|0SdtN8HVn4dq)(KjLL~aa0q9SB9`|V!{FX7Bp7rpN>uc8nJ7w4(X1-QRTil|Is9f^O z!%Zf`X3~Upo%m zX&OHj;RFnjowi5Fp5SZ{J!AO%T>!B{w~telv!(@i=q%9GsK!|ed}axJ ztT3R)KXqVj)Xrpe`UD{(Q~xQ#OqKazD2WO#hBzriJ+6=GSW0i5qo~0F?m}li2rUIW zBPxN(S`L9YOJR&c!{@fJL{ETh&N6Ux2j)&2f(A2UC>kkc$SQ8exQ4#a*mlf>xnyHj z#0;26TKb%UIZmL4Ov5@tt}jmkKBGXeAiD;jah}?TZs^ymPri(zJteIcbJ4#_gB(kv zD2j3^P=k$elAS8|)}R7FG*E!Hm2wp`Jn_qA0C{RwL^+loE)`79hGjIFz>6_(0k9)) zOfM;PiyfIFyf3BC{4APkfg4TN>}f!UDN!uO*g{*V&D?a`9F#a<GT`j>1 z95{7rf@7-VCuktp+dRf;fgef?+u?T|*WF$A9-af=Q;04Zkza;F9%6&{_vUjP*ZgoTVa} zM!Sma!a_=njx>L8*bQ7#7l96HB>Nbe!Dtwr(@-~>1aun87+h&4CV{PVl@vbov=0xl zGV5UgBJVBM^ht9b=UTUR!3lKPV_jqHkPTHpXHEkidNF>_O>v8Eih~)ya%Vk6oI*Q| zaj!p3S1U7dkoimpAZd7P)EHdwsg+D4x|Yg#NK|XO%t-1;zz_Nr`^07Er)OL@RM#R0^WM!4#Ol#GY=r$ed z4O$dpWTf|#RF(!NZ#}Giq9>oFtj?xrgN97W=*>7dBd2ttp^A{{g{V@{QQe0?dO`w9 zPQ&vO2aHqEL!k81heVQHy>;ZFH)zqe;}9HmGS;+H6_>$eTz7dKAwo<$GXLnhJ76hs z^b}3~x(&fSzE-$`xKS3t*vNf@a#~oo;mv3w_;ugBqQc*fF)2Ow^*3-hPRhXa6|@)) zJPoajpdcK&ds3Gh1WD7&o}48`lJ#J|Ueirl9gxnNHGr*9!E`-G#ZzHY9!<;F-yM@g zRB&`;s-ai7RhU74%orx=H(P(!`?8vCFzz~CcUjMu{idY_7N!euxLt>PF@UBX$2cMa zrxD=mt9g0(;tB&%k0y>VL2hLP(l9XJDxm3l*W#q32H3VO9x}>MX z5yE2@!zXg72p06j>qmKA^EUcPu_g`Q%;y+XkUU0!|8uWv{nBKym}Lu|%BmqZig9=X z-$>)uxlGyMqI5{J+HsUxXjG9{>S_vMmV&UMGp z5sWN~t5Lni&|5JGUg+NvM;SsNRXEe*^^vf*+>b_T@Mu=&z=Mf2?lYXY>9Q7X^>C;! z&Y*`5j}DX$HahKD0_u+^vROzeu$v(=O=*@dvOs^eh)e^b@pi6)rcO#UKl7|njWpm- zJ~De)f8i&9s$7Te7@N(RC?*CU0=^}2-cBHpT|AKP+BY}9dy!+ zYP>H_H)FRuUnvwuN-_{=SHBSBsxe$ao0~<&k0HD?<8cxvu%@Jq77d1~lLEMcAsQRk zsZ&9&d@^4hmsk_-bbH**{0O~0iLE-<*&;e=VNAgUoa~WBiu9vV701jXsDsLxwF3#l z_|6Bk)U1RNCVcASXoEu*zm)KQO+wsPOOY>^##rNd~tSv*tre3(jgK)?#+Sc?cQXuQ;W3mHAKSf^3v zlrKV4h#DtRfQ^h&ohDhD;A zR2?O2i@IQg%Nojpw9K}C3neL}U90|9O`Y;)5FTgx_AYO@+W}<7> z$}y_vW+;4q2+_OxABPOSF+b&-DTPM5G-g2zys|XQIK9t#=fOvGZMIJuDcg?I!d)Fh zd6zS5xG30#j%nL;C>yT#>7{s9xTowGr3iQmrohFR7^DMr`vu^Pm)1jpJZ90f4NWm; z4~f9Q>3Nt^V32}UOu7t%uqv>VN-B(Zeuo7T8*5A4;)hP+}X7utf;!#Fy!vBEYWuB8NYN5Q zJ>sMonW-Eec%yUG#VBM8qT@hl+9-WMgTLVzpF?vqAz-i?^7M^XrXz(99!S0TJ4ghVC@jp&v*vZK@x9LED1u82LCi z&4y!|bKxVG=odQT95k+8%SgD_G%1EcNMfv3qIa1;okX^CXVYbo+k1J^XxgM8Kr^;; zw}mENseB0*e59cyqVag!gQ?E(S?OIoDTB#5MgU1gM~u{_@LC7o`#s67m-F2Fr9XC$ zIpdI;K1q$vHbn3`3>w^y%PiyvMiQzE_w${*JG2$bR%oFN zAd7Ic@&=kE8-yIhVY`@N=z zLMW7VcOxK>$gz&nINu#n2>Xy0(IW&#tkZFr&SxcOd@x@PeyiEfxOKji!!}N?YxFv! 
z+D6BzQ*DM?d^SU1yKqMJNG2R6Yd2_&o(drGDuG2e?42NJeK4_V5tB*`5{OshpH}w(0-;UE$H;MS-luRe;^vG8|t3ko8zh`|{-;Mo%xx_7Rvb zbpiqb0t_}xNP$wk|LTn{i4fOovlK>Xdcp{yMqOb?5LW<1J)KHprr0n-+SR#92mI}X zHH1Tqdk^p!_Ar|LyQH*FCyc2F?*sur%IfwyMieq0KXXTb6egpRaM_mW->P}R@1f{K z1=UzvEZE9hA%z!$J&FV)xEXty2%c8;AY+YurecGDN1AKu_8o@15 z089;-3)J#qh=+W6DLBAWuG14VDth(mI<#!z;R;fzfWE>r1NPbX zEe!=WB3Yd_Pf;`K?7Kvwqe0saR=!OzuAWbx^)Z}}>zifv-b*8+2cz*W#?1A%a@Q@( zMXi8NB*1(iH%+M*E(0NdrrqF|%Tg)Y1lX)qQLcSU&8(j>#xX|E$LJ0`gvFFAFLfm8 zK)%bmod%EVy)*YSR|_$98m%W0lxh|MpyPh`x_eq%t*Ev=qX|JC9XQd+)+tfPob!YJ z-j(YOe8H%H>0kbS`NqHclk(&X-z;~NH*s~wymXW-a#GHd zP0RX@<`-HcFXEENA}5WC(Q)gSP9GOy+?65Z2O4R_2fZP4Xq5?c)wqE97#ss?Ka8Q9PLO$iq%d zY1^m4hDC>FcXaY04K$5BF~8t1V-O2-%r*|y+L|2kPE3IS%2Fa&+LD&Ow-nE(11sV6>?&IRSCoEL(OwsMm3BF{Bzv^#?v1plDEajy&dB znKfv1dDx=vS{o*8A+PdAr^&v!#|&1Yb>^93O*3772VGNm2*dK+50|s}EJe|Kct+ov zcZ=>UoYQtmM;#!uVMf1Z6{>y0p}SM&H1O6LSU;nor*GP0=Fn=$n8ho%7FYda7__KE zW|jcj@_kMx72`4PLm%A>4-E!`?eytZ$=TeehR(sOXJxjyL1r}Gv=rw_>;qE?0^QO` z2aKok!!snZmcKqEqmZ*@;H766G{5!r&;7eZDF}}f<{p%Qn-r9wI|>9SX(&ANS}BCq z`Kh-Wuail6Sc2zQ!vVgLU|b|47?=}<$|1DgNCkxOJ-{zB-R`9_S>fxts0J;})o`HD zIbcTSF-B%v+b%0XQPt z4H(t=2{uN?o=#_qxsP!-JwNKXg=mB$V%9?sP$2?{02#-xXXX7KGNI7ug{_EKRk(mK zbBMygCIroWr|lJU|<^i&zLr+2HceK3?nwZUdPZ!aJe?~ zg?>Elj9V24*DhrCYN$=?l2uOKGG~>;_d}M&(WDAk$+0X}?sT|6w z&W))}1vrid3S8HT1)fq)ll1|O0b>o>e6hx%AabY8E|bWP40Ub)UMP*`vn8mS-L zq!Aa82Ld&QQ#nuOh-;HALznZ&UqCiS30+j@YWmyzt0Nu5C`v(N>HG}i-G2L1<=g+W z|EB!Tzxw0yxzB#BoIQE7%!oX$UcM|hyq%8)WUfVDoDH%?D~^c~GX&kaO=^oVoo&j7 zNZ)=0dyA|BYEq4^m5YchGHWnU&uWlYjOm`P``cx4C&iBWJ7=R~eL9^VL6N4oM&pM* zf+6aF{Bcw)B22NlsL4%UqEyu^w1_+E9#EY$>g)H$i?dAg+Aq$>GE9whCnp!1&* zW%gzxa{+SZs>~}>OCtBHnb{<3iFAM`>q?UJuq63p)vz)(hzG@*i#SR$s(hAIQUu8LtMkuHT2p>KnMHprG+t$*%|sMHp@?) z5;05XwbREb;(H5+lN~k-c4Ck!FR&(zSqyXh7#-U8GcnbEz%#ES3bz2f%9(hCvdTN} zqkplCKO<+ozRRCt3bxkeZ|LQm#SYKz&jB^)Z;CH>7KV%e$`u%wx+;0b43T3U{fHs` zcn*i>Dfj>+mrIDE3Lg2Iux1bi|Y>kk6^4J=JnC{Mzq88~%pnQf@ zF%CU1!HVe4w8m0gyc{D9ii^XwS|K+WB z7=@u9Q%5Uk@44mf5O5Cet}r7bjT|*(Een9T$+hmnh|_g1;E8aNgFFlbIdtt_$3eMJ zz&(J&>i8aGgN>oBWAO;u?5aQ@^vrz>sO!e2^<2XifOT2~c7i($aI*|2im|jlTRWTY3=U03)KJ(H&^qF_0dD(jOu>%;n)pSc0bq zy7&N}I7IT3iKtJK%X^1_y^S%7fQ7Vh8yr(-E^2^|j1Ht*v=|0aXP&(1ujq{*YQ)xn z;nSN{IeGl?@+beh|6}>JfBHM6({!s=DWCl4r^>H>>tB}hPk+ALAhYgyH>)-ok1(R0 z?4Ypr#)3-=J&j_0qM%zWbs%&&Je%JwPvA3ru0|#fNd_e#qN8Rg_$&@depj!MUz`PT zMfye8b1#2Gi_K;kr(8$XVpG$z)-R;k5QDt84Rm}7kUhWJ#&OdDZX=&QjmwOgK&pX? 
zQO}sfGw36~i17HBVVVg6jFC5uazGm@T3=zhe@^u7bJ-bs8I0RFjF>bW#-z@{8QZEs z2lxcW=m7buj=u84b&OS+%kj+e9!Dt$2q8oAsH+iT3}*erPccCpG7FiA7+OjTtE<&* zMvrJfPsSeb<9iLW#g@`O^#uou6E)(l*{@RAmFeZ4fCx?FMf(|Qiq@PpF-=% zKMgVuhvg}F9@p#n5P4K_7g-lb>yXjwEqs}W4$7PQ?l_ep`&Cx*P#YoJHqjq#h-=|4 zP5Q7i$D&N^#dfX`8-zBoPMkFOa|tch%M20pwKy-C6`#=ok7_l(_P~suL8AeRbvTv2 zVpx`jX~S{(e2p5NyIcVsldgx3*b;HwzC4@5;^YqCGzL3-$|qm&a*XcKYM0^IgPNc{ z<5RDa4M?Dc1AtP+@xei2;E@Hh8Om}!YMyaflJCGn8U`--_?GxqOAhcnf>jVw?~ zOwQiyq}>2xj||1w@&UAwxHK$}G=Sy=xtun@*#Co=-6 zQ1x?#UK#QqeQ)oRvN^{lxz>)`-_)!V5!4tNvU z(dT@z#Hho6jPweSvsznawFqg5%$yPB_2|QS>S=ngi~Q0c_%M+u9OLwOT9weZUcMl) zS}r0N3cM#zjZLX{TnFE!fJboj$x`x0;Ia6`YmGcnb;e>ED#(ClCS@4$I;IsT-{50fGpB{Ba)NyLX{-{RS z?p1Z!!M2shSFc_rJxX`HX%{(Hm)yoTM#1H+z~g>cogL@q+B?%nKKLKJg7nbMbx&wY z-zsx8?tZSvUdnWREb~Uc{MsM>N$Gv$=gV3J`u}=;^o@^~pZmhs%j6@UEY0ztoL`K~ zlSjRBe(96s2J}0~80S9S44y4->aoHa7oXgg?T56f6RacE#UTj2R7LbH;Dwy%^h6(A zpJ$#vPS`bNjhN1ZvTu}S1Qf>_+RHa3NFy_#FNl2(BpI5IF+$)!- z=lrahteistHYi{9`QOY8VK{;<9$@3y5JqG+Ig)GW9f!XX@w+F8J z6Vh45Ej}1ID)-j0EYayLvVKBaI`Yo$odS)H!H2a_I@oT7yxrnJiU;)N_(T9t z=5*XJ@+&w1O2;Q?B z&>!SvPsh%|t(?x+Y=J3ljhzQyEx5aY{0dBf6x#b6s?x zuBWT_bz7?xQD(S4cnp2GPUqi8PHSgOubUA`jg3YUU2CUHRi)V;jAWGJm2D)@}aap`I02*!*>qiU4ucw9fODNrLWh4$c< z8@xVbFDXF}Byqi`Mot=b3E+{A0}SkeB;+j@6kufyYD|k zy8rQIa(Kb}i(mdmdHk`TD~~_=c6t29xLjOJV))F+nN_*IdR>;&>#~_oq315L+U>(D zj9m)Gn3rSMsq&nEPY6pfp9Vf}#^>5JG}8In2G#5G?HZ6Z^FP>}OVafI>ejCh=> z1+7MY(o+7oq-Anzk!QB=B2O2mgK}}+O+7-kLJB#wrkumo3Z$A{FtSIGq@0R>)+*#+ zA>RB~W;eI+->sq+C*o`}&@tEh?064xQMZkZb)}(I5gIrJ&hEruf^w-Bz2dB7bQMvUJDTqO18-BU01+W3YNMIR&f56azn<4|t-k z@K4>$864U+es}H`%~|&lzH%L-W1P;v@~391pr>a#z(Z}>Lv}82WLwA1=eB2gTSt)b zq~-=W@g>j1WV1L1O-&D~Vvcd<(WL>h-foW)dCNV6NB=2vIX?qhg%i-uY)GQ@Eq&|E zIaUe5t$XH`?SjlrD)Wb_N(9~s(4dB=%Y=mV9SxZZ&`>2+D8Usp0nqOx3N72Io`jy2 zKp_xo)Eamo-eYRRHBHw+jZmU22!QSCeHaNvf*?$eV%5OtW#ZN81!Zp_?E|0ndIl>A zEP_ZiuH%=$C{-T&%HIZXDj71*-+(y4U^S%Tpf{zV6(|LQ%pKwOfB_AHYfx^pGE$t3 z+huYNkYMQSl}oGkFm_39(TNU;TR&EdH4cUb1TPQ(whz-<(um)f1GGYsU8`sM5hx9d zbHB3yi0f?AwiSHW@!~L{)Cy|d6H6Pscl5IW^i&qpb=A{za9o3Nt=>CIgEEXJgE%Pi z#0c1aUVLyxjpu6aUS$-lxkuJK*v0iW0SC&!Xy*|F95(Vdj%A)0=m-4I zGZY{}1!#>sG%-c&4JvF-pM!*+QZ^w9e)ElfnVjM%G6$bsFUSg99+c#PI`Y^v%Z*YZ z??gCy=PGva6WS&POZygVdaSC(tBpgFT@wxrnb+Afn%B{lkKVUO$hWISYA0qlvod+| zq+C9EyZqIE`t$O}=_pwlU+$22hB13ZTU%u68{hnOc=e<_x_sjwp5LI%&1Tog)go)T zmek05e5qs^vh4|{%xsRSMFBTkJCPd+W{l%J%s^S&;HhrAg_!`NP#me6vQ+kzEw{Hq zS9NmF7y&(<%r3mOsI5O5GA3(rIyhKcjO4Vup0D5o^Jbg^KjMYl8BplNfi3Pp$g)iZ z#~fp=D)Fz618AEAz~8j8@~oUM7iMP;$aIxw4eq1MVR-|`XL3rkFDNrsw2f}7aSA(_ zcN`z^BEZdO!g!W0G|Uag4ld1bN@vj0xp%;VQGbjs_K{f({R9UEr%5MGVxbGQp~X>edV*8mGiV?a1q>L2~9#i=R>N5y2% zERY7WB-=I_hYq}bC|{tNFieCU&)mzL`CCS!Y+N5N)y@IztZk|d7`?i;T2D-YgD8A1^JQdC zN+PTi#NEOtk@cQ@ni#(@{tCpm2#sE^-jgXx4)!imgnA#xYc?@EwstyL|Mm({gb+iId|| zq8^*-+P2vSl{3)-?J*_fT?R5{fXfr^te98kXg-G2{kr1`&gAJSG#t{OP9J&_o#0yL z=y96m#1t%YwstQrJh^o=IHkjJ`TWOy;q)v^~FrzYDuRdn=t1BUq6 zU!$+cv3hmncl5(G!g_jF#T=t3qrzHm!oVP|>!~mdXS!PkIP|VPR)Trrnr9^}%Ju3b z1+ikE+Xy|4&+2+<8olKn4X%{X=}`Q2sD0>?MT#EZYH_M`t@RZ~Z5W*`P8ho6QjEHK zij(>#JUu-hK>!_dQ|q+Dn|KrUIT0`sdTL;$hlNkcJTkr*A{|@g&_@Vc>A!_(_#vK}Zh>Xie;_C(jznJM*;T&mjBo_T6rqC5y1Rq^05kr)iI?rGd{hGb3ktA~u z>M}>JwR+yCLt2|-fM&)J>hnEwV-C(u-O!Otrj`B@RB*r8VbD{Wo{0i(ac%m5hkTf8 zjgZaYxpZVU&Es3G?622x6*3F4xDK)*4~f)qex!$ysq-*v)oi*A)M8%xp|1pZbnU$Y zgf=95=FvyRMqS<#P(*7v3F83s5ZdOA_6aH}3Jq_ravFPllabEJ?jD~uy zW9Uw%Y9Cy-tcF=U%5n&|8GNPg3LL@^rUmcmws9aCt8+@KpK+OGskQ~3$MNv^WW7PY z{zu$_hT0If89EkU2^zuKd=IU3hUB-kHxKLoey7eQC~@o%ClCE{ko?iG$jG#?DYtsw zypiG|#?VL-Rgdv=0*Q3_T%*q)m4Wb2P*DJI@_J8S1++>qg8e=$@8N< zTRzpKMj4V$f#DS+&9$xxI%l&BM(!RYq9HjYsxdMgO^AX39_Q+*LlQI|J#AYI=(s#z 
z{}vP>7cYHi%mQA(6X>5T zf{KmsD_mw+COOfrrrGB6by?0A8K?9z1#1zYO4DJCJ{s3;k~(GB6@J<`l7lhNIRwyp z1D*7keRQKvRH}}lw?%gDhc|*T>*IqUvQCLC?9#_w=@B(rAZJ#^2jS?H$VK;sdyn4h zLzS&UuA}K$>yyzWN|^64x}K`%x~R^8vTT9kV(sz*vL%07uGwi+aGtn!v9LH2zB7hw zyQKXshE<$#&-mMyMDC+rfZY_bZTohIhqQ7m64u9-de(T3m_+70-{B00NFY)NG>k)> zsnNtL;&H$&@{Q05EbiyeK}gYAvmrREI1_h{%P0k>gD%Ubzxbu{z3=`YOA5d7oBtwC z(Z+jt+tb^_VYz<)S$X=?cgpv_^S$zmpZ_X!dK1|kB;pnjO=laCDHBFir>8g^XJh0< zK&#GMn~*-4DFF!QHV4QsE~9q^Jn2szD)=w(gkUlZx;J&&(fweBd;m+17_QX23@527 zcWvMHvv#~#mFe75_mE7+B&{q2bc<(>JJuIKj&uywJ;6Lr{G)&IBnO%F+**cQA#*w; z4#q6Y&1{Y>^soUatp1cR|jV6_aEQRTq(xhjLShG@T=DlK#xCE_;p4B;*g?$vQbp#&d zl*bD*hdg_LHpH{+AJkQh-YTS<*xsx6mbR9pJWi9X%XWS5_9npF7 zrE}+Ux^ziE$!AQLL8UrZVFm2@QqMpE%RvL~^Fsh))VW!Y&msC#kbX!>*jSGB!}oJg z%uTt8Fsld}P@m!Kqd}1m1|VA;7z1H2^-u%6NSpwFm(UeN-0Tli^*HgM1zeTIl+}K3 z3K;JhL%yB6Fuzkymx>pj(H-4c!stY!a05w5)h$p>;VehKsbF0oO@~e{3MbG z#04A`!gP?S6US^ixmJH_9QDvWX0@jNa~?F&tDd9=DA9O$=Kuv!aIz-L z*y9K&t|);{P>1MqFg{K6;h{9{A@=B5ofr+cLgd;^9vH(|B0Lgan$nID^~j?ojr!n$ zC-yymZsfj5`pC%ARFv8j0q2@Ab3dQJBNRXbp42aqU9tlznT2T>V+A|gX!`IBCrOF*5p!x7<1ssD4nu#Oi;Src@-Ffh9qQa>D(1yZj=^VPq_3!nnh3=Asv`1G_7iZ@yVR ze122@_OE|fe&=8RLHWevx3k`UO&c5DzRr6Xu*XlzhDh8)qFN|I+0JSRHjL2Q|fw6!QNpT|UkrU+7f=cC7IcH|@ z4?ah~1#u%1=FPnN8f!k6!A6s=XWPc>#ci4LZou0fayF2DJ(mr*4xKjYbZq$tO=_W+ zX;d=-?g~(*bSThwh?ZwjfEQD$^omrMkV(7wnJaWN60 z5tcH8fz(*4w}LxXna{uo^kTMZl$#|wfXwN3HyDoxFI_yEWck5?3h=cc4qZYn%y`gA z9AYEvoX3tHT^42 z8v~c+RfPzi)MymLgz}}R7CmdU=~Fysj=C?myCuV>clz~-erSKTPG;wW83JU!27+QG zG_i(&>nk5?f61_9+%@xOidjcT`7&##jEL{*vGQd^E5GVF`h3oph&wNh7=4&+J4AIC${L;sw+wIwc9Y99<%vNX-ubn1LN z?j3<;B5TH%^?L&u46tJ}C^MsEAL^z^prbBE$5?}htJS-;f|ZCM^+aaRm(LJ3kM2 zVpt@mHbUWx=SsVJNv?loElOdevAiT2j5Q^ww{1$eZh^p95+Ny_aTXYJz|;FoIpH;# zdduM>qPUDx!5FZNQW#CUm|9^b{B1Qiin6WVxCVEJ!CyfW4er6s{}>|Apm#o|cTBw~ za2X>6REc_>dqW6lxCL(F7P28AZps-0Yro(Z_iI!jA@^mEzLF??nXA7;LX3+=Kd$jH z52BoGgJEt8ph~Mlq@bwaGQsH1<%R5{24Hn)^a4|zY$OYjHI}BC1Op$a;+m0kl_nZ@ ztK(B43U5L)+Hif})N~G`VN9kZjqqQ*nj%yvJOmLCZJS%@N6^vHYXNkO6oHqXKKNq3 z^neGhLIXTY-BjXM{xS`Z1+@^-Epcx7Fgv2LxO%-TudY^Q#@KF%Cbt;aK65ekr{L@K z8Ckr(zRtV^jqK&6f5F~7f*`7!BJH3MvLh`U@B)KD-#NOJ`~22rl0DW}Aa|F*=#;1p zo+3RML5q9%$y^d?LIY? 
zqq;U|=3s>*qYUOC4vic9R5@WqqG&T1p4`_!?(^tZ~`;l+Bf48VCX&&64N)-{rqELN7(V zUWhyOkE-jR-W^CY>?(#jR0Kjr-Mn zU9J~qSa$TyJY$4(oU07@nM+scP@@3NvJDKKvnt&}Fy%Jx?%u*Ri``1y0kVR#VhLmE z;}E|6WK!P7xT^#EJFiqQ#hA3M0!*7}g8|TXqD>7N_q#@6YRK!GM0XC-qj8CibB`XjBbHEZwJ;sOUh7Gr zvvpQdHpL$4=aLALfAkcKf9p25*DE{+FLTfc9SQ4cSBShqTP^eg~(yYswc ziMEdca4ztWzYz}R8d;@Xja%R1ovsHMFee4oe_YH|;sv|qP+*gs^>K($im|GFIY@@F z=*e2)Yidqmw(9g2C3Oqp5y04k94eX7q|dQm@isb5s-MoVec_4GLf^CTHKz2vMwp&I zpQZRr_)s_$eCNV&;Je^3j-UL{NVXf@ zL?sHf*%9=VS3BP9hG;;b(D)8=#Hxls0LI}k%7lgTTyW4?(IGMeAbnCd0{`TLYo;DM zYC2NhWv!EreTEzf@NV~lq-@aL0BdF#nzB4Wmfk$Lgn)AHpn zeVH-!Qaj+`AFf&Vd+-^)$Uo;LSQyc0 z4voh&n*r0|S|MMao3I89bsThlH70dg4(FUcUcPco(i)En(CU?aR)96y(vckvRWIhJ z4y9wT_)h*bnqZyIc6WeH28$BVFuqie>nv%QPrw6o1zd4AirSjf1LI7a$&&0EDGefW z;HVgu%`mL$D4_=~=bCBJusJ+yUv!$(TcduTGjB1RwAhQ4ywn-T<2G%{rXA4+4WNs1 zWWAMROs#?Kfn6zzqXUm(Po|Q$jc5o>+gsOSQJ%|VgYl#cCKuSFx^qYg3l|wfw(v=J zomPV~_+$Vz=$|F)Q?u1|%w{Mw&TiB8Tn{m=+60|l90BO2{n99#h4FijDvz@Zjq14# zU0pdAD^Ea?HNgq zFg89jdIaMx2HkQAc&p?}TTWY}aZ@JliFF?}l0ZAOOka9)h=0;LGFKz*;}PvQYv{Z* zD6V~}pvhVw*dD3qx}$fiaMmbzJgs}{jgq>INrRf=th9w90ZeP=uIFzRXST@g>QSE- zEaf2s^lfD0b}jer%1_sFmw1#PJi94dj8}Xt!2=~SRc;YZ-SuGckM##cgn+{Jd|LKI zxfWi+x&U4tr;D8E4PRg+PA8*k9O=gtmXYPv^{h;1*BE!40eGO2>YeN0xX<0gTm-C} z*%ZLMh5mhvW>rk(pi<2UM|rgJ6xq;ug1)Yi8oiAM(pw{7DiGyB_*LMM+m2knWjxs$ z!{>Evl4&|mc&rjE`OFlfkM0$e1TE?95{!|t^SgtmrYsZvA(YaVFR}&jjmM*MdU1;5 z;NEt(Rq1pXQQ+X@d}^E`{#it*L*ROtK&ulYkj9JGVS|=Ki|S4-V4_cf$>_zgb4`tw zY7`+4Tmo7W4P@Oi-aPYx#^)I8moMh!Zrv`w{Lg-;eBn!9DlcALl@I>vyJdL2EO+m` zER7GRJ#|a@=tthhksX&Nj^@)Je^AEFUis+TA1$51Wwxf71(}R52~Zf9fCEjP zlZL@s5sRCQ>I6$cP5E{0vBgj6r86Pj)n`FXjJboqJ|xCNPTWlq4S@DNqdc%h)H1E>a*g_wbM|%a0lnyS0MnH3Dmqp9Wvzw>_TZ4f^ZDYoynMMS zFJ3Ql{=VD#+@g6lXqC(JQ}|%L3GLFRCtNZFKpdIiGYwWU5RBP1S#>nahfd8;ogb5R z8ECOtM?WpF^5uSP>ouBf4XCmx4@@s>Uy)y&G>xuH1^zUeu0?0)0P>%jrIr!3wFl4{ z8oS3_baUQ5zuoBur;+cve8AYzJ(nsxURgf56e-U{C>^9cl@6VHD1gqN7{wHgfjOya z_q0aGdADT@;)YHrCXadQr1{8gqj#MmYXoGvhS;@!4UAD`9^))6=s&wAz-P~LS6>X8 z)a^Xb23a#xT(i46OiCPVWPI5k%4AdgibLYkj+kyFkA|q4p^?sv@?*Nc+Az4oPq#+u zBv|;Jr<2l`c%nViHnkdV5$)sD*3$)*>AEzhodO@Ap^k+Gj@rExfD>?pS5TYgz$$15 z9h3=m+u&0CHE?Z$*>S-5F3QYZrqbOG$7n-q`IUY0w5PLbJJuxV^lPtZJo;!h^H!Op z73Ea6D+pO`g?7rTJuoW%F*?&d13=cb9)*`t^R+_|MD+0NNt?&S@IbY=A*e*+TCWp` z8g8EfcynNh55^Z5oD8=M^jV*9k{TJl^C2pE+M0Vv`(u>XwG#%&#E*L%0hpsVm!bv$ zGW(EGcm>Nnmp<6vJ<9HFcTG*O_wau!(d|?3O!DdFB*mnbGIl*+!PWx03C_p>;6EKV z%XsX*W79kK;R#h1Mi7x;G+d)fy5He5P7QVr>|uTy3jkn1pT8sGfC_z8KMe(Stqa=L zwGo-9aOf?%-_`F_Drv-L2!_Q=0}N-cH$>oIG^BIMWNC^U@X~p*NKwOiun@U0EF;T-mKAsLYggjrIVFkYOtc(s7$ za_&zndjSs=84;}Kt|NX%PiAbAaz;4>rO=Y^1VeXwc%2180;US!8S{@kZK^X&y8Zd{ z>#|+JXPf|)(Fb^#o|En#_31ePDz1mK2rQo8E^s9!4KV41xgMK+**I>VhFGJOq}3uj zwq=o(>3AbgPjVAr9g%4}E7`~AZ9NACj2H6wO*eqP>s`%UB(r)|D2AO79b^0lA;S~;*C}Sd+CiKZcAVv?+ z+x6EN2mx5=&@jhwQ;rxbv=odM(<#nR#SJ4JEr)>0e`!NU(2S8(Xq&!f{zIIz6xX4T z4QM{SS(evVU;zDUz+3_xArl@ME#?UJI=DI#I{Y5#>N>RaQcuhb>Ihiebacl7a=ls} zh}=CZV9FSLE`iGq(BjhLl+{E1Z)++vj@8L>J~fpM7QrJQOk&nY-ndT8NED6SoBi|) zYh=uNS!i1`FQXC05B{6twxFyQViG){0~=&G&(~+{Ix7}-iF>Y9XUh{)upqT5rs60x z@9U)O&!s;F)*6}wFJc|ho+o+gM0wtXOOh^|sH4f4GFS~l?&l7Tm(NQ;)xsY$1Tk*V z&iT6&;Gwoe2gW50#39(YMXr_0hRcBi&)}zTxq~)#cbk zz3|l~!erKv7jaTd3vO!zY?E;`(H*3N&lwNbW=ymPA*#+8s3?=#7W#KQOq015CNRHdf{$2MtC24^}2{;u3Q7kT9Ge(qL4eSoCu9fKJIWF67VxZ*f4 zN1yU(k)+J>bKe!Sb+VW*{nkQ1-x~I9WG*LsnhDM%u`%GiGVg9M^(WaYbjeI?vPaU3 zYvQ;t2yLHlf-61{OjJ;yLqXIV^j%LY5_BUGT!R245C zjltS+)+wA65rbVvX^|=odv7pdP;DkvQ%#P*V4L`;&Q~%${p-b%Sw#hA}k_4lsCN zhg;IDL_g6XV?3S`<|7%iZ3tTXFdjjDh+!3IUQZWUgID0pSm@#UNJtHzZ>@ON*r9DVuvH3n~$tx)bqw)m$TVWbH96w+0Arao)HmSsUDDpP8P|miAt2Oi}9qKk9Amaj%IVd 
zw$6il0Xe7Ulc~DM9X=1k7t9!o^3Ir&t_5_P9uk61=wP{Avc_>pWi|4Rc8EzR`>u>0 zJt^;g|HE=QxF}!zC;y~;^5;KQrU>!uz2~Joe_a}{Zb~2h83X*g_n(#Nt5=KwCkEbs z{8OLAVYy`9?J|8aFRe|peChLFB|1DqCeO>pHEZt8eqaIjKEogww{YM!cz5#7F(XeF z!!B?RH|VJmj}yL!4dx~i0`vfsOKQ?qrk!3g1;-RbOjmVpm5*{^aiOdhgU>_D56bx& z4z*4rG!iIH$LGug?skbGOC|hQmnB^vb=L&+fCp;3a^yI3d@Z^opTsk@)CbAbBXlvp zIQG&r#vS8(i(yw!jz{Q-&<9(aI@Mk;jWJS8R%gOFw*`BM$(6wfF(rj&%%`sB>pU0; zxg~Xz3Tv@g`P4~3aqpR-fvU!@KKnz%sFTLr=*tJ-SSg?EpbuE#rw?gyUQYTB+_hF? zwO-JIg~>Px;3utxFTpsqNqT}CF;M&$XjFE2>lns{AQvvbq)sVL0Q@t0w=a09qYY!= z#{pb{cLs3q+yXjpP;cS@X|SPrPPPMgqS$=rc-%6U!?lR|T@JL6*I>e6E6YcWPvhz} zW{fn<8S)*rLOG(X6pcgt)JM=XQwyF07Qy6dOt~p<#-IE7#Gr+KrA@tY9|uRiwb37g zE_tc3wq_*bNZo|Qs%<(RvjLvfv4pQ4$|OFz{$Cv;*-D={G4~wj1-AJMOvZ^ba?o%f zB&Nqr56Bq5PyM?W8f#ODm48|x7DV||K8uAp?FcC;g4-V9hxgKV!Ku+8T zQMjMowHMFq-R?|jRbYKceci5UW8?Z+U2m2Frh`;e_pWXMM}CxL7{i$PTjWz%vTY?w zr~=u)k%Hdru}rL%hjCVXkBIvEqe12nW%gRepuig`7;U)Lj2nuVIHyDlrUNXw{Mvb}$8@RS?`q8XOeP{u1 z*I?jH9yf|WdkWGZ596@7ZvwRe>-n~P_+nPxfBG5_+2;Iq0Z-vcC9Qygb7c{pDRb){ z1S}m2D?;6lXT<#KYF5@nGD#OQy*}6FTzSYWtt@!rw(|OBjWGpi1RDX4x%ptC6zR~f zmGjMEvl@YlkEFXN5j8qcR+zlQkpm$T&O0 zF~UiL`Z57|X+k>&#>=#^^0r>pHC>&brK!bZx6J8l3eA`N+%Utr-W|7Ps}33*MYj!T zF~o5aaNPYy`w0 z3iCryQe~`ewW@*ItX& zwdwoM%l2=-SDLS8r9I!5pMB>?amaj4-gs2reCx8bG?LKs>L(wT&wk`H<>JXlkS}<_ z=Q`a+OwnnHBeLCts`^v|1C0>$MF-7bxQE_I+G|sgW#)pvt&zFf?8R*SAMg+^$Uz0Cs%!W|Jkg}k^fE;2NHnP`E;j-dXUT)Jk(6+dTGpcl6<*L zl7K^}1KLVEGhZ%kt}dw zo^}2=n`@wk{sa7CnzFC`lSUQCJSa>$2@w`7%NzcRp-d`Tn!+OQi)=u{hohl&c;oac zH)0Aj7Q=$g+>kA*sWaNeIn@cw$(=YwE*bf`@?>Og1~R)1gsOWwF{1FKPxM|htj=`fH$!oRd}IGdp)_KtpB5!8?NU z7Nzpj``u@6W<*(vV?e)o&NYg@|Er?wZ)4Sj( zm}URWjuvtJbmYWoY>EdG-DfZ5iB<9*WnK51uejRm8gGdRP1MfluZ{({Y6Pz{EH8CT z8YkK|`GjMl&LJ0(FYn}s4<;K9$FvEpp`YV+x7RKDqjMw%cp|A!(lm%PpqMSpJZbQi zo0^eOZk&&Dkw6stAhFCW*mm>Vzy6E=E`?eccT*?P7ljlB5~wS)ZqW$=StdwRE;@k; z_YtbzXEk<`i^g@==s*R~C?=w&GsQ+l>9{Ag*=G<|92(`D+SJ=IC8~fTu%@lHR#D>B zn36}3Q&Fygr+EVQD6B_7Ru5QZszgu3fgspcPSuM+zbmIitfq@y!|nDD5ZEG|ar>NFXT0%nTOs!J<;U87XWdKh5$u}fGFWAPE0QzB~HH(hCJ@!||)+INi_M+ign z^3|$bT`d@rTWlCBbTNv~_C)5Y=W3*Blu)aHp*J46d`~e7pSitliQ#gdUZ(@%$#Dvd z;-L5GcQ>F>0K(bjO&AoF(V1AZ}nw-|bO zjO76evcQu{edg)9n%k~Cti?#kYF`hS=?^WOQNi4jau);a@`+168kOa8ALU3P5i~eC zyDUHVg)f&+^ZxQTf31A^mw&bNFQ1etGrY|M*v>@$_jqrCkpLYS6|B zhIRDhjqGS>U7VJ)^9f^amHqmvY(98e`u(%A$DuQAdj9FR%e(LW1ZV$-G2X%p^v6gF zxlqQcQP)T~E9D=pHcEmPKHbgFs}oznHcr)gUS9J3kU;B-3mv8^!e_ej-cmKG2 z^P9g>KKk*WOI^fvyDrn4oAL~&q^o1ceaeG%LDn+BzQ}~_D;E_kksE;%+UWqe)i*^{ z^evq%rnBPMWs+o<_{_tC-h4DEAA1`Icck-C=aLk;I!|-AW(A;mW{6r>BPiw}V??gv zWzC#bUTN1Yl(O*m;1UejXw|hDuhHqSc=(G$uhAv{86-fv1BQ}2Xp3=+Uj|a#m!ezy zME+c25?}ND4{%PSm19b2!y?;y)UtF?m#Xo%&`nx-h>mqD;*-4cI(1Zm^K6AK^kU?+ zO+V22?37>wS>NOAm>snZv!LpiI3P~Bm1|BAVI)~IB`&Q(0R9ut4BT{D)j^$Kb=-hL z8rI{U#Xt2z$D?AVG6b|RLetO~X4(sjHEz6(l)m2! 
z#!drmkff90L0|GAkLsmyGXiD;bK)v*@lQuhY_w|rmGh>y0)+LxlGy5EyLd(uW2f?Lmv(D*ExhK*MvCUH!=s0@x{AH(hzuJOP%>SNB;#=tAox9s5^Qjf6o^EDnXpy{)vr_&j9Ao>uP7`I#9oVsJRz{zK~%OlT^cmJ{qse|C-Lrp763->{f zEFy~Wb3EN-6Y`i!L{Ep$!+G?GU9-J>bWz4ceae^0DTuqLG|?@xYWi)?XUm1>^h39P zzkKE!zgm9pkN)T78{hhuKZcsICRE&lj(rAYn_9fNl@t=eGn`} zMqd&PX-Krl@2NLg&SL~z9!hGG#-wR?;937?rTO8ey!GT`0x>+vD6}tOdfBfH;|ME}% z+wx0a`?d1;^3BMKm@yolmdnR)m8UPBm!JLg$7OZ(tn{G01(4Fr~P09>B3VAa$1PA`-TLE2O-+~!&_ zRA4tik*3Oxe77jex#*1MfhZ4u)4l0uMajx+anK5L*J&le9&oYkn*KG4!&&ZveaNmf zR-aPE4bOb5qh`gji~+rT`9#sdWE7kbZxvCEV=}c2!28^mdo#lk!-@7NT5EW`TUn@l zaQW7Npm%Q|)d81q%!)DRok2t8lgkhP;QVsGk*YeYUb&2=i82GEG1H+^v|L(=d@*iy z#rs|VH|rHyQO4n&1*zAw>$K@3c6C~O1Z`|c+CrO6(a?T*<;f8eQ0 zXMOF2j+=5o_xoG74r>>ki&$5Uur_JE5u=ooO|Z>C%$g+w8+Gs@yJA+t2XKk|*(Cu@ z>XMV^EL2^qhFqu0V}7+o+F`L=o=O)SJ?N6<3*=6@6sMIhpAlhz-u$g^{`|jlf-*qn zW*;&Gu>%z~Yl09uR-KoH2ss;_#&X`V^!8E($+at5&lU)Kl7hVSTwDXL>-@f)piIRkDB$fnD1;{IAI1;cGD_#1Q^{bI#MG0Nc+N{aplS3K0I3sd^6LOl7!6%dHzof7Dnvg-?OVM6tR?E0Fed>{2D@(P!8C3?QxPn} z^ZfJ6QzOe>IYLvP!+dpx6L5{O25|NM_1yIuUAN3y9%FJm3J59-9hnnz*ZZ~~!P&Jt zmqGMoEkvA*#)(M7<2ZB7Rc8l*y7qLt%`wrryhs`{$}C9$O=xC{*TaXjU3#%bVuFjy z7b_eDBUDM0NG*Wgr7h-v(kW+W?gqdiVf?0IeI~i<_eON*kIu?ya+c^&Wt>e{<@zPg)Khbb+I(vw=K6Q;itdxPs@M#Z~s^2v!D7r(e?-vB5llsCTm+47lh|3>OZ zp8e_Hl^zDQqZ+=xB2pxR5u|j8aSl`9h5TK7>XY!7{*cGj>s6V)|FkrTuv_%gLN^|N z_LJo&@BTEk5=MVM8dYo+n5(QfMvW%)MfP;!G+Yio%L~j;kl%9eRq2*L{@?tc%5VPq zzsBhGL+}6JmsuZog1qz2kIHubs&qpO=&HlV7$hT*75$7c#R0YGOZc-$%X2K0JtIJ~ zR0b-m4g7bh%jZG}gz#|G@03rxH7SpujF?OX)UX42vo(VP3&%VI!tA3sr~_7OCCsvI zK`e~PU{ceo0xfhf+MmtLCNh5<=Mnlj8Cy6A769oL%#-uR#KEtVVqAZyqG^^nm0{W22LNZ<* zd+TnjZ2;YggfaHRNjW_mGOs>u5JZ7T%G-LgM6WT%=!a495?Wrr)`>RzY2>PWU=VpH z8kd&zt-iSA7vslUtT#w;jCI!zSmFHqPM(P^;?Mo=4DC?1vN1w#Wh~BByoBQt%8~PZ zPUysbv^+zFdvw4yICGxAp`$j!u=a3h-2$h4q(YxL%QI=nXu%_QcF+g+8g$sMQlZ17 z?djnt>EzZ3O{cOt8!5gO7h+Vk1@gdvMFZ^!33!mrT03IK6k5kd$){$bc)StDB29F* zn&^@EyJkN3Szu()ZdP1l-I-Znr_Z;1UYB{2eWybmU!C@lCw*5;N=RObYD!YU@_RiM zF9wvDbSA_?ZKd#YO+m@H-!2}S_qC2azO~=A3MQDP}y@AR)mwbm|@^OVix@AWp6dWcKkE)eF z8aQPDMWij)MLjmuJ>yFvmh35~ed(601d}o)ZnZz3=H7Zj9nJ>#I~tWxAk}Lf09-vj zmn{N;FnxYz0U%e>mq6|@p0nA4{_)ru*HEvWdvucAWpH!tF~2YgdU!yH83?_2*DOs5 ze&q9CD!=!~|8@D;laGsa|C298r_Wfw^^M;s|Mq|Tzb~KpC%;;Doj!U^r0M!2@}cvn z2mVlCr^A$!3jsmT3@}@uCFF8dCURv_=fL`b&{^5KufA4hB2#>ZptXFENEF)4l%o{X zYJ}|He(qfwWevt)h5qzM<@_tZP=?6+iy!>79A7@mn&@PJ^QXQpW~Bumvj3X4x>vJu z_~AR{)jQ8Ccwk5r?CRfeoOcmu}6Zy}UJe zrlnWb-Bg2Q+R;lk#UerFEx6G_3%K@AQ4Lyvv4jhco0r2UCs1rQY+3OaM zd2JIhYT$g5-pZp$P1nj>Ks-;pB!Dr}a||H~yw|XM;FlRf38kYgo_0IqU}d&+ioH5F z&@x+fc^e=lf(#}JFY2mIs6WUQ_wXri+JjGK1CETpqfl##ly%2Sa3O{Y=w@

>X}Pl^_{JvUF+O(hCa|))7w?gx|K~JIsfT+{4MK$YyoGY8lrIBYI$Qo!*)Efnm;u=>4+0gsVrZzk-(<*7G zrJD|sku<~i*ev)y54d6OX`FFt{fso%cqfwQSvo<=jXKQN=1bFrMtDu1B5T;2jA}BD zk5q!rnd7pS$*ik0_kE)&Ze!H>lLn?Rbxw6=1$6?h9X7gf){H9PnDQP2U_Es#O%(`o zY&&iZS`(cK|NicGzw$cI;6Vi%lwg_Y=bwb}iLjg|#lR&*OSvl)SC)liKFKyTK9 z5Ub9mj7*T;9-nEkDCO!7kAISR8sBR|m}`jb`SF4RJsLx4dwoOp01Jq+HyFeL9+|~x zd2}N@;!&@oiz#!#TSrNlho0dHXz1NK=9VGW1kbh;1H9-AyR(96QXrNg8N+r)23$Oi zd+|?&4dZc*!s?z;k7*PvnIAN5Afh(UCS&FTuqYnMX=FfJXHNjPnV1hk0Aw87w$7k= zQCMi6NlyZgh%dd{Y=*&jE&4L5;8sMNXsDDvsy6ok8$(;_s5x^~If#1uJmcxfQJQGTIdPzSr#;p?LELFwCLsh{31@CT_n)zwwWi-)1XyYC&7!kK)=0K zh(9iU6V- zoxOu-4i0#z=?}*laryKj%2v2zh|A2gb?=Uhp5ykY)hwdIqg?#tXHiCB4?guO2JvEt!M}_p9(m>I)2OXx&>VVy{Iysi6!ZXTNy!VXUT$ zXngsznTLldqICZdLxMrF@eT5@ggz%2ryYjj+Cz6fc$9{ve!Le8z)|(npJGbWXufza z))+eFWs9uCfYE#D#aNH8m^;P=-mxaaz2aqf?)-Khm*^@~N_uYU7LA@rOukf~DbMzF z=-av#x~dcP0JB10r-k|H$$9R(H5BK774@P9-XhwkC!?5H3CsO{w%8`3sPdhOB4{GCw+WLg=|dKUNvriS0pbJv0tfp)>v8WE2>6?huS zu}!Vxu%J*7Eg)&PFg|raH~O<{nhd_Y0YIN!jx+6N-P7I>U=09ptC~?&fx5Ot1MfUD zWUkWXvl-<=s5H9DlDaJI(JgDfvVNcrC>SD>o#fyA9N*d=BNu)N!fixjFA#T3oiV#m~H>Tcx|UgSFMhjvW)H{>rcfmL`0_d#FG zCbp-YKsP;V0D}$|dP+GpmE*T`)e?AeoO)ip=YD;GfZn^2WmCPTGio?f0x6v%9bRc+ z9}lAvypj4vhg5&gnud%{&#&`{be%_mSmpQr$>(49%KXO%L?|Lj8Po}87>U;n`=+6C zvQWc3t9cPvog2X*pjpEgSulh8N)OWz73fC-&tag`GrY{HZ8sSl6EZX+gX+1%3tU_0 z9=CY0iwr-xEiVmJJ`){}kXej+Ek_80S)7LCbXTN6b^H4c75}BR)sbTA)y5~&bjNk*=ye%QC5M%1n$kv5IaA4 zSa?broRoQQx8mHtGXxBE5llG*1PkL{p-A1*QiKFqbC}OP(S1Cpse8I{mm7rH7mdXQN#$(R#a^KDwigFHHI^M|b*n;p{hlKTt1t}BcTLRb>g? z__bzj^pdrbc~A*+H`0*dW(=7gnAaNGLbP%p`WBB}kHa$prYC1 zLXHlzXhqj?rqQdf zqCfB_TC?QSK6Z*_7JH_uL@iL5F| zw#jYWeGr=y4;-pR>;C=R9%n?VGU(?vwh@Z1asN231RHtPCOQLv{XX8=W(BYt$8Nre z8sndxoJZ@EpNPG0d^xV**&gGnV3aG!q#AtVwy={YS>)vT^6MMCT>!NajtT29hyLmS zWi}hkutpX+skdyWe0bQ4cAvQ$4MO&d2$SoNaZW`pU)Xbf1<*jhVR$s6v*~S|pfd#y z*0jiS@(v%zAfPMYw>%#l;GwK%KAkZf9PX#CHXJ^i!^`|Ls$j&Rh_s?NHDH1j?qwmL zbp6QVHR6|roxG+lHO;s9aNm2%821yf8`X1*T)X9#I*V<$F}a$=bf$xdZs&P=>FRLj zrrk*6%^Xb0uGMR-MK=BlWE?{!FFJPhrXH-FMLj~?h+?-tL!UNJ@t1E0}=xh2B1fW}=f z+HbvXk#rY>J4C_7t`>ci*#4m{asdzAuUcz!|5ZGFbdf(#J;8~2iIff7jA76PD6-DD z+|trUF}Oh49umqaurhej>XwBwB+WBiDAB8ek$Eb3xv3XsS1_5Kdl)e>tzjfX;3Nav z-`h?~cFi-C)Ucyp4ZigQ7Vph5t}=v3F@c(61F>#y1Vk#6 zavE?aJ~elW5%r>L7)Rz&=}4=+emkpj4Lj)prl5o3ov%PeBs|CP%kXW0ui| zXL#=B6ZZ#JzslIG8$S(aib`EK3oh-T#M3D z>DV?^Bd#an=d4uCTsQPoclk`~Lz=x|w1#^KuHFEz!8d3Ot^=+>6BR}onk`3{fKB)Y zW#S^46V_9x$sde&jZrDV$ECGv>)81mYr9K9Drjw?UI`fz0B!)ZMlSr~vH5>x$@OrY zsc0Cte6GUt+B~QQ`dI8$MLFd83o0Q#-9Qeq(;U3hh2BP1YRuhjY4)#1>;q>CBY&Gc z?VEZJ$Uuef4N6yIyLSNC2ANdby)343-6wd%8}5DZnb+dBx{agHewv5Xvlz~ZNXgJ8wLdh&wJcGiH2=JO9?I~zyo;xcaCM%!t}^qqHDcc~~pp!8YeM!5la z*~VjdqmWV72>vo^Bu`tIb#y$4!+S@pqfrp_6f_siW+5i`81(HC-#}J8f8!E;Vl_5; z^|llj%ws$USaRQ*3X(NbRB{^^ywFAmWbR%jLgtJNc#TF?`JAit7d(&8y0am-pmMFn zWwNHv$uXgO(x^hu?0q0_uttIby-UyN(0HXD<2gES`~525!lpIoP&{UJ*b-1RLr>c5 z$0+6g5L)B8@d)HzgU2}eS=Q3-kj795k|ai!Rz zQ$fC9l&z_O1bh{{0@Qiw9$^Dum%^!KE_olm zQ+F^Pft=O;zR%_8KNbmIcg#i9porwA^M?$UbqL@C`C&Z0N+(uekvf(^p=mJi-}!RU z=!P%**XpUKG55htF!QS1u*e^>2qr8`dMyYz#J6~93GodF$!9h?kWoatR!ZBV_4Hy9 zmm|Z^dOV^+s~ACk9ee5`un@@x{IgjR0(W+XuVhNcBav)DYmQ4L?6xa&!5UhXyv&&U zB{8~oJ2KSbS?cI;J%mpWGWWD{AMJ`VD)_W0CFc#Z*hIoG zl?p)xQdF};jzUqOUhiK&GFr6+UFbKrNAmk%8ek=ZgF~Ii}P~? 
z4i8@;N7-4ey1v14fWMFK9dj>JN#$6LXV9dPd+yzOXUJ4Vs*o8bs#o1MMbN`kubp|B0_3(;uq7+;&Jto#hy<%9e&AdA@5$m8A??E*c zJcbqm-vThTD%K8gEqB;0U}|_${&9>dLvy7Xv??BT^UiV9?>!%zgZ=2=e-QEDPHZqr zALW<@O*glz*x>nhP;^a<{SN-KuYdw$1DVA;52O8=PsOT#z*r;NcZX4KcB9eXkIm7& zi2E;QulSpvy%lAQMCIx#$_)$?K>U^k-1}$$Cc3}z-Dtf0T2ws%=gE1V(QY@u4c>io zuNNf^IkG!`^PMy}wdY@mQg@JB?Y79~dNhu*mF{;A0W2rcKODvuVAgy25c;F!&=*yJ zqZ=I{P4uHSt>ItK@_e)=TF-~S=wxs$atf_l#fSmX7#;Jtz^ zl_l05K=Iai%9>YW&;!sTGuG2UCgh2v}hzho^L3+v<7)*j<_ov#4u zC#Pe6FCt?L4D*wV^Nc=v#=2h`T$UzNg+{%Mekn}q0E-)~tuZ`rXxvoM=wgn8qaoA* z(D3PLsu#p??KG6`Lb5Pd5YT0u8X}Fc7wH`u;hO*&^$gSVXYB_a05T4%$wXNJ7_Zg;U4fK+AOT|T)@k7qA%R0P~cZl#}c z(d+gRiQ+&UZIqt7p?A+@Hw9_*GgA%F%=KL%!`dAIh;x!wMhrI5R*J0y{{YwA^jyJ_XCDIf=Xr;1c}6}yns#YP)^F5sKb>b0v6 zjRqn9Dc`0(4702Mbvk@*k%|GN!?&I*pl)#veDp)-htIQyHb%11Quf_|Cb_pB8p56) zHuJ^T^?-BFb{ZhnjD~^#=Lfi%MCZYVB-{#db1fpW@WI>&$SD=L#vNjz7~CVRAzWUU zPGY!A zk5}ZS4zwL*52nJl#dti=E^HpRXFY(=JTOemr0pxezh`$R7MOX7%5Fhl8q0GLH1k=l zB__aun@txU18XrFN>gCMvkj#tO5#}}I@d^f*Umd857X)Od&z_zD{0s+l|1kD4YMO` zdVpJ84K?o4_0CySD8c7tHwBx&TtgIK7I;FF)=)qSZ=g7G>y$JW3@~=1FNV`= zS>VDttkx)2m8~fp6hZQfwDcdcrf~6EBSCcWS_&^jH#Bd!KEwFDRu|c`b|nqF*Sp3j z+L8d0b#Z((jC-ZL6`%ug>w zT^Qz0tmgA9OznU66LJ0QS@iDQkM?r_0chuE+n;|eY8dEpw-fUn^BMMIe)1%$-C;Ce zdM+Bz-NWE89{}X-qes!MZKHL381si8WZi@ukwEyBPe;7^rI=2Zv4$tQ_Z~!@d)@x+ zUq$QatWN z2|&ET1FydFeB5$fNyW}{bKf^gS-GrZL^h94UPCJ zc2{fg+9J-+^}qm8JV*UIn#_=AbQg1DfvN@Szs zcynXJwRr-U;SjLL_?9|E$cGN+9MH5{+C6Rt?JX`;w;HVjR5Gq?M5ADRnEo_uuHDkn zu!l=Jh>Y1CG&Tg2wi<1*wYqM*0i;(Aqwiw&;h{AGGRJU>{?@6eRXbULxLmO|=$hm4^MuhC^!sye+Fc?Y5 zmrekn;zI*3ZQTK3-+RFz8b$;Eh1YF`nEm7P!Jra7Kp}_^=YOzOE*iFifozz#h9Bb! zV`Q$pbemy$QInxp^Vx=vFU}VjZu9A)OBE|h9|PM(L2Cf4Ft(f8#pN`{czos;a<4p} zH*%o2?Y~5UJkPL16K2i!v`|JNvmnXlFFjD89{va;P@D?qHa3thhU9R6kRh8vyBddw z2w_vBrhto-c5!(=iE|X(asg_))zdOrfrpE6&&CN*Mej_bms?#}0LNw}qzh*o-^{(L zh$tz7bYl`pWG_t+_XJLu~bm%zOviX_?*I(ufs-V zDaK1i0pu|oBk4t&gSYt0aJ}^hdSq&?;k_ZG6G#fwp=&C(P4o`$#A0ghlXXVsRBjtT zkEh)D%qOFQhjsCjALE5$K=FD@`{p>|SG?5bgX37(D}r_CyzoL?k7nG5F{0E=9i6@R zL3IAXuSN4yACKN^FGqEJ!TJNlKJn>XkI_4CM(?@jqV)J#tUh=rnlHZqGR2#EddgZc z&tLg;)b{sd^ur$mv;j>h@Ydb^xIqt8SM%6Dei)UueF`wJcz#=qhxE+iq}!Q{8}sk~ zDC)>s{WGt!o+xy{Mx~9XYNs*z69reEPxUz?AB+EfIKLF>XmvBmlgjD_`QtWsNipJ1zgP-Ku1$) zuCGO6dcroIvLf0dS=UfwvY5}}YCKNe;^|>-d9^SYA1e7rKF=a!#-1P;!)~<99IM7X zi!#xdeGIfwNxi|7lXKovU$zqL8Y)-brM>!HzS)( z3!Sf?uy{>C-$&=lt(!IApZQstFVNPx)9bYGIP>~~Xzt}ait2V9##S@|sE)1PbT@?2 z0%gR=0dz#})wAja0lbk8PtY`KY`9%MciwYor{^p9Yk_yy+6ppml0`GApk_4k@hwnX2!$2XfsOPU{SmGkaijJ=NoD>AMH#L z=@pz=*QDd%ZYse=<@dh(mDjU78cZz1@DXmHhvrQR<)5TtKo*c`L|7+&xA4L0&LVeX zL4w{~qs9bpBA%XT+#x=bE^srn=tk>FF%DhS@B}?KD!I?LP@~PM_`KW0_Wr|QAnaqb0MNP3(4+c+_7oM@-7bct*+EKz^Iwa0aagM z-1#0)-3n*E9ODj?kMLq_u#?+FP$o@*i)fzV_}ykK875`{4JT#(lvU}&urik&*xCRX zLn|WvruS%ih>FE1YsLwq!3-AID1@#gQ^q9S#g7~ZqV5RHTNN>@(0g|A* zhmA}olMH9Mchu@j3tkLc&j31x&P1gOr-%Lc%*ot30iTurOrr9BrE-rf%^xcf4jET-+-VaNCj8QNW7c42Bu;(Y0EYq( zCC?CX7kXN7rz{w*)kAm@@4AlB?jQ7_BcAeXlwnxMkWFkDvwPt!h}?hqrHnf136(Gm z6;B&{>0?p<^k<@p;#@=6CSH8^wNFLNu42b$s~>+g*3VAT09FA4tNA!;!(mkM&h1^% z`PHumNHey-!0`Rzm$A9`LNvbe)mS{nSn56f=&u0Rm1x~LifijCP|mx{%h)}8iryVh#Jz9?_AqWX((&lv!G6?U z{un#}eGznbz4ZD3Vm|{ol84GXCu7~nk_DK%8Gw*^2?{iH);ctYeT282jeDNG@yn>b z^hz|Kt^Ekfc<0j}{WK~z(!nd6UYti0IrmJ5H3}NejBAYDwpB?_%I$d?uDkaDm>NXJ zaejHubls+g5kkfcOJ~mz?}ry^yKN7WGTLNQs*4dI2N_UScrn3o*3m#Oi$c{shS%f~ zL500I>=LS7vEfV6Fh-q;oMQN;qhC5b>H}|^5gGm#Y_#wmQ_guqFB(x$T;hs3FB!Lm zz=V$ao6_yE#weV#(=oun{0QSmXy!1?Ig2%A(ae9e5fF1z*_Dbp%7ljB^+XN9*bAUIq0Fd+ z=+W&ii_BwpSfD(*mVeoa2&Q%+Eou<~%D5u^YHk}Vl)k-`t!Vbpo4tL}ZBg_g{il;+ ze|UP`teK4CT3CSMe(h#MR4;;Da-ZwWOJ#+BssZT4`0Zxp2~>ubZ(tGy&hzeVxYHZ7 
zptqq%2+o*IF=X)xbJlZkzUcwO)K-6+LqvSCLiY6O1=n0hm-Q{uJPZy_f{XZu`A}+^ zGvu{vl%Rs5!FFy&WO^85y>j_VFIC33TD~{EjUhm#!aO`}%C2ZBq5NmdRgJOHgWzS0 z{p7{=!7v8LcZ*btLJBV#{Yej*4{lU`C<;TFHgVbtczCYGIcpoAFi(Cf)&!wnO~=ev zFjkEgMnGPlT}|L`3_c3GWC#xhYAF8(k7IH*#khW$@5W=ZU=J=#WNnyK!BcKs6&526 zvE#Dvam(7M7=1=QpZC*K)M#Q*O4~{t9N$58T*nexY_4Z%NacAAzx?O;Tf==6A94ec zZx_4{Z+20rb_TO)(A7KdG1htPxUT!%e-V|v<0xTd%JAhBBfPEKVXzBNGRpf$00Y~o z0xVy8Io2LsI~zxZ`PTH>Zt<$_y%e2a`C`of@ZUuTx!ePMlve@@L*~_3{rtyx|Imxi z?=W`LpZ${cyNThiei?&1;rWbrGGYvqsBMv%GrVP9-{PHY0C#dy>j)k(Eu(UT@6!`J zIt0CUV)52H$(xlfG6NvNWW)|IpdQ>pml6iXbVgaC7|w#nE6BCZ0>gzyrW>!B3&8F0 zAlfg#7+XCVcym6P=1v9Co&}(GD`ZN13jw(mbg2R;ui%j@&+y6ogkHY=rkD#vPZnU zt0r6kRs{?;sNrMi1Oczcy~tOm@enKkOg#t05b)k`FZI4wK*4e!w;&pvh(5%qX;9EV z>K#*fhkF<|JaMo;WYn|AiD8@zn`ZIPab@>Qo@=y0yPzS|v1@e(8BNe5?ezd>8YFmI zpkc_}2&AETo5~p3TVtr)YG+Zi2KPqWgYK|{o6e{XqKV^>)d<3!tH)RrXTf!81l2bXs-DjL5Fg>resvB~7t==ng@ReF|1< zYzM|&(55siyGAb^h~rHl$^6@lJG;g*Dns@fYIT?ntM-78rT*2>7k%oz7n9+hrh{BR z`QQ1NF3v({c*Z$OqoPBo&fq47HUay+?|t=kIYY)%5vtoAxHr}#O5}smPcI(;aKoxQ z7!{tVtXxw_R9@Ga3pqJ;o6`a%!oZo7gVeJ)z#H{20u8JN;mNaUh8Ajg>E_h4;}G-6 za1@H(lZ<2%4Y@$W+`G@c1{m0g#YHfT&!7@D%`lAa1$V(^q&tUw-q%1^`F#Lm^->t! zMp5T`j?kT+p0PN)BBzrKu#E}I&@F4`(Dd5-j9q3lyd#+nqZu+?TRjdlihez@&c-X= zL&39wi`y*}&_vlN26Ln`Z!)CJ%pBV&CUa?e%Z84en|U0!)>!TDQG{wA?vQ&d?F^Ly zpUY98*BSm^i7VG3*R;cLSdj;Ax zm6t^7dPwH&4Yits(@UxXzz&~(A^Lb34M3GUKh}5^&2Ro@j2^ug6~=T8y6=4X8!>zG zI7)ohLPh1E%~;Rgei&W6>&Bh^nEm|^vu)}AzxqF69QWe}x^5eTSUf$C&Yk0!UyNfh zM7ex=KdyiFe$<#xJpTa0xXDx2HuG83?%j*|-~9y0qH-?`2FN~c0S3Jl2KwwY)}t$g z8J*IyQyB{9?YDTwqiBBWQ?Z$$z{k+zbDxj;@x9m}hdbuqI6jQ+llP)AyNm!p$Sb?o zxE}hK)fIp%+`+ikSi)BtXHa{@-d?QFN72NAfWUF~!J|xHKYUwiAH5Q)0ZD~LC0XN`^ylf`4A89&!T4<16}y`kp~n4Z)zVvVF3GMeYZLvQzztHX-6x*8_* zq=lDwuNEFVt}L=;emV>4A@!Kxx7g0RO#SFs+(EE^aFDuY1$Z)dUqsY^#9N~p)&=Qk z;mjdBnR_l79fU8rwT3^7*-~F>z>Cy_wnk&F(ceG1kDY3zUh*r8cA1++=WBJJQ37;< zD$}M=8C3yd8$s4yPH-4zOXzBpgZt{Js6@3}?lMy!GFjGN%?U5cfBwb|b&!@Bg}|Ps zy=7eRQWJUB@m1$$(XG7B=he@yz4TCrF0OIZFy`tBpXXt1uo^N`j>F?&RQ}+%zxKKc z2ldE6^dt*U#jveN%&>sy(xL_bUuBE0m&|f;WmUFEMY8Bj!M)<$jyY+J zd>e*j5aa0@!#ZXWi}1dlc5XM~dJRD~H()LblVcLC=2is$SuJTng2heznF@G`(ei)| z8PdMZdNX(M*7Z!SwBPI75532NijV;!EcWtx9*-ZN#`J27(UWK}D$nvBR?M4&HH_AW z@ry1+f}CP;G@=TK^cGDUX3b~^-}yS!NMz5Vn$Ad|#Hz{UhJ~zKFkfcJs1~$IWCuO3 zp@&=S3}3C^$^h#uMt5@ZWetBk7gu<}xl^)CC6)|o zNY0-r?9eZJAu~3+Q3+K1yrlgI#Y!`guAZCBQwWb+NemGpZ~%_+WCDXBkkB=|?`32` z4;0F{jxb@@oHPP2u+c-2iPf!+%aF1F$|ytjy=L4pafLAq=i4}#tfI+SZ0cix$ z&f{>AV7Gf&*~=}Wg8?2s|6Ej{+4Tf41ib}cu8|zJMfq(22KF1b2R4hJUft^A9OHEr zt4j-dX3=}`<>($AW4LPohGFvfHGHjNkFM& z$uy3>`D;=AmCr@_mCwidgmpSPiUxIb2JQd=|MW>jK~wrq8fw(vH$@WCz5ZGYjjvA8|(@<-?Px!Ye98 z1U#z417x%onv!Ic)yZYtK6oXX_7J!l#ij<3eD&1~m+!iJ z(R%rnXj}Zo{mh}|e*X#JK`pu(b5D#Ka$Wo0KaE>F`uh24Z19vEl%A2@@~&sID82Mj zG(PSHnZZiIY@}2VIuO;4Zsd!3oi786_xE2 z`obq+aopfe)LFar*;zE7yBF8ywXY_zo-()5NmN>Z;u12G5?P8dV}Hu?@4s*-jsR1F z_A9P;_lZqQ-m!WxNRvlvM$oXXd z1m+|7QP&x{xf~Z^eT|?%*c`b#kpu)uGscogvQ?~M}bq2^8+uC*7*Y*~cn zjmRpmF1_hhzca7RKi560-RHL%g^+%xK-G(bp1Vz2=ubRtWW;&cT@V4zNM4FRH&u5U zPG*3-U|}8h1HGxYdc!!{fUE4of$W$%@MK9-efEN|@KBv=fpHcoOCNxPTiZ-cT8mW0 z@P{d9=wT0hWzx@V&S^y0b)L?a-7D3l3@d8BbobEab!mw4L^!`jE8A61asepx?O}mi0x?Lk$4g*7)&W1KbxdcaABZ|uXqg-0TEUPkku7Eg0rhQe?HlgnVWD(p zuDb)`S^IuXOuy@2g*q#bkt{g1+!) 
z<`x;YQI9(EaXTHw{OklEi1F zSJLQ$buKDp`}DgmDyT-gP-y1AZEq@wn2oQZ_VRPF|MK(E+#kmK?>&sW|J}ce?$>@j zZYp?3zw(9fpqvN4`#W*^?vogN^II|d(GR2lv5#TwuHxPQ?LXticH+*z`PWhZ?3bd( z*qZlWEaVtrTPlMuzYyyRyw&W)_~$>5-e+En>!}Lk78NmcLJONsvy~e0{3_Ub*-ChDyok zZ%C@Y;9*n^vP?2%c+efCrVRQN9)qglF-`(@{Nmv?i$`yz_lv2( zJ7?~ETtIzeBdzBTQvXe%gN3Iuk6yHeXhZ3ap0ky=TjSheVO^00V^vc zQ07L^5|`i^t4{>P>f$wX&;Xcf64cLF3)jd)!%PRdFF$)PAYU49<=2p&XK3gg<&GqD zO@VqCJoj7k-W4EJePSvtbH&hFZ_)^C_AjaPH@eY~{SwdJcDn1^CBUT@`};aYtPP%f z?$u6@x0aENtayj^HgXamq#=U|q>D7No+Y=lY5?K;IpELge^YP*5O*CZ+ZJIL2cBWz z`CL}MGj{Ecy2*3oou77Khi)*^vlOrZ3Y9T|xm{B2B)7l6pLRj}QZxY4F%#^e5A`UL zE{t(0^-Q&*>(JAgpHBdw4dE*0p#gR)@(sFPTj3a7cXg-pbiPI~SOrLK-Gb3f?Q1Ez zI9NK`zQ>Znn}Qs`KWnS5w1J!N$@fM()bl!Xnnb}#@k!~rjpc{M#`o_XMde@q&ab`B zr;;HJm*&Y?DC`WE*x+C?PfyKBiYlDA-O{oM38m-u0J|?`&i57)Kzu#(EWm=XmHX6X=+AK~(2#Qnc_&n=WROIWZ z76MzmUkH%lHqUbQeC8BD(5Z}+VJhjxyrg^P{-8q|a1^0Ox-Rf?Czp6K=yG>GXlsiK zP&{$yP;4i(xTleO^bO$^Z7QJ?G!_(!(ml3S@8cGMvmI;}ZldtaLm#fTaaZtQWweR{ zzVz@CL-Hy>je~1YZy5rtMep!Ilsnzb-&P*Tjqd3xV_8*INPln6}rVsvOVe= zB|f-w814PT=-pg;)!EWJYKc|joil6=nwJS50Fccyox~Y9fRq4;O5v3^Lt-& zgc7~cYoTaSDkHo-L#FWRM;?ZG^m5HL;v2qoGNy10;d@P{D3BvlR7^6Bn#O1EwHLTQ z2IKC%IQS>uiDhR$4dmI6e!}>B&~+6{j9de7(|P_utbg)f*SHHfZL*LN_h1~{;J%6^Zq-r#S_Mw zCLi^`_3MnQ7t5!1Cq!AZE&>6sFJYLZw?&zSLQ{|Nw@S6k9MA>Zt5G!ekV6d0{L#D7 z`|{UfS8GQdy5Bx|%$?AU@4bx-79n>%KXr$VM{4&0vx6qOa~%`(jcu0I3oAT($Pcy#7$4$lDQ8KW&;C(O%YRdt|XSx-}$O?n{9MteM*Ntsv2sEgY0pw5Onax_{R zaQV<}vPKH6cQS;nA@QpK?dT`gKm%_m-1}=d@~jT{+FY_d3e-QYsoS~Q80=2d1D8BK zZisVM&-C2oC33>L3OWQTT|Ddmk9(QVE ztZYWZ>mChmvo!Thsf1BPraUduIT`hJ=Srr)kTJh?L=62mk(=c;UOu`~Cx!n+7mP4tb;BAK<|Q01fwZA8BvFxU8X*>@++k{u9$ zezWdO`Q6(127M;|a>tAZZ9$*38StEXL_ntF!*pVp^Mq8gzheM!`}B+)`+4SS1o5NY zWu8Ic*&(V^3-FjO_Yn}9ep$wFZ!aD^IK;sJv#-A{iZ?Id4YKVXp1X+f3P!)ZaZz9l z4Lc*zxZ8-|3P$%HUSP17bG<&jkirNU`aRgU;~I;=gq9HD@v}vSajaZ+PoMWxu$+L! 
za~8lc#)7H%=X=F$Ee6BDLdD$P1m!&*(!RZVotu)WgnAqwxn-=+B)8eb#_f2B+=uqv zPE4B^G1jXQo}$9QVqT-{v^fgZ3Sn+wlzVunSPpUItf6LfCUeG?~0vmha7ap;|z~(-a{8<#BfPNFPSx~oPydxO%8paTf z;~l}Ewu+U`?xS_x3^6|m^b#|F!+N-dZGiFhbTlqc1;pTU z4W)T9;D@xg^O?VyvPh-F{H{hYCZ4E;fHJ*+%5)4%na16Mx<(riV%`(wWiM=n*xuUa zH{n(I%x#Aj&VYC^%5I0zYxPizUNqvxHES_1D$pw!**rsB4nZoHQ;Zz<%@e%f3H{3M zox5p7#IhE%84X#DFA;cX&0I<-xUv!r4UCL5@HUgLjhYOGvBFbT@w5%|!V5+UYVd+| zC}T`YD7og*F@^&W$~{&HeeW*MWL!Lg>j8a?$-VPNIov9?W}E^fqZut|H9olnVBk3o z@L*9!8deLAR1_X^;x;@~OtkDNpm)OSw%Dzr)cbc1Vsm;Ca~QUSQrdY!-(npHzy9?& z`r4OZJ?4IP7UjnuMCb9FvHj8a11XNRDYh3s7F*W&{GAVC{{BU76RR=j^|PnZzJ~{m zp|J_hEyi+kaT@(rKRS4C*Ka?~mcH@$B}Hdf?DpgwbkzEinT07eSZ*J0B;4QW{J@Tg;}j^VsJc&27F-$!pvD7?8Sy> zrm=VLH9u2_%S#v-F5=OwBfL^7F<O7)vvoMc!B=hjv2rgl-!{<-JwXMYk_SL-iA;07qZ45iLSEZ!Lx zi$IFWNT_?=Z^g{6(BTNbqJ(TEwHU+h|4rYt?a?&zXO4QfoiQI5ISWkw4yV$3efF`#F093AQXzXT-Qj9J^<1>sp#$7vRByqvQ<;iduU-RGtyn{Z5D)Z@jYWyqOjrxsRU|diF zh0Y>zjiK*6IuQD$@Ni=z3H!qyw1CMOw;uD&#*m3kDD;+HAZ!Tp8&9rW8Big}cesyW zTHXKQx$aYQ?tlWA%0?9e1@}^0DV*Uk8HV2#(p(efv_Xq!tpRj047Y-b3^~a$dV>nG zw6x;BqhjWH8V12ek()C*=i0f4G^@ikihv|T zcowC3?z$duz+!oV;k=*VBVnP+4m+KYns_%`Gv4GV=tLAZ&){5LA%8 zXO*j{kZ=?u9^3JQJRwe{;+8XyUMwSE-pgh&6$rk#yvVS^kd&QWxZhH>`(d$GVn$Mr07op{&V*;UNWpT-T~ zB0bC|d}TRg^Kvmw)!BQMW%l^0$2Zr&-lq`Q)#}?%rXvzy1%R{K{+0 zYZKkCd^xV)dBTv_ap&`2j?HwIO1C9Ig5KS`&qv%jX3bETfXEtNWpjn$(92R6pg?v{ zpF|B$ZusqQ!IOA$@1J4p7@xFy`>m+G|8|t`--*(vUyEyG>-x=iGk@;+5a~htoX>tb z`Mb(9W_a4OpS%TtfIswZ0pmB1p5<0JI1K*O0AN7Ua>L`S^$wubxPQcRjra+Kka_qJ z=87`FrF8lvwinN0{qT*re)K_B#P8sHoSigU?qlDg!*){)D#}r#8b(Ky6~LG~Ev%Oi zbj{v;D^|}~XJos)e*k#9javUGtXS7W@Sg5gs9_}PS2xkv=w%~IC@*_T_+=>lkhS!n zr48QubUIG2UxTJX(Cd>2vU4bUSi@ktO#Ps7HzI1p)23WE7?n)5@z1WBdV5(v!)J^P z>7eMzdX~NV$q;pJA2Xtdku5^H=JAnNX~9_CJMWkLi5Mcgf+EB7jEiyT`DgJbGJmsP zAlLKUaiOPdbioLsJqv8sV%?Bq*_8}4qZiZoI8J1l-%N)r0F{Q#^E6TSg2U4}T|9ip z%oKQ*@@I~{2)XO2dWFXWV*=AYsP`BuDngIFF#gJ08<}XMH=V1!HZ--?*InCyDcjk0 zV{do>tpJ|zs7@A_aJ>-@_zzGde;Q>#Bc?u(My3e7mj^WEDXi#PK|vORN`IcmrCyQi zr7eRH++Y0*|MF?)sxzbGQH6J1D{Zg< zFq;dnQnFgoEpUAWkDKc`+wS$fM$GrReQ{BSilzvVAc8+rXn7=`v*y9#LFlFTo^$!IxC8~^y5w~N7QPK-Ne~UU3_e2{ zj;}mRALRfO3HqET)W|gv#LL6(i?=UvQjW7?dI37F^~bF}x!XkiLFpG=%Zr;_Vzf|9l+0{A#v^btD-omv3Nt>may!8pS2v zuY1@BFTaSlK8w|p(-=Q|nr%e&=GOo{L#OpF1_dyt;1@B7Bv|;Ie2h4J&x5C=pBKThE3MO9RM=4x%(F2 zP~|7x`|csfONVJPe{8CxJVeHf0N}M=yCoDlS6Q^`&;Kem4}TfeyLY4f>T5ARVZQh7 z$NG(*#l@fhNgRIncjDl``EN1oc;*-9xf9^($w{;i?AuVtTFIs`g5EcN84bo$F_Ojn zd8cqGp(O&y?|4e^v$Zoenz1FWZ2!=F$7yXR^0E3y+~k zF)>eTaK>-Fi|lq2lx&@&(zn8R;&o|caFg>hI~2X$D1){3Oni+y>LiV>1zgv^c%OT; zbDw{vfi5o@2hZAF$Mxwb)+eVCyp~a!Irn0Hv+z!U>)fG(<4f=b&(x4jqcBFG8n1pT z%Iyx898ku5Ri758-JAd>&|`cuWoCD~i#a?$0|;hx2e4~;u{-EsP=@?|4M;XOKS}4o zN^NDt!nTR@;4?@P?BJGTzyj}e+v+!?XBGwP87c*ad@ZDNKYnEAJAr!k+JL9kXGPtV z`bU6X&u7~s#FT|aweBRbs8gQQ>(g+#kH6rDLjPnf56f|gH+VQ4;B6*sMz2{&W~jLU zCORDZhus(q`_Y6>*%XGw$&~80j4reY4{tR+a-AFuT{d+`Lw#SgM&0%xZdcNY(=ZaP+`U#abuf9CfsGFw`YR2&{84vY4_e z0AWn(I%#a4-J(Q|vyP=;-;{{yWPyi~UmX~2Q>q$tx4P7*sWRg~>kask_QMpk#lw2~ z76EF*tS_*YboK{eEILY_*>7Ex+kTY~fr7l{nj2}d-bh-y|K7-qM%{WgxDVPD1%}T* z_gqGcD*yX`{*PYQ%e%gwu?RC1@d7!92@*-dfZROK5y!Z*tT3XT=VlPA@Z4Px2Pcv= zKa&M|yw2R??GMi5@@kUJ3p51j@iO@gm9S_wM36BgfXptDl2|MTufUkd9%Im;x!2Ew z?)5ed;aEM`=6UxIZ5{xlg5W(KHi^IjnGcq^ZS>(5vk3+!7a67#tjXsj?Tnn~%y4X)vItH(67_ty$nT zf3t=H4qixPk02=g8hB}C3IWMso*GLG4zzV1qQ`tzjzxv#ysU1w$iuVwXX&+@MsQ73 z;>pH2KA!KE$F^-JxiSAUogrWw0j?}&+#3jF@M7fy6qWPTh}r=w1&;gf^YduDj2?`v z=aCh!%=e?O8;rU;4Gf{113!A{V|kru_m`uylntO3EtmK zrm^K&R{DDi6;sB`TgK85CeIl@IEJsWOVB~YEvS&IeEu!hnYQo8}6fh=xOD80{r4javU0Jk;fw6SWN?=k?c!n~~asNFq;IIPtrfJT{wPj(t- 
z2YqxDB>>JAUiIkJk`1BYx$84zEP*0&0B_WggA#JMhITg_l-P^U1%enQ*53Mv*5Lu5 zsU0^{E7Ql(*gs^fqXcPH=pj#}&P5;jORBK>4c;|ij&bOeRBRXnpsc+1THL<7$!#v!_-)(~H_b}Rl)Pt%;VAP48|c!+V| z?-EKZd5YzyO zcrUl|+9}c^*%I_g2MV4rziqV8ds{7TNIgAVXyevHkK`4Y&zHYN+?uz|298fFVv!C(t|)Hm3sCNggH+t$I3!*!8PMq75_ zuB>Jp-~`+vH?%ol6gx)|cqu>{KwsJo zEDTk9Z`LWUX3Mykcx0-`&#hus`RgUJkSHF}qF$RA!wyki1L^Z$aj-t)R%lTEDqo9N zWHxibf&r@=$0NJ*iN+DQ4&VplZf7wKsD&&piYMN=pFZ1-@<`F>XgtRz;c-HD7fb}-SOUo=S z;i@WQFG_;ay~7K$z-09=q2#QZcD=d-dA58sV$KRSo|tO`7VakW^k zgkrFn%674c?TT+}ALb|q=wuDY`0+Emx-m)-ps~+b&|~IF8;q;o0A#`A8U!o?&xJk$ zCHS?{vG9?xqJ%{BYrS~J+Ia4hQG59l1z6$&75@z&=$3Ewi=I@o13+4`MtpX9dK#zi zy&K~zJm@wEy;IAy1!T-nXmfurftBa`3nHq3i3*^#1Rs@9$gRD8Y%pNXt-`w1A;|Fy z&&AbW|4mfhe;9R@-t;OqfA=@h{Ke1X=G}MTopl_1{cBOdu&v(yd2~PXTGZSYgaWCW zPj?=N#~9RJR57^KgM(Cd8ILNU(cRQh}*+%HUMx_IG!ipf8jWmZ~X{>g-S-Rs5oo)4r27= z61%$x|8$^>^T)%qKo?nT3vQvk={60n5eoI`5FFz*@xa-{D(ViG6aPtQkOvM zB$3Vm^DzCX-IcMj2FDZ|##XP_5W8P~O8(V|XV+2gAt2YoQ_k(3QQXV$y{|_skey7K zLhChpP2;G8HxWa{N@%XY+g8%i`4{u z>iG`pasWOyK%;GB&aoNARG%@U|8g&NBLw9$KDXGBHL!;R_ckq~Lu4(P&+G$m)tx@e zTM3Y)gGIS8TRPaTlhc=fI`F(Hh3VX=XAxDL6*vr`fF16&AXB=_~6Mj&Jl(>Iu3Ag7O&rfOST+9cTA>W> zRU9jT!0nSqvB8iJ@9sy8F*qpK($OI_Z^q`&|1@s?em|;v z-K?2#+-{lMq6>_I$gn%S)RMI*D8d~^4aRfdY=Q{mZpWVjDE0VUzn39)<;sRO8QpRF z;&XRnb7wDVzxzEd#^XjWu(VOSEyd#T!>E4gS7Z0jek(RrtCQ!k{>HDxyfnqIuvsw~|WXOWwYb(#Q_+ego$3UFAz^A1)34-r1W8XybsQv=z+!Rij+C`0_p+Tl@@ z^yrW7M6-Lu`m#3CC)XX|25)sS66+gZur8?!pl`(-VD#+CvyAHGzHJn;%AT91f>&OZ z?L2@AW2{{0@mjm!waTM9SMNRhklbezS`AD)}M2#=%O(nfH>H6XS)r~V@&Cg7;XWPBA@msil1 zIWcF83N`TR6HkOQd)p z?j7UwbI=4bF*l^0*A{>|J@-tXxPkcaOO}fzEk9 zhF|07ytAML+A4T9bg+=c4uVE91Qiycxc?Grq%bQnuA%uK6@gGO27n!9dePYU1gl z*xh&Qv$6#&Ym!Zc;9LLRVm!vro@FSmy>HsUZ6yf1VQ+ZRGvW_kd=Rbu0g42np5Qf0 z1;C5SBAYlhQ1k*O)?LJ{7mF6jD)ty)7%*V-w|dx?q?_y$Uj;mAAVv?L#_Z|}qtVgy=JBQ$4cV#BZFCx1>0-25jZ-XP8DWJsR!`^@YZ{xgz!TjXs;{Lz-?^*MR+0!R6gErPe zBpBrR<^3UZ<*BoIl%BsEyJG+upu-5B5i+aJmF3b9fS_!<@Qhhcz4Fq7D1QOZz0KMi z>W3CEEf$*fH%!b}$^gK?$d7*cPAtx^P@IhS@ky*MMsa;LN~3!VkS`tXCGVjaqP~B? zY9bFtW1x$-^fU9P7QA5~0eK7L`LaGkhTe-U0Ln;Tb1{j{|MEXaV?IHLp{J0U+)1I9 zW=uAsvW8{LvrCLA`INcWs+-un{WfdQz4wZ4g@WGg0^abXO5{|KkOfT8#lw2eAAbO^ znHyFY@j3KDZ8?jobpmi>77N2ebHLZ-=_%}_F@}%fRkwv1{&gGd3Ma(;s^gX4O64_j zWqnQ`17vimFo0Sq0iJTKr)VX<^+n2w`|H&iij9txJwl9@7VT5H9^K^oY7S+-Fj{)R zI4?ZYNTe?cO1*d6e12Q~kIq3qqxo3_^WTQyF~G>t-e7>S^r+~Dvt_aJ?tI24D%X+kD#6e6*^D3br_lAC7jV_?Cn`R z=ooE9J;_$WMmlvuw(yRQ6_1fy8Ku+Xp8V9n%X=Di!|H;@Ttn8usJL{>IyUBQy^H$Y zh^@S?&D6otA80+}2CM5$wfC6Y5pt^zm#&rXed|-NXLTc^QaNYm zI&~5Q(co*~&d$eid0|cp&jN&EV-JnC1qa#bjqBV8e||QODd<0Yv#a#D&fIivYlCp! 
z-_UecUZW)PcP6HiaRFR-i#WuxQw-i@f&ie%&4U=y>B4YFdv1wq!%!e?m<<+{t+LWv zBlqOV84JK-t}u!)fsOw?-gb--o3GAoTo6M8Z-u>~=gc+byfE%uBlG*WgfEV*wHm%C z7(=d7`5Jy7b{lc%VJq!+E|ie~504q{fe>MQBpzi@#(0A|&o5HJsN~%{-p9a8=+Wp3 z+D)K$v3TA?FWROS=4&-p)+`jK!U--`tqEY^+nXR$3PDRQ=s*L%5shJ0+dD^shu z6nM`Mwysfy?PN6~qCbs?DGR*?(=gu8iv7W0kJo&bsM!3Q^pZw+?g}q768z|Cb4P^C ze%Lc;Vwl?f`mC|z23=>hJTx{EBLfQN(tx5{4gnlp<|%;o}+IG7} zp80Lh^$pj14CvO-9?X6_!3gtQL8^Vl&5K+3Wo<|A@Hj3$c*>L!3Q)UYc0I18A$+FS zMq%qgq}Rz;=93Zp+IZgAfRn``tC&RiWK^M=~gKgk-itvpz zXpeJSB%otZO`zEk&xSOrk1fXM+D0!Z4vis;0L@TFykF|W>lpWq^M$ucS#treVUWrI z9-F|}Cq57SfWCPW9OJ!Vtg98T{pvrAt=sfaZfiiVofhqHU;*QtXF?V{ePjXN$SrXy zQKJfc>jir{ogRJzaKD>PW6JpZ|Mq`~*$@BU(fsKj2RJrrbLet*ff7eIsD}UnMhiT& z=4Jw4GMDCXA5t+7fT)GC_M&IyqGVJAr@(6e?87fB@VV6-{=WNs)Sd&#AVaq!^ zK~{4!$>nOt8mR-guX=#-4d6HRrKVb)olYP$Spc}Ki4t_a^upbc1q_+Hh1Y8}n1;$N z>=TfyL)u)V17g(B`54mIe%NEh`R2)-u$m!s^;(|cA-LpqbqW{xAG*sUT5Z{@UakO= znIk{d4d(HSXaGFJKfPeT`3)N+z%g}U6?)`8aU-^w59h(s$>MIi2pZ;gEiEXQ7IsJN zqUZdYp%<^N+|H=?tWMkDi1Itnk;c|5sYgv)8mV?K|G+Jo@bt_mF+T-J0s!Y=Cqw}w zf5JQ7lX3cN_(i>*gV!Q5XnTM-&hdDh>#HntyXaUK#KS!UMZom(%O6V{GrBsB%I|#p zi?6G=H71P@iR|Ay(1u2SG`^0DbCemA7S$CC=$_1OZgp~TO!n&~8zQlXu1uCWI0owN zG_w9~FpA!MfbmIxg{o*2X-ifvVL_L^g2lHC^H$qWp z++}#b41XyA9Tb?6jtnvJE&U8viaIUY@dPXug!cp%hOaSVj6+&TLyfd!H|O3%DR%)B zf`d5b=;wWC0jhM}yz*yS1uSOgm?dliM= zip`N6i;a71!>R{RRHK4W6#&Ms=5)P{6-@z$t($E;fT8UbgM^Q zZw>Hp?UvPy6$;*MTd0G4Uqh06m@zmusA(|1$)jhn8cm{eu#X-tc1-k-En>rSKsc}A zHB(E?He+G__7Simy9V9Lu4CuJkT!EPDo|2L@#r?EPokc(pi;$i;=U>qBT#CP72f{M ze4b%=xAye_f^(0Q1WXJ-*v%nE8M-V+c+5fs=FvRZ%TV=diV}k7GbEa!xa;VC?B%H8 zMcf+F20%0i{mg;4RBi~oXRTLR*XCzFADFWAG7kUXUo)S29R1F3NBisF#PT)Mn;mkm z)C|x|egw@$m1k7iyw7}h(92WQOaplGTGpii8nR6=%p!PDS>BwDGWRT?*)8DHr_U0A zu+WT|bpRN;*Il4d{le$$DiXD$=c0m#U0F?&UpzUk;?^_xz9FEwf_I=_xsTC@G8^j= z0F^8+>ww`mf2^EhsJ4KtHGC?#>fF?#dvy(1V7yi3g>{2}1-bI(&Mdx@!r+FdUaX#4&M*=_oHis4;_5T znj8HSq`O~QL#{s3L)XyMSz`g?4g6G`1)(=v0QO9SX&@L@5p@)dnQlbC>UF|nYxMIB z`Un_JXF4uV!n9XL>IlBA6+#68(pKn)1^3^@9>DDI0ONwLw@O|e>ef8kt_g)#EQ5iAL%%^^W}B&>4Vkrm@{t zZT|@YYeuC_|3a61E^{s-5)hj?8cr8TWM@Qlx7&*|OwT{{g@ulSlL}y<#{G4i?J4h= zyq!AE2)dvlTbi>bIzuD)L7-?9!yX%Xt_EX21N3)#jd*bPC~16d8)Tb>zM7V{jnnncEEVIXXq>Q)nr zBs@IE6b*mI)y(@TfS`89SAYm30!7$EkmuGL^b21wigg3LW)l}HwPg(RVc8Li1vRv` z*Ri1wgu>9RQH)Cj-0+qL)%Wd=2b;nS^Ef|832s1aGAn}F>7Y1DwOG!lAa9Hr&o*iT zsaR~8%Y}zN7P!Go9(JNG79+VuX3jvp){wCg3l%_7kuF-tz?2%yqgY3QOS@$=i~;Mk zjfuxdVx$;A_BJ>4n|We{M{mvN8oIx{I8Rz+jR5!b0E^iKPXjvc^duE{gv4!S8A)I) zDhi8Y((^YA3Bd3)H5s!AF>OBd-a(z*J-69GQ`hKvv5DRqbuK7_$#*P(V-rjkMJdov zU{-0x^$f(1f}dZG`5?4V>#*2m?(I>a9U%bmYHKTj9pNejQVA!=z;@s*H zp^O_Hl_mcHDm*TDHNlfpV&PLgP`}*I>}y#`4$x^}Xxm44zpR~M*&B?QN1{qcjrH~T zDx0gQNGdS!7J6Jhd=?F4Mx$w?9=F?}CE!bxEeg{EW%_v3Dh=;zq$CeLfo?ZYOrG4p zGucC5jfYIxKfW=$ikoLovPEX|a4)u?`P%2c6wQ0j$L8QplQ`q`eg9|C2J{9%M_PH1iv^Nfcq&sJdY~A+KyLL1 zU4!!Raa51@b7z8~Tdpj&$N@fWkTv+=ar9>J%kBZR02pAHD))H4vLJxn&3F#_hHGnw z{pi2)d>ZB3b3ob=qudu@p?%N~J3QI##Z|=F<5-`b zz5Fu3f%oxt&8@3j)HQ7cwvK$)45gzFjb7aNjDodN0dD~T+YR$UXBC~-R07Oer6*`b zel>yu{A^mavOWg>^?`=A(E{W#KBJ3XZ=VqLkB5|4SO@oTo$nQ1wRA6ftSeV| zwHkKxsM~`xJ;3XFmg-h>-vSDCi$}HUdL0T8WHHxi-;uO9N- zu)a>nd~t=uw6~24nZ`T3~bhfvJH&otM zm8&bgr}aUC1>Rx&5E{d&$G?~(OU!XLa|;)o+a?9 zFh5U()ZS<_lB!(8Ix;`Og*r+Z*5Ib2%InhALP4WcnRZ35SZJ7r-#IbXOw+*cf@d+I zlQQycdP3(l4-Z2o(t(9mA4P}qP#WZsWrL5b%okAKWYf7Vyng5SE)-wK7~^hHYUN*i z?<=pHTQHn#7$C!q7>jd{d>z@y#sVRYvEg-)C4yaSvEarqk0BnbDV?aNsCf)x_BrSK zqwN41cs*1PTu85u!Ao2V_f-BOGSpfuxX;{h7z=EWC7Bl{Ki+S~y~9rIc{Vx=m<+=E z8wfW=A$t&uC?|G}Siz(WA3{`P%4?VPxlj;aBq!5&Z4<(d zE=GBs0Kmm*H%qa%B}vLQkmT>y>X2IF1a^ERHIJcC)UqleNo>L4%?v2khk 
zUY9N^tjr;^*edr7%d@~sK#L(55w-g`&FdKKht5y6)GUY)BsMJu=M2jEvm3sjM3=}sQ^BIQc@Z&E-N5;l;Dov4i z#Pa+qL-w8fN6{E!wB#9Rv*URTJ{tffkHPn4<^dxXYbuE1b8hL{AW&IwqpD$!3>V`G z;NtftYpvjF_#I=zf;`sUwyw8nAfcl`kmt=OqnM6I(Z7EeBj1SAcOJ&_Vgfy9F}Qb( z*A6e*6;VOvX_t1DOK(~lK?m0bn(h6{uSDtKFfRV|KSc?zvV8I|M*rd8$Nc9%j_W`9 z!*<#O$Ex-G5 zX!)7flJ;DY_pBY@iQlkR?|i`T09nSpxfr8DnLfN#5r{GW1)#SCUv8P7RqRGVc8tp~ zdRF(d{@JIWwXn!>4M?<`q1({p@2td!cPj_O*y8Dyl<@>)Muad%n|5Tg9McHMXBqGb za^NSjspr3%LPO*XV_{3;GHbCKq0|?*37T&3>K0s$ae{T^K|^e-SiNq6f%;P8mO4PWKtJbpTHbd(_2xT_-*f2K zALc}>8KL8MQkUN29wwumqmL2N`ada1;3%F5lb=-x4 zE)c+Kh_WaY9cougk%y^J`#Y#>1s*zjjst~&p~Apx+7$dx7W34p z>S!am6L>)5Zsb#TQ=daso{1-pKHlK88J3rh3^m845iZV;@ONt%>Wm*gWezKJfpfPh zkpMe;+8Zfn%tn>-z&Gw=9g~IP396WbdSBkKlWCsdr;IWd`9Ezk#$K7Q*39d)HC62F z3>`I@Otf8i1Zk|Dgq!deaelgp(MUsUI0THtAni52f6$ALm98K%#+nAfu!+}n4evsX zPN$CnC>TO!$YL=TU4&`1xlIUi-@S{LpDmQR3QYs=LK;5Su-R?w_@Ewlk6O_i@>~U6 zYBHfM#GS+7)9Et9))Bzsors9{b{I%rbBl^!hM8@?Vo{*^sV#z?xhcFh38<&6f|5}U zmy2dEFc9YOkwb*zCffB)+!+*==oWWjrZYEeRjCCp=BgAby?*a?b~-}AuVb&@MJV~k zka(84jDNGRzQGWAlg0Hjdg~ToD8&4}G%8^?J3tmFY=r?AHA|?%uv+{9ePKj%_J;mb zDWk|#`W_%NM39GgR7MTlR^sVV=G=2T7vE3;V<$pwW?_aQ^sG+hSys@?Om5-Hyp{@{ z>pksH;d1{sS|w^N=U|$^)BwPV^>%&&DZzjZYy=g#7Z?vl2G{F1g5lavP|Su2kKu`q z!p6Foce8{RD$&0@e2e0pW5BbwI`kDRSpjZH z+>_>1cpg%;|B08QiD#d5wBYS>7LzBZsqBqv*v8Vxjf#7tN3ocpt(hO+AEg0`_Np42 zM%1BoR`QNO&M4&@uis!45y*^E-56?zPX@i{-aACG&w2JbhWvj2GoOmVOE1M5{<&{8 zdAk~oI|orl=IX_c3a>aZy6dx^9BmI zhrxgG_y1LNzx`Wr`0d{YK=fnp!Hdy5yqEdlXW#$pxE^5yP2Vslm5sgO8FU&;tJjq^ zbKx4$3ViB!Aa^A@=mF-IZasvD?d;|L^u|lCMEm2P!YHm{`IEPkhFj*fKo-+D%%)Lh z?l_rogzqtp$V36b? z3twAb{9NoXiWSDN0Tiz>vK!Mg7{vMzU}67#J?=6Fn`hVElMls=FoE@>g@e&pmNI|u znf7WfBT5=pX>2cYnO0}WNazxMpTQ}w7UWhd*cI;Sny6De3DnTH-6nxQ&}bPobXD#Kz-h?C zRroudL!CqbZR((P8As54`iAS? zSN+v&3w&^%F!nk|>Y1h6Y?->=ynPCm7JI^fMe`Yfgef8;)!RHM?b`6L@((c6xI!8J z)Si6!yHrKz*z&zn0c@h<)B!#r|MQ;6!bhoVU3h7~=5LL@e|xVTGH>yom*^0UYK%(fX#R@SUFaGsTa`Fme~T_mFtwt33w=_;O{;X&|w5}KP>72Y4T zKtC|>4uryF4dtpRJt0dJVTE=buwdxLK`0;&*nyi-x{Wvpcw4R z10R2=*imqx(FgZ?asQ~D2GYhH&a_s;qk=%TiOds~Gz>{n*akzh-c(}3xF&k5C{)89 z_Lt9NQ~7*)5`g$|`*f=HbxxhxO>#u!4JCSZKvq8z=~? z%Ui83^8<-NtIMk^JhNHStdAFMa|BV96`r|2^+SZmFVNB&hGlxdMi4ZR0vC^Kve7_jey*v(e1we594W+RNxL51ky5<(r440`9R94)30|=5{m7YaW@Hq6^VBj~b zwHx-D7y^jMJ@90V6mVbja-*jw7@Jl$+c5uVLisR4`ps_?&&1bYrtTz}+1Bck!~Gn466OuzEQ82s1&UG%^G9q90XgdRt6 z^Atewo!^L61>^hhS=2i4C}YEV0V;SNK-zx!K3m>h0`RSAP_gQjTMJWeBI7TAGO7SW z7CE-hos(Z2bH<}iZ_#e7Dl^9oM8ZGfO_k5`h92xJ=F!L02jY>5X=p0 zaEc;iG&THh3UA5$TIinvIz(BuF_=L4%qR~uQC?K?LwHs_W=m(~M{nDrD|MP0qGWo~ z)Ct!Ms+BFl^a7bz|7wKw;4LWAIj}S7;a*WdX+GNZ)}dp;Qa=pPZH`kNjQWX&UMM=- zT7d=Q_Ar<{t*!|m;W+~Gk0O0WY|^kpL%V)jIOx}Qhl7VKxdp7%!xQD6)r$Qulr_`1 ztbN)C4IX3XIqK&w9=uMZsX?8d+@^~j_wXI{s0P*af@^5~mkx+yg2lPlO**yi9TybM zXJc#=h7lcUR6zNXt~!w($7?jz7ToH;qKhAfTivU^Vq6{srhYXd%P%SCuBEj&(oPTG zU-a00p1PzCPi9j!{&swY18tGyin#*{r4zb3dzN6cH0}Z$^{I`Hl3&vn8$qd|_i?aS z8yrJyooHyzgqEPvle1--K6GFpJ47G;m9om7f?G#$^6j!)EB+ z5Wd~;CU&L*v52w+g-$jPK!K>)N{@x`G_zXuHl_^JP?KSEbD%~OcvQUnd^=va+XXEm zR1LR7fh5j{3H3X|D%A2sDBg0`ZzeNd#iWz1C0hw3^7uBw==ZSC< zrKwC{Ib`csr!2xJ?6>$3>$k0kBeb^l!$h$)8Ba7=%I zksU(2{y|jgR*S+&yzXHK=A#NAQS4`Q3z*104_QFaZ46W7ZVKR`aTi4^WQOhChA7BN zR0>TEGYA+OjZ)k>(71!H+YJ`ou$ZBtB6J4paUIulEsh6&?=ruBaS1AAU9_!+!I^zN zA6ZIf5-4a0vg01!hxxKTMl0(tSU;;iJxB;Wz*vh;e*!=|-x+rF#+laO+0!}no33I6 z0CfC!07gAPW7E_NYi}5AgFw11lH0HrtZ@~9hM}<$k4Cx-pQIu~zTf8T|waqKf#qMyJJicZgdhd1%v=@99!1Y=gxmi5Di24h!zz5L! 
GIT binary patch (base85-encoded payload omitted; binary data, not recoverable as readable text)
z%s7s}!z&n|g`YhRio?zqMjSe^<(0r^Xx>C4QaO~=04y3n(vya3dSh&LjVK9zX%I}) z&()x(NGN)=hR>(2qR-ardNtV8KA3THqzo7k(-XfnFq9ehQ(V|PR48~)daLW#{q4OU zUGuQ#7~tl9*IeibRL3ESTt?uFOihJ$czaAs{%t(-H-u@=g;#S>d$Wxe6T0ezcxpVV z4fk{qf(4D5u@g9UW^<<+Ox?D29MmhZkeK{yUE+bgT3X|n}f!0 z&cV?RA={aStT#6CM>09yls>hElc(&^ui5Sy5U!QS;Y zd3tGIx(OMQK;)1p&UNyNvPGjp|DVH~`_PvNh#Sa6#tLWCedCdn=a)IL;mNZ{&f|sf z#E4StjO@t=yRA4ooC$`>S8rbrZ9Dx?It$WQ;;uWTt!G%*BKTWDUkm1Wk5`T+fV)` zuNbo`)T2yT{qj-5u^8g~^k(_}@BcJ_eG$-|fHYQqh6*eB)A8AC+Zb3Rq!=osQKE9z zC0~STS0pEFgUy7F`;lw1~B_j#06+IXVtT&3E$15fi1sP#GmO&&Vqd-h^-zH=$WZ;Dp}MP-|IRmu6%& ze?;GqDGMRF@cij~IX_OY5gILqu*CGV1&>!=lY(pbp3~$4xaW;!5FN|)^46y6z3(d_ z($mKLbQ1h}esiFpMjCO2b;@C?pM%q?L@E9zVa^N)LgY&{MUZAR zOu-q;x`|CEx7No-u;=@8_M|ezCY!H2S$z65?5u;D2k3rI)^n$os3en=JC4Zi8Dxo zxwH_Y15Ne6VFuTtJCp@O=!s(3jQ5A+2>jZ=4vM@dhQ}J>O&&EzO>uzso=u@D4xC6IQfPf}qkn}%M<$ZjTo|O= zDRQleDG>#^4EX!I%U&d024MWez`RI_-%no(wLe?QpBG~sFnF#m&-3^XdoIUH1aS?; z<5QK{D)jTaq8;g11~@~3yj2j#TZbBKot5{2%XynLdNMcAbdX1Px*r7 zUUL-n^~I1wQ{=8%h_#yjA~@Vn;W0`BV|@?aygFFE`l_{s>!}sN*J!I3x`^C*F6Zhp zS$EO>TEJgtn9!@}cr~2W-O!8Z!FtJeh9SONw@`E3oKDBcf1KzGaw0#}blIg~Uf=%m z-SYST>d$7!j%Gbi2_qGEH&+J2`~>>ea`-CGe5|Yh7ZpsHrpZDCqF%R4ZRAO?z;74T zkH@FSDU;ijOu&m+C_Ty#Lt{$bDWT+xfN;W(?qRkUL`%Z}teeZbHB9m>1hZ7G0|*9* za#CVhfhpXl&*UH?lwt5E6F!r_XJyK@^6a&eJ5+)S%v=fY|7(j&MRQG23V;bui!4;Dp9jDy?_ zi3MvF!;{Z%z6^z_f^Gr+Mr%T$grcygbGwB!DGmFfr^pM=7y{As9QJT-pKFF{OnP>j z_mINc*v*)1onBxfG^%yz=MBOvBaV{qd2r3sYv9HMyLf^N6TO%(WJ{Up?S9WV2{$v( zZefh95s)ab-w7_BJzjNNIEn^-Fkh z5}*9&^Sq~kGCW8L`fS8hG(BN^^z63KW)7?IWSVH7kR?~tiNw!e2kS|xH)b7O?_0!1ghP0AO%p^%-FFv#z!xWA-67y-V|ITv=!WtcLpIuAWKon9GunMKXd@(YhJ+4xf3h@HHhu9?zcjpgIj| z`gUo}8RcL7l=j zM!e_K69=zf4L|5=x;38p&Nu|$e*12msuy|K9K_*qa>jh(ZS@4+gGUY>{i0ZTZ|_yg zDdV6qr;kVUHhdMHM0#2Aj5(<%_cI>k$|pQOH2tt77MFdy119=Fo}%xJQuw5J`XYBl zIM6eLW`#4}>Z|`I>ecipYWjy!C%}wX=ngeQyE%*p+tFY~CqtL_tt%kXXa;!MOU7pc-NGUK7ATPLuEX?PC;Vyxsl!?Gxh$szC4aT zDEP+3J5A(8e90hqim4~>9b2j{-|XRzbX5p90BEsScQ?yX2nM9pBeMXAAt;3=g$5|t zyYa5&h%ZuTlQ-iX5C|Cw#vAXZ396>#mA1A%57?s&o{lopt?B8L4WP{HHO^cVU^bS6 z{hc9PIdy!znG5SVU$5i;Y-3xVuxA9P*!246ao_cyQz zxAwMA84o6tsT*wU)qzR>-Gut)ukR0u)lNdT+(7BJ% zGkNvsL|9NG8gqHpeH_Kr;EQq5&}BeMD0!a}Fz=CxqCMN2D}MX7NM6I#Gf-26SZjXZ z;lgp>Lb0O=F-;5h-h}i0+1k>o(V%*O=6in8I$CFVR8c)j8O{Usg#39X=vlbU?ZF#f z8-2cw=X>=?T!SVV`TgIY^I6xAVDEi@KT9Dw$#7CjEy4Q?Ppq$~@iv2!0Y+Y}k%|-& zV>5b$bCbhQ3{S=_A;<6$c2Beq9tUsmZ;AthlscGzTgqf;C;|CzGKhbhz`bic@agjI zBg1+|^VXzk=F&vh>TPqPaMyYYjZSWZJEQtG9N8ytoXs8EWYtY{5563Fq}a%qenwL& z<|nneNR=ip8PI4kT7`=#+-eNFm~2mD3?*)d=JjG9nbP`&+9_iWfM~|&$K);L5dp}} zQi>W_@d)u6zQr5s{MAEQdE z#o)9*o#G#ZrE8Zj(w?yid@%fW3h!f`br|P7l zMa6OfehjO~u`foRUy@7F6ay0t;2pY`PHBLgvmD{*?d0gki5NeoUt7fTRLtH$Q*4)# zvE7aLH6gALi$N~W&J%_$B4XvCY)#Ha{_3YNQ$%1NwLOghFX>O|n{+VAy9`lH((izz za&WeAgw!e2!dy*WFXr=z?m?`qIZci)g6jdJ%nnnkFSi0tFqqC?AxV=BOt9t#c{N$F zSvS9YyR*E}x=~&;1o1Q(4?@b$wrBZj=)+)PDYDoQ_HIP``pxUC&ph!Aon4iT5gfw6 zFf8re-NPZkmoN7MD&(Fm8zFnM*UAvm05^GU@=+=O2+Mj`&%MMJQ0mR6HJ__mx2mEV~F)WPkf9wifUk&_zXVN z;VJr<$WIKxV+Hq?1f7kydSp5gemaXGQ+$*F+!6+JI8HQ3fVqPLCS+K|lpv-3sh@w> z^FHxFVi)`a$c55OYY4On4nFIGZ1=(WxB!6vrjZ8EI}y^o_Dtu!b%! 
z6RNsE4*n@?&qp^I3yMrvqXOnZG2S2$!@(GS_|-?C7tBPkp783r|M7Ez*5X?$JTOMa zGHI+SK5Sg+7s{P>Q$JxX@M-=E67VbK`(!iPARm%bWBm2JwGN}XHl2YP_#yUW#owfi z{`FbB5G?7>`JliCttQ3hY;V2Cp?GojMgTs+!H)+izu&sb8F6PfEh&AUeFuS7TFsErwC zB_%n^lP>eXo=;cd=h=eIK@4BcQ{gD`=qX*Sr_l9@e7C!N6)`9Bnp=Xa`O7T}DMx-GV(ltr&DJ{G0%O$h5|aqSJ_!K_ zy$&O$h=@m(dJ(eUrrfhsA>iyJ3+?n|IY^N6>V^Fg3v0S3b{>BA^iq%TjX+o+MapJll#; zF;G?u)Vn$d=#&N2fD_gnLHeb`67#aGZ!spYvuIa+H}1(v4jTX7JOiZvtk7OxN+S zJQuAE?$MFCz)80e==0vF9Lwwe=13t^)I4dmI9g(hILU>B$KasnP2E6nXK2|nIIw6e zMlplH$i^n*6CXWmU8Bem+a{%l>2WVE|-6O{BgOAUe*ZGEV?T* zu!HrwZ8Vt{?KxNrUtcAIHhHOE0>Lc0sR=C=8e_CggfTeyU=W-HM zVoPoAq$pmt0A1;Lhd-wRh;v{O7IO6xK6tU$Aq@oHXZ zK=c-$e$IKbHP{J$;B6^+Mtrf^3$G*8hDv>4)s)*0#e zVye=6&c(R~Fe!Db?!VbunNf^yh2_Z;<0A1%$@ERGPZ!Q)8(u*uLl+5_>lEhsMbB0% zGgsqfyTdm7EJcmh>6vH~-GQr$|5rIxBbzhmqP}i5Pqh^3M3K^Pb9!+Z-!*n}2G6}Z z9XFIVM* zcOaX$Cx&dDM#x0m|U!DxnbkMNuZ_+>NUFyDb8{8i6qQ3kIv_wN~jq zn?3ygPJ9xoU1KN4R>tBmM?%2+t0hj+p^fGjBb{Y!TQ7rbJ4L4v2z*W0t!EREA~%E* zLg!r*MufncV|uwp1MTSx9aG96UC!y_?QaXo~sXlnR_53Pk4Tmp;ruHGY{41tN_$8jaB$WXT5#_tT|ypC>)f9040aJAeGaCFzR-S{2Dhi_ ziP_mgzVfe7}hu4(-N8vRuP(K^-*k814~_Qn#I(= zm?WfWDAE0pZNIA%k0>myBZrW`ti>seo9kOfi^7N}gJf3f>?v=qU@0swSIAhvxU+XO zWjqZ}df>&)<8pGY2}Wb|?87W;J%>GuGT=QEKtfb1Wxw!F6Wo~BE{Ci0)#Br&6L^XZ zXV1Ag^recEwf*7veA*#mP(s|?X{(n>B{-y9*F$QWdTZ;)+-IHR!Gmzu`N_r88W(N& zLVN_1sK6+Q24u|5X;?9rdk6vw+pYr)O<=Q52^&gCmGZPKj;JY#jjU$g9!6E=eU<@s zlm~N?m3;pB#c~_7f63s97s4F{5d&lh%{Hngsu=HqA=7;XDRsd}S`}8tVstn3j3;I; zG2{s2?rl(OHTG=BYtDqfbyH9;ZJ`v(3rgc{#M)p!+sQIeM(9M-if|tD67CU*xafa64$;O3|CMY`6V2R&m5@yXE3 z+GMBsfnB(U+d$bo`_H&&5e;mnh@+<;GZ6k14~YIS<|lR7?>dk<@l98@H?MY<{rJt+ zw<%-|R}-OXO@IE&Z#j3dz#TgC9R z?UbKBy!8UOW4Pw#^;2X`@nPL>IxGF%SeUdJqntVJ4;5vGaS=N z8yPjPUcH$}7)7k@?M1NR6ri&)I-+wmNjJ$u@Kv-(zWbU&$ky>QT~gfO$FbYmNq4v| zcrd(-#h6SCPqbuu8>EQeccDTPi zg~WF`iF78}PxjIUTGejuD#m0;(V5Z5Pc%+8Gbk=D6kMJyr)R4XFx&gcjKE8-;P+S2 z()G>N^3)z=-M557EDW<9XHiUbr?70_Z{ECJHYm6zy`I8jfA?eFpP~%{IcxJLSTtn1a1-VZUq_vX8HiPe0gdF4JHg5LLS!<9>Vj& zD_=gZQH|?7_v?^(I^CtNdmKZGp(11`EVFvJJiLQfd$T>`c?A9>;+U-&J@fWT(}>gM z=JI?(^tyhH7fpu$WL&`8$*s~%(-c-!?jLFOP8MAWot-a*y31h-Yq-D}86k6+= z)|-?a=I?$f5?jrlWlR~;7B}0kjH~yv`40Y~2@wOKSzq$*&o_6LXW{5NX65}~NB<`b ztB=RC9cy!Md-Wun6XDk-^W15w(>mmhN6CUACHAw@?VIt&-~nOsblU8;7IRq(E913r zu;P}YjIR6HAi>{=R@zTSTb zzn(eQG@oX(Jl*P^RKTsX?=?=j{uk+rsN%D0c2a-Vgt`Hk@H zGkisuPb#9%Tu-o1Z9um$Af>Pv3>3d>ZX5smp8EUGH_LIfeZ`e){gnDvysWpxo5Ssd zr3jgBbPnEH!}Xe7onM~y+0OFoUwl2D`RUos^ygSl_EMcaEC9TRt8ghu1vV0OoA?@qTm2X4@=Sh0~XfyOWH6n_}N>Y%&<+_mZct zUgu>uPhPerX7EeabWt*F&BISPIhvYCPtSnlr{Ws^yeHX#Yo__3sqNi9oLSa0Z4)`-{ z)B=zt2;%2G_REvxdwfGG%VWQY)~_=dqz{Ik?j07!I8R13HwFuG-_IyN+>`!b5Nt$K z(zg!Vc|L90b?g(8cfXXXC#YL^`}#0PEDm_mxgL`PxVioZ*=@$IWUbbdQeC2Vk@H<5|c({3;7JYcUJe z#DpRzADJ>*!?YNQ<-pQGSgk&!PW~j}OOQ~Cgz?XwIS2{60_63LMI1s@e@M!8Go~*pfjFfwZNFKuEc7msHzZA@bRIUH#c_*NG zvY#Vzgs9<$kLIYwxg-HT#EBLX=UKB1#fEI0@DHB-?IgcBv;@c+*ZW(IF^Zc|2nKMB zmP80dR^Gsl$UhAwYA)bug~GywzDt2FMSiak5V)jUZ)E9 z32|Fna|C;W7xOrPK=pu zj@IQF3Rm&sc!%918Nd_wEn?*z>j#C<3;1(C51SD{F{)KyoDq)r3hwu<-NDZC?(J^!X=B3fU%rI<1ixs;>h(46eo~I+*xYZ8De%^F zad|lkWVUl%x-QSO=gKpaO->aH8j(nR%fJfI7iY=%i}Q&rP+EFIhzv3&!40@H({U|% zwH?g9{rX)b^SS4*)$=pAp05ornK|uPLz~Dgxh)E4PMi2Pn!?|_cH6}+lTq0VV@ObX zH1o7o4wfPmoEwGO1ILI|?tC@Y5FXqnky4x-P^Z1|^hGXQ!}${Fl`e3dUh?S1*Q9~^ zNuC^}FSh!Olg4Jn8}Mhhl14V3BQk+@)Sc=10H^ltYb$$qoBp_Ko)md=jv{}e4JX^Z zPVw({zkc=6dVrxrUcG%ar_3GfIVPAKCYja<*IJZ#w|w~U-SR2p-}BKH8k=1NJzS&( zTwX*|dbg;wCo`ovh3V;3JZ6#TM?`pSRU49rXiYN)Q?V`7`f9&Pz0u|F<(l{FMpej) z;Q&1ezcN}MPzrNoY1aTELIzjiDvFrZ7~Kt!A#R!n#FV#Pd>0^Waf_CQVQo)1%rf3f zP$QzTTnO6)q`zm+DxuT@DA_kV!f07&tpQ;4vUddRMG8+S*)|kLhI|VqwLODKv5(X? 
zIF+kwSULhr(1F3r4V9yEYzc8|Xb!`uXJ@Tfo7{|v-lue4CP*;FnWC$%nfHRLe7W#5 z1bu#NeT_l3uG{XY&>%i*CMbAnPo@Dz(2uFMr|MSsJ-ww_`>v%Rc<+oZQI30r95S5? zaCMdtNf5B)C_oBmi_sE6!;g@AHr8~!x;$s#2>H6++OtR_Ow1Ww^~KNfy$VpC%NT*J zU>Ig_iC$c{8Zj+n@DWso8Qh1Ed1n_Ha&wwr_!j1U*?PC~l)w$mz$qciyP3i|O1?oT ze+HOzjhlG{KNs{&m2^0JXsn~B56h3y>}i7Srg=REo5zf-&)w(qJHibubv-6l#Kk-P zoN(0U)n}9_&z!f;^#7c||B@mQ-shPUFBBK=?<}Q!6%7j2^Coyeqo`b$MJ<2Gx(2A( z&zyjpy}h0Te!Ok;&~t>~DM9%8`3(IyX!xifDZJp55WCAr5LP8f*PQ_?pNRl~3;rB0 zH$jY8)=+WJiT?eQXqXe%tZQT^$n;!1I!ZY2iqXz!naEbK*@<2bww@+? zRIRHS%J67R#f@YtVZLr-YqXv>eH1d{D2~BjxRau|y!tdaxv>L=!pi{DJV+Qhpal!t zxhTSv$)o|2=P7CYs#k+tt)D!4uTRIv$&6&r!Qm)KwM)9T!3zgKHNCnA&y>G+H0oZ; z^3~z)a`;LQ^tHlX`oaz;bxoAKztOH8D59wEzWTO##!JZ`%Io%C>XoB%)%rO`j9JE3 zNDrb6h~3@SQyxayuTakB0XPBF?zEqdFkK7K|!&~vs)DT76oJsIE7Ke*|a}9c+?>5pM zc1zvqMbCm0f>2~|)C4Hsml6G8#Q7$R!-WLx#now+)!9^yH+BmW8aPdvUd?2t$wSLs ztK)kOz@Dr~wMUR52=UXNGvFlt`kd0i#K0sUqMhMdY{HX#*}R0G0n<2uFrmHf>5)C} z%QPAgN*fQl&n{DNC#UNf%ktE(4h|YKD^g(t#=Ih=REd9Cbe)=JHXCa`hWEJc%eFUN z*xBB4#e+{{;Jf8CV#h#p5>o4*E=W)uF;RuK!~s0-odOdYN{x&F^||~BBx?RZ$+h4V zM#_hzZY>X4<(GGjmk`<3Ghan*V|gCspeMRe+pE`amd(8TRW1k52u0Y&_Bg45@p>XU z+Y?1z>=#}u^C^kzjDgk}u~XE<5$|q2Yak+NU8?I5?NmnxXxp;XJBZG#0)GvaVN8XR zI!8@6z~+e1F!MCQk-zT=H;toXoqIJV%ACo7+s=~B013VXC%7milRFp5S$i(X$*qwK zUfn2{zD7YdE}Zca?;F$h%8waQ-}Rn-@;52Bhv>-;4MlcZ+&ZC7c5aHVx{kueu<+wR z66dSS!=qygf&qD*z?htL-}cH&>0Cxb_DE~fs)F7Yr?5Kot-`k_%x(h09SnFmtZ~ZQ zIvF)QabZF^R%x#Z`);u3c`eN=-WYyro;>T8*tp%;8qhhVO91weK8x3^A@C^Ro2_K% zPIx@6Ps#DvjiG%j;|5Qg=QNR_psleN56n||liTh+ z*D_kxCO7e_hj)$Ztjql21bjs)7%yrrxt+Cg;9d-n@Q2d0J7uS!MUm zNoL_thVXaXUZ&Z~t+Qy?RuLSpmya?n$^6GWU zMfe--Fj&R!&nEiDSl2NUudD{Wm<7-D)$7A#<6iTLcB3nW$QmvwlGP;T)A9RwiAR_0 zY`&_`=iK{nt#$UMCNzyFs`?x+M89CJ!H#PwWT#Mm_4U^iDxbzk(GPjSxRfFwN6d$T zrtL3fPA>20>e-s<4{Lb;-Jb?V;Gz$Gk|#ODNa*_JMlPbshh(VrOM%P|5BKyQEKBFk z_SN90en75VYUH|gAIm`}Z*95dIIJ?ZQInlI0oqaqhEA6=*wiCMD-^9-GSAjT3}-vZ z^1jbG89{6CFP(^2Dfz35i*XpyoH`YryTagC2XFK4>6S6+qFX70Wx2dKZ_KM`xp$)< zPOEkC%+-VXeNqz9trY6<86zuPU=YFq?;>u7N;(9eZ1y~)97PZA#u12?Hls;fxu;%j z^XFrMZ*w|CZ8vga`aqsWA7R~e*wTk8MV)v2a%%>XW3eUZ)5nu0mW32Dc;MHptq5rP zXos|sFunqm1zU(cgDiR;Vz@}yx45`i5rm3G*5l+_8*?K8cbLUImEzAb8d^wT3(#3J z-9dQHR*arQ@VwrAw!AseIcYEANV#8}PWXvsMCmdV6b15X#ws?K1i*SoLsntLxt)Sv zyBIa+Jna`e?<_IOV_l2Zw=v!HZq{gFXW3)jidyuUl~S(1|Kt0ZkTB0^Xbc4xUcx|x zgwcc#_5Ej!3QzOE5fR0Ies#aR55Bx_pxoc3*o23kg}{y3cA{gIVn=%@87Y*_;}=^C zo7+vwIL0Cnbx>ldv$loQtaFrd_vo&6c462z88wB;0Ga;k z(GcNAF^ZUs!5FQH7Fi>lDe9X78OovA-1N~&bNeoaILa%#PVn5dHpNqs@#N_5!Ue^y z6W(KVkp;D$6xBSYz^!|P?ESt(he?)Eh@u@+AlUUU6A+>!_QyUUtWs`Q3676H9(4^v zEd|y*(5(+ddFdDeSsTNrMh4BN&dAe-I@nW0$& z2iMTl`+heE!aIeeA}MVTUtqb8-{--bcg0)foy<})25B zzX^=dz_97Q+4az$Jp8HOX}B-(p!rS9+F&4jKj*c_Pv3vJUjD`D`{g7Uvybqi;?=KT z>mDb%vl^E|uVme{N9}h))pO3zH7~gc7edM+bio^LdKORB7DTu`i2o=)@EXH3zHwFn zS#TI%l5^n2K>hYtZ(G0g*UL%oq4}w9r;LL>8qzNNtFI2@8*+;Sq5w@Edo5toLipp+ zhmoC%QNc*9lh(pa?ExyPoo*4|q(}hBI`Pd~iL-Pl|w?MrV-7^XGX} z(02AP{1MeCc1B5aA%{PGI?W&u!cNge3uKH#uFwJaWEu?2P%9^q>~OY)_b7F|b9#)M zr94uOM<-J=b#!tu(KA$~FS}H!t%~Fj{rLE~xrJ-^yTM0256ukS^enw2IIlc+^rG{s zG{?hikBi39ZgUX@Vc<)<>DNI|DVW0>>Jc~|}?#2k z>iP(MAVV?mC}df}uQYi?Fo; z(=v*L&NQNNAcy%MXT7sng#ce=rEkQzLOr_bf%h06DS`dNy&)=HnPx|U;+EVcgaKg+ zx-?f|#5@Flo*j4)ByuuGa>Fq>MZ%ka3j{H}2Y6BmO1+aA9jaa@2gW0#sGINP-hwGX zjUM1$<)_xD@NM6}LLc6gv1fNhv@-`UbnSiy5*hl0ji-4y-(3n;!~8!5NUjheGRR5Pr#p40A-|?}XUkdF>Z!36pO9G$_nGITz2D6P z%p04sQ#eaOPD-IQA0J@4kT|=Ugw|(o6u8|Rg0L}d6-~0Q^*Wdms zSnCuis+Y5K>0Aw7a%)2q(IUE1NGd%C=IfR?bx+AB%3^9H;`3;fEVkd?c4!WtYejQ) zX=9X4Jx=TVl5%eOjjZ0H&oT)1GGbmGzUdh}brDz%t#+5aox|i~dZUL^=3y_Hk|&*v zzDHXK5n}W+A35Iy2p%+`i 
ztzGKWdg-HkLLsdh+c*CVq~yZ1k4_j|p6d>mN1j5JX0gTk&wlaM^7a+E?4doQ@zoRD z*@F1wZgw9q{>XFA)%1>uPSC?do-)3Ch8Gm+?QWCJ8DhzRDSU*X=#tz=bDSkS2IrWF zK|}|=`{ozZV?cqj^@ut<$7ApCtLQA6ik6c>q zZD+j&@V$eWHzFZ12oy)bPA)9Q8!v`o(1Z1mSGb0>RP+-zw%)BxKe8Tgo-D^F!dGkY z->!xi!7+9oA&Bbb-tB!<>)<}hTJD&U#KJwke2gg9FqNuY2jq}t=EF*6Ln?WYxTyukhFi z@m`)oFv(R@+7w9G8%E{9NEhshX!KGap}t{_uwXU}Bj?sU3APAwdK83bp=n#iJi|6T zO^D>vM)-8ILl}7YvrD2E&xWN=hkLeA;B2M&9Na07B)PEt2$J4=ND-e!M?a=uPZ=J) zKJ$=+ImN6i9&!T_Q=kl_|fkLDE7$eN31X<^lR)0h*^79m(@gFyN%d3)UnbY%O z=Md9VIxoY1_zmXr^WdZ--Py%m22k^hQJ-(+!9^=iq7~_-=iN`?YV?B6DMZD2;7CF1 zqcDYI=GFWOeM+1cMu~&*O|J=sQ+C}yq48kmuBl+}+r5YaEXT7XH2Oz4uCLzj+Rr?; zXwFf??!=ewZ`36xDcBE^v1#M*KIX>x+wbN&rv&y4DFX(E2nBCM-)siKM9J`bKPG1x z9;F5pIr3l{c4)~2U28azD(my*BZki;xWXspFn&wMH(~z3xMm1#F3YC`@Bi!U)AG9% zypZ-7g?vLqaZ_W3cP_bJ*0EHAgBn#^|uEkGzoQHP`7Q z&JbyyyyFSIQd)hc5ZFOk_SMT}^XPf6l$UeLISHdU8&`OnC(1z7m`lNN;2QX9EiF7O zq9>BYNZ#Lj-E)Iq(4I71ugQlucgML>HnWA0i%8bIKFopGO~&j8iPcavO`%?0{6zoW zeEsd3NAFp}zIKK<>s?f8j%DRNhvTXHX=3o|^vu_9-!AXI{zV4CdhnH;Im2lT3y~2< zTG&%Nu<>FyzTogtW?e^-#{=Ytj*aWdd3N`xCt3?tC$HT#ym0zxiXekOx>L8nIhw7E zv((3)%_fX4;^*1=8iPMRK8r6sBbn7);80{?>OI^SjO4??+YXAyC-=$cV9|Bf0-qEz z`6)dq0{P;F*5OZ=qfZ|uYKgW*f+k9B-9PCK_7?h?U1Ag2z-6Dq--N?w9QYk8|voGYG5CJeAX3LxKT2wJ>RJE?M^z4lFb{f!}<=a| zo+H=;j>)wYCpJqP2GjM;E zWxlhYVi^IAK`Hle9Q;SQN7;=K?7=eS5gr*TJaNSyJ}3Cr(4WQ}7mdu#4YmX`CvKTSg;_dc=Jsn;h4A#A8<-Y zDa3o4K|lyls17kvw?PRwsf@2S2hBtyN73X}iv6+q-1OY*p5ch%b6XkDFP5u}N71b( zI~kVIEnyVMMsZL6zWJ=O25&JNPA_F{JQ~4SPDCBJ-2t8uhf=xpR2;}lXqXgTi(2RH@vHO zOh{T=)cq7WIsEM9ZanZZV@4G@<@$MwzmAU2mLLA~{p|2jyF&rM#jD0T*nhRWdH1gS zq$brOB@2_^8qH|bvzOem^-RQS4h*}!O!2Nwl{gnVhN`M(N}+?@yz&?625a^N&WL(Al#u|z6`)Qt_- z=;8ESJ=FZ%l?-g>I>%%1&^uV3Z)s-oV{!qwY1p`B8G9#Qd@ChvlvdN9x3zT}-PecE3{ zP%J8;Y`GgsS%H}RQH#vt&I! z>yDxFEFw!{3={FLG2GelCjgpaJU*R+I3_n0BnbpYh`p*px(9oTW_m`*4Qe524N+=M zsuEW#TSNj7o^f$`8$mA9;OFYDIRvkFF`E6b(a~x=QA`OJ;mZ-gJQ9MBa1YjV`dtKy zIJdfAejhFLXI~_0EJ^vamdli(I*a)<OOw_Fx{94M;120Y+mrQ!VA%b-8~>u3$XA$0W*1QDen zwP6{Q6{X?*{XGKC7LnCBuyzK7&&L?(cZ$Y+6u@na^J5D0s5Q&U-!^{-72m_5)dqv? 
z`{k}R3E?ZWn@)ILAGa;GD^B4WMQNKe`q)bWE#foaf5sJPNncq_~Tj6`;<7lF*(gp`_fCp zBHouC{f~J(vnzn{h_6L!=Jj;>FOJ?X|NP_eavxve4Pp1UueO)3-n^PrlY8S$xtMP>|yKu-Eq2oTy~8kvp+At`|a;Xacw?t z?BHcyy#9yy!(lABj?3*OM}=440(ig8TQ1m^tLYtppOOI#7Ux*_a{upt_lJ=mkyguG z3#PA=U5fq~=ocyKi<1;qiu*}xQ58>DOx1VtiE%JTXv0mkDC8_k!htwBxf)tni*|#) z`{rwP2=Uoeg}?Y5tQ0msnW%-H?jkwYG2g@I@c)_IUZWle3|-nu6?u$NXda9jxFhq- z0lq{`j!w?zj`5&}G0Z1<7_4AW>Vkpy_~c=E`{qp!*zSbdrGz!AdVE+F`ID#XK|6G$ zy!mjm`!fU;WZIY3l>(I zE_E2Y>yjHibPeV2qyTmK5k^%QAslQ$!ZN~XyW1H7DFM0UvA|j=&tG>W+e8&Z{DA;yE-_9n4PWtU=eXQ$JJXgXT9I^K7)^a;S}cfR>sHSR`X(v*m9AzEi@H@ zVRqi!Y`apJcypdYJIL}iV< zAox*~F$pVB9?;&(Q6_z6`;U;Ficz{@v)@(RB5r-0Jr@H@lc=C~q$Ytus7_cd7))b{ zTpymW`UD2b*+qC9MI8oG8oCDhY`jZQXVH>L0cOIdl&Z)~^hP1@Hd(b}KtwZ?8^aR4 zJ=@qx&OBRAquqbkJpX;Pa6*wq__tkeXHjxydFa}Q##Io=cAhHn#zl*PXVy!BQKp24 z{nk8rvXYYj60DvG3Bq&lQI^PsMZQHaH6~+I82;tnc6e`;u6r3hPxW~ZAc&|uh86>0 z48)$HhlA(0uz_1|51sL(MHhJNw(X6&?C;rk9xTy|`+KW-`-Ps34o-T{ zoF-SEVJ(XAIiV)KKnbkm-&`BFf6-V!T?0uTLbxUmfG^oJJZ#+7yNkV~KJec*S2JJ>Gvt2U&}b<>`+(2VW#6clCg&DVQ*|8J8Q8!2efPDa}2GcN-AYD{UTVGM!CGLR zvlE_1R@*WNcIgHupPJ(o2R4QX(%c)4e(~+s%b)$~7wNN|454rvJavsl$7g4!%kigA z!CgcpdB2^-y1y^%t;aksH6eJOS4B8CHGPML62$pgtCLF!(6Lm%Q^sHNbhUf^9J02@ zvzHgYi&O7ECxoLSg?S7IM6cE0i;Wao#JLx;RU{6xW9VM;Sd$6}$Sk{fF=E|V4iEPN zu;UhEo`m~(mXENr2!yV1yS9>rC?OSy^Ab@YndIKX*LcTL1LoqzGn~ET-9rhoUfpx@ z>3F%jI!Up<%#&6S)WlfoMhnrbL2&BwDP-7_gUN_kEkM^MXfp~X6yG=E9O0aDpOiuH z)HGr`-t}BPjo)SHC}`nIln&R!#KRIebcj}ZxOt$i7Lq}WZ*IBOlhPZ<5%nBikExa_)%)_cXcF*X9h{HqNz zGv&4G@qao?-l@EIU+WyhtM~0p+1i1tv8*S(qNjvz zWvZQRy_+8L&4m|A;Zh_}^|Qg_cS?}(4lwftF1!-^;fe2nHuQ!+ajIh2iELzW8grCY z9@2!E`}^UiD3E7tPmP56MGwaDj9w#X8pqgU^!K*t9h&hh?~g+FT*|lmT`v+N?x#k; zcBwIxg3B5~180J}HBsi~(|WCAD5ZNvD*QgJhw-lajGaumXMF|B>+ZWxnBJ!pZZde^ zCwKn!<;n6dZ0U_R_4BY*Q`(SPe(e5f%NtJL z*L8EH^m^7We)(qkrq|1@VDWG@3JiZxkP2+ia*E`{ceeG$5XKjHVoYnA(;S9O&~Pw} z_OB?8hdlCNAjP8tDLg(8k8j?+%^=MYlA37Fx_utLvSm33Cqv`2!nYov#=r@d+I%bS zQln$bALA#k9e!u!QS_hhZQXU)*I6=a-G2_>D)bc-Q}8ceZa4qKk*mB{{AQO`BS+hh z6EPwC$az2)m7Sz+sO__DslgQfg{W-`|1u{_jn~tsjNs_Ib?)vSj69y`To2ZONPeD7Vw{N5 zoSYqv^8^0&+w@yd(nNA1cbv1EfU;u`xPWR&#hvKo$0*IBfG`ozMQ|R=XLH z2!M;z*2L;f=@|3H3r$vH$cF?#iuyLsh2Wn3+6l%eF9pwgLugrv7_9lbhZl~}2+zrZ z_D#?@X^uc}dJ)g`^}*J1fI$YC&6~XBO$4X4A2i_<;kEwjnC|rWs4?#&E*7rsInU<^ zU;Fl_IIF=GZYgFFD!RD1AEoG++zF32&o!6+YzTpHF-Zc!Tqg(HKZcZO2m?Ymp70g= z!xU?gjTD8`vwCKXAWCo!KhY0Z8V4-)!hz5jMbNTF0BG5Ip7%b?*fY_U$6AvJQBxfu zFhajiLtS*T9?`mn#KK|kVQPxV=iuab6}F?Cx>poOA?b9g>z?!0aMpX5gm;UDGs^0r zXHSt$6C@KTO2U8u0h3s{7egb?ND%R6F`sk`j~sTpbd)*o$oHIb)|?awjS*xe^hDp3 zG`N9p)0_oTz6@qYClNPF9EtUr`{kkG3cSYXKvCxBWdTNRjpoF=9>v>y<+NvV+qAU+$qLl+adTD@3D*xHInHjLrbb)SQ`u>0(Bj|RU`@(q|HN2@Ke*nhabSpLoV z$?{vY+5Dd;DDelujTZ>o!xaAO*RPt)vxN24R7$@;I&BcV&G5`njm{>z6{yts2uCwJ z@a3$rpYX4>sq~2Tzdqaz_Pou{YlN}6>wGp1g_?`P(Z5Q;Z|ZjtEstXFUY_{gf#Y5` zQ|_OWsprx8O(gMU`So9YyL|iWU(R#a)JZbqxzuU=)_R1w72FZBl%SmreKcs|Q8U2g z>pep^JkdkO8X8jx&%i%?^Cs_BVehkK-%*A`JQjh{2?u){8K!SWw%Gs8KuClw?>}jW zeZ741CZ!fWIT(x>^V?3Yk3%x+UU`-5#|!vesGTe!&&QbWImqoS8O50*Uvy2h)l6NH zqSa||6Fkn2Pm*UB%jwnX-BQ9Lxo{Ukb0T}cXUo6rH6+H zv2lYS*V)?@306 z6Y2K$*5*XkLKe!^r?WT4`(~kj`V>+_-Lf^O+pAxk%$!- zI@}$7eBY?|SxOFUSbI5-0*krs?w-a#kkqpfY1i2jqrE6$hCvXg%F(TDCr+tir*uq~ zb(j_IjP3--$z`5n_e?9=5I$$KH#JIzAeBp_Abm5gL^1Hk)up^Prr?Q3%;APnMdnVk zlmNjH18W{V7|KV@mao2UgjV}jz^8(I@}m*XJbV;Fe-12xhjklne^OfTGOa=*&dUqJ zC9B*R5g&u>rr)(>^`yWMQS0$91tUBa`!~teB31F4kpne72xEZfj7%sHz;NV! 
z%*NOpW!@OW7@pI0;6xbx-1TmEXD`^NBr`(Kqi2nCC`n(8*5mN`?^?^hZO-5Ij8g)o zwchoaO8fip{-t?LUbg3V_d1mkSUrzN9;0DUdR0zXvEF4y-(|+FWid>d_99s-vBT7& z`O7QtiaXqB)-nfXG-ZdP{JgVaq9v`9;O+^%*9EN$t<25g zRC>eKy7fE=O1B>(0>SjLxs4J>EJkR(6GHaXaDaBlun0eaduVHl!Cp3|;m~hrV|7(t+bYy2|KC4PFz@k@8yjssL?`a$xi8O+xn9F5P#h)JQl z6iquzfTv8PU&mXQcsC>B{paiD|MKyNmukojaInIJD~7&rg`ug*zC zK~!yYHMKH3;a2(y1%ZKQk~?aA@bE+sM!@j!g;ZgWa}Yc+2O3BbD8`jfr??#B{MGBj zG1l)=a=bMLY1j$e(RE7d-Rn0~d^#<1VQOEfHJKqT?@D(B^R6xeZ!f)iP1LCQ(3j&VGzX2u?vn{0j&st|pRd0<2zH{2oUq$vOW2sk(0v;ai!Ld3bV)nF{w-m-9E=)e{u=J7WE zgtwl&%yLce4j^Af=n+InVkN6k7DnEDx!uqE#>RVF0s2XdqmnhxI^p=?H+X-K|Fptrv0WPl0@fH~XVy5IVR_BMjW zfsbcQFudFdU0Gp@VIURrX^%Ktcz#kUQ%TvwF`|lk&lTRigDyje zunDCTxllCdh|FENyyi+l%f*_LXM!!+E~LBGXkAcqHpUPA{P*GVcg_8%MOo2p*Qpg& z=kX;Ya10}B9FHdEY(A9Hv`B4U_S>R)%D~z4$LF`p(PiFQ^P|9%99hAgXM?LQLZkhyME5C&fN|*F<}N)lMS+3%k13%4 z^bg-H|NO(za+I>YK*OAp_y=rs3RBE#hs`g)eZ9PS^D2dHnlJKD3KmqQ%+mE(M$3(hfTrT z^ofWTM5SKj;qC11PlQp(oUGVV$lE;%^rk0$@FFkc%H|zSha33UZ-W9*#LV11&lw|A zUmqU!%#4M2aGL70HvGbC*OW_74su!ai_tD}$6-LH(sv^-<5dN8d-3kuulAOMgS`Lb z_Fl$}oeZ`lb0j1FV7Qs*x;KUnx)WivJuK=SIvc|PycxG}6fKZVA~NKbGy-|S5s=y& zWJY)Xh97*q@{hU-9VTzOU|GI;dyvzh(w~DUE%-bddb1p~ejO|AWhXDc`s$nI>vzA1 z_Q)fCx9D_yAHlCpi+bD_+l}vHWhuMe;2`=D{`yow;p1ZtbueibV<0$#WB;{t|1)-i zwYI=J&ejjUx3*ATopq}=5;<+cYP5$oi| zG4=qmDFs77m*bf*a)RkTN znKq?7gwIlK75(Im{(J(F+z6{5p=z#hdn>2W?+Fo&4sI#@>zfIMAr2PS8isOT>;5S; zHC7mlvloG@DCF|GaeCf0OFCiza6pg0+6)1^J6}@r!cV~$14aC=4iAkiw($Vec`X*1Ic|Ux%J|Qg^e5G)YlMs-{#0t zzP_hCC`FM7xkty|t^$>CHzzG^c~?_cFbZQ$B?vE%l^(8Eg8}ob3~G9|CaxU|39?oB zFpdb85q#`dJ_0C%Jt$Fs@#?H&>_DJriYY=|b7{^@JMlqr}(TcEjv^hH)dQw9- zyT*<87zJzCS`~G|K6qe2upuxACA{N0#u)WFV^FHmi{H&tXc!zRO6w6Z0B0~Y?mVmY zkprW+8+R0cbAPti8oFZ?cDTf+qikvDJ`vdv-p5lTBYK{30BWKTt;gr-$>pIrp&!>g z+k~6$>srs5kbUDn1mmmVAhGiAE%ddX< z&2sSSARMccNx2J;w>C|>I1#+|+ny* zc?u#!d1^I&uKM$h6vMW4|sqxt0gD!p^H+}vFc9639pPUv_HqQN5G?K=V1HjgcTSEsF7Ke}4=j`mcxd*~X-fG3;*el3z#b#v*^faf(tc248-8wm2Y+ zqr}i8OBi#naz6Jnb|SAam>g`y@rKh$X(Ve;KAklu5%u`5bq0Yk{G>1MGY}MMigLU6 zDPR!Th*jqxE&GJH0shs&e%6w`;c}u`#3>w%p+JFvB8c1vDUXjg<7IupXw%h*6=eHG z0LjWiWP5whr;_#a7op3CMO#u;mXsQ0`{XAi*~2L;41fgtZIh4Pn*(7RVZ;>M0Yiu^ zF`nRS&LKpzh0UI|mvWbLnk`*VB3$FY%sZqoxA#L5;#VwTo5Iy{mvMkFg(v~Bf%=?s z=&AiK6s94=skV+W;Fr&uT3{-nx0od)ZzZJk3P7BJCEyD~5WtA$X+nqR8LazB@oGwZ zu=BXsvLi~dz0G6a{{I#AU(J?fS(=`=T>H_=RYYWFWmi=X6$ByG?@3q&ObIc*WA?BDP(2V=nz|0g+#Tl4u ziWCDHWY9CsU7--;9)HC|7@4OFn8=NvO|ymT^Q%6N0lSB-8iO_&laz_~oAngijBI;O zZH`2)eXKnZXltnVq% z@ob8Paf8AcMPu#aLE9?f&l9{K)5Sl=Z*RNbyJ&x!KwiaLxBad$4&xb<^hM#ZHOjzK zV|b)r^i`O`!q&pU9_o1IbZNUBq>OXp^Z*FO~Kxs256 z8TA<%Gyblh=uB6ulINv}DyTV_P%>lbWz5lZa#@TteILgF`;?g9005llcb@pHVNWw> zphK?F(Yn{Ig`mJ4xj{2JCB1W-@X1WHhDNTL<1D_3bY1g*o<44mG?KEb7OG?@Z-P%PknDMFKTlI-m@)E z+8{nXkFT8x&)Gmz9z5f{-}Nu@_Us6mGcsDAy>t|Cvo>tv9dopRJ$m))a&VY@KT8hr zT{9WE{>TwpL)v!H;Op56)%BMbC(GY|`epg~=ackk-{aYK&ynNDy#K2K2-2}yq+08< z?jigx&ur&Vk4{%V%)n#^973auXfiBRj}BIN!uVb9oRB%it?#rv$#_6E{bGgbIXX11 zNKeEIVpMy5IhyV?d%cVw2g4K7RTSB#d8#RUjLkZ67Q{1=Ej&zxMu2bdbtN%WLx3@7BlXD zlOK=2=bR7vsejW@4Y>c3tJJysuw}mp$tTwguq}<&$Qn;-B08)N3)%SG*tV9#{iDW{ zjcWdACFL_kdfD5)|5Pt#CKF{UxXSSK71Zyd|R%1xP~@_ zP0^DAle`mOdK=(722tyqjv(?oTmK}_V|!0ie)CBn8oUCP%kvOAFE1upJ@-`^GII7O$CeTF z;~+Dr(Ka`PGbW+t&6L6Bvk<%*gm}~9{JtL&a7um?e(8m17M%$Y+ph2Q%l#+I4{r`8 z%%j_xXEWB0F(kya--O{3DBZAl?>b)@Cm~#HWs1R)j8Vwip3XM9fWexB!Bl~$j0ff$ zfELJs>ZMqQ-m{8*1^~q%<9Wr=ll$qlBG%O?W}atlFf{sMZUu*O?g~zT5xWj1N7w!4 z->sjVrv2--*UA5Dl?$+q66!Pk`h5;OSa0f#k<27F22qJMGB93e-!Wf>hl)#PI}M&m z`L8tz6ifm)9-Lx8peWE3SE?~|4!CVl zw_WFW+9sIMT)QKx=w^UE^%)~J2Ubu}c`EY64ve{N$k)+SF5YJkgg~M16!6!^n;Dr1 
z0a-xbFY(5w#*)5WKDEx81r401JQ*K~u(f5xllMt+xL4MvXZA`F^WK3NEpCUP^Xhxa zi=tpujCk=lHPyA)t!EBD^>ZQ;kqr-ww~b!CH(k&?V+=AD?fstT8nXM5SIl5UL&kef z=R$P&u;MOoP;m8U%G#D__ewIrxfN-AMcmT zc~-pC^^_qH$-p?<{XWHsSIxtOD9*#27HK2=ag{J09td})XHxi@r%>{Wgn34D5EZ@+ zz~Zx!z2sdZDdF(vPq({wbL{Rs!?oQqM@N`^1qf0{hw`}%;VvaD%IPc%{JZ(g4xjYS z=+Wrcc%D4^y6ol%y*%0(-LR$sMNXPWYqqhq*Zdf!(Mm z%$+<<4jKR~Z@n2GeZIS1&QH!G@RvmwO3}v80RR>KPJ6T8|Mc{_pLfw9C#JE}Pn->H zUEO<1nX-KC#tMeL^}mvgkm#fAnH(mF}leYe0b%qsZ*;&fag4=#BKaIeu$BpU;+0 z^#Y>E=CHq~@zC?G(eQ>GH124^@IRbG;$wgj#3n_!RkuSvj2R@#SgDYvoHgGkFo-7~ zK>-T~e9XgrsOdm-qV(s?;|QU9nQeHEAGCeMRu7fpjz;9A>jlr1ki%0j5t}#R2=X{I z)?`s`EyAKwJvA$9PW@M7^&kWD;TJRWP!X zi0*H+RZLJqiM(g}W_sN>9w8z7voEf?|DEv(J248YM>@x!vZWAc9+ir1QAN0(PXwGx$9*oxRp1tP*i>5k7Qq(Vc#&FOh)V=2XqikcG9ReA- z@sb*!iy1;qNLHaHjE&j4_LmW({3tWu!Ib`|c?3b2%Gbs{X@@?-teN?Kr(}9|YMr+L zU{Iz2oO22&7CR2MngfqqpKYB6d5u7rPE_hGPR_5UFh*-mTR$Eqyu2X${^@)<_fS+! znTayC=Kr~QVOHZDk0*iDZBbOnnj0q|Vs1jHs+IwgwO{pG7h!+9R8d#9PV!^=B?J{I z&x|#NWxu%y&!c$%Hf8y9H2YiY|Gs&jM~{o1f7!pU)Qd!CUO#UWutXU(751a^l7%R3 zqZ6v-s~Y94;-{3#tzx6rfJ$*63j`Y(O^F9E*m<_g44l%T(I?#}~X0Q>-)2FD`*IwCRMm@ zcVYqpZy5!tr# zG>!2jt&d_g{4cd=Zt~ESJO@L10dQkjQUuFpW1AM;$%E7!uUQ1`=1P=yO@G?QZ$&RJ zPr4>YXm=}}@MHiR&r;;@rf1m}Czb06YyB3~UKyVqJ&ZUV5DSg|Pz`Rb*SCG%*n#f> z@GXUNDV*(bcV~X0gBeAR+RaV;!Fdve4M4RP`v)&ZhiHl=Rne+Pujk9t=pV6rDf~-@ z0@a_BO=%ZA=y_OKWZ`qDRyt&I{*7_XwYPqaj582xbe-SU->3gTJTY}g=@O38yZ2|w zoE-d`sw8vLYWC|td8%+R8FuV>_EZg2Dx_yJ@LvLvFUA|^7RQ+bnAYgf*F5nL13X2- zRxsz@(I`15v2yKp5{O!N+tF3lP@3=Z{AOhGK7e$WT~!dd%5RRvwJ7bt*lVZr=)qN=hqi0xCWe(7P8TX_ArK# z-}xM|cq+5!G_CP4BZhcu1-9_i#~4vAW+TNHF+>!aJlo}kJkJw624Pr=gh3I{eGDWYbar|*o~(W68=FpR<26TI3Xf7wIE2#P9Sdo-5Iv7T z3deOx1`H?&Z1ldf-G$L0GYg;tpGF*z7|?Xnycqb)X-E(n?Y)DYX9<#A*$Q4he{q7; zr+|p6`{qgsQi@tEj?!rI(aIb&xZsVqG^@@e`f0+{Z0}1kY%W(9=MyS&$V=afYU{NG z%P*L~%d-Fz+jgv71eo`s?A2(l`MgIANAHtft$t&()pNhSOo0OWQLC07Pp=Pp=OH|BO@oqO~jI!|S$>{k!j9 zF8}qv|K~C4z>gHr8oK?m92^|>eHk7bL0r_u+ zMBGO0lp;tTZgwi8bTZqzCY_qW<>d|C;|03*+k-WQ`bZ><=a?Kn?_~#&D1(EcRE~=R z-sT;um-6qa_h=2Kcx<+3rDu~L)#_Tz&bF`g0UaW8=Dx!d(Ty=~I_Y7l%D`vu(c>IN zoiFe1E;3*oxz!Wr2%KM>cHbO{t~>sqW{Y8K-D6ba2kNJnga}e|qdVqLKYYfa!z#8A zyH08XXql~Y$*Xbx_Kp6{VUxxLLX+6|)tZT9?!>3oxrxW8gdzDhhxiAe0nq3fJHH&c zZO4?>xr)IjN}2B9fX!a_?ui~w++!;ZV76OQbY$L6i^i?M@v|{L%AV3sflJ#H*RA5} z!1^zpYx}t9sq{E-N!L7SOb?HnEH6w^$#~9#J~CPuwaMCJh$tp1Z?YfCWrQ(`VvGBX z`_*LKUZY$DLo%%F`8sfM-n>L3$Tw8Ch8q^d!#B4kP+r@;D3G3IG z_T>0@_EpbzkA!28TFQdJJc)2TQ(G>!^1gK$($C%@GKy`WR>JByMMlDYZASc3fBDn% zF+K#-cA1Bvvd0;iX|))jI13s5_2;wY$6rqpa24M6s>ZO4L-**m5l?=I5sWJ+!2Sy9 zs3zqt6LO(tTOV4Vyf=nFPaxF0zY_yL&oBZrDI`Wu)B&4K&4G|~^uPeRwn3(<%Y}4! 
zC5$1X#F&28lRA1cju_bvg8s|Ms}*Q$y`aiD1gL*w{+4Tmb1MH$W|T%k>jFA6<@7in z7{Jr}a)%V0mb=zK!Qo}XuElJsC;Bi{3~ss{^D>(8kra>D?8m#&|sT?)9YW{G5Yd5G^?xA0c`;+~o^!nvBnV z;}ylSnScrDYrvz=KI1Z>Fy2!%rB$zV!$b?C z)1(9Z>AR}*?|Pj^pZC4-u5td{LjBx(=kfbh^SkO^_pJe0G8Rf7AU)Vgk){Ae3xHHR zILNpg!x-BEd0XVH{M!#_IU>5cHScto=vV_p{~rH5PRBi;s3if?T+6ex=-y3P z-`(G&GjHbDSo%kg4C7hTQR%u?gVNRVmVswT-ha4ioDY|s!0wBqz0q-Z@f02QIm7(n z_-xV!U!yxm3Sd^@KKs*~zf|4GMt7HI7Im9YZ4S)gfxYPQbG$_ta2m!CMMpg4k4Pp% zqi*H*96TeY@NLqW3Gs^~r*Ez@xW9L>>;*Q+zZwiDfzoA+$hUZ^RZo`K46>790i4o( zcEhY4;Z`s``?_;VZ1ZLGf43>di3juspi>y_U*G~-(q~ir-8J}1)o9Ca^NBED_HThFDUIed*bo{^tdWVyO@ zoUHDHh&-vC?W0k!8YoP$RSReZ2%)rPq(8A@c=P1$^2U?@dZZc|@S0PZHsd~ol z#-rucenP&@%U!o?XrFtKAn8IyXn7NpkL-SPlwumJ<>=mdC4CMQK%}wv~Hg6i#MRd97`FG8Yp__KAt&h;4Zhar4 z=~T+QZ+r`*O!&F)h}fh;W-hbuyL+#NZjCZ;f{J!j?RqdyJRKdzh;}Raf9Id`j6*_e z?%rpVO7s~yjjDRL0}r&)S_IcU8jEnYgMWm~$$&^4quIn8W8djop7<07c8$*&<%#G- zH~stXyH0&Ugk0`2hIhsRWIXAMTRo4rWg(-DhB3p??%!O2nE^ds-RrF1KQy+p?r|3B zPrCFjd6LFbBXk>2SSPgrdJ3T5Q?i(G;e7#$cKqnI;qmhA(qaGMPv0$n_`~;o|Lby^ z=WVzHYP;uJ0e`>z{OhmDt25REv|CT(U6tz^(nwW3NSAHITcTuSJpB?4uH$byQrK2x z03Zuu%$N>xAQ%D$(ur)&!4P5Gp!A#n6kf$kbR+pD6J!l9(_eC^YM>}|hRWO+G&Pa6ttokv=4?!K1zDd$$@rKa;{Z(xt+_DvlRDH1FYj{-$>K0O zmC_cY=RakXn58HB-6!KBsTv`erClL`NxAVRa&Ju#HD1F#Yws|(nQ##@_Dme+x@=K;KD&+Pi?+Ho%Iwp#Tw zKK$CV*FxLXInv5|Gus3}Iv@Ythx5thbIPQ6f43!cy(qbqH*gf8$EmS>Tr^}ee);1c z{y6%7^laCOI?vJJUq3CUP6$nsm#=rpaQbT^wfE5`S~*C?UOP9Jb5VSN0R6X|U96|& zFs4w8{4C|qAZmcau0Y-P0yEY^3yKefXn8{l?wn05tYhmELoTfa zJ#NRaq1Gm>@Q?Qf#l$zGg!lIyWUo0zkLfv7-P1nb+I<|-v^Q{P(L35!BH<|EbVSzC z&Vf0qK`$<$<4|V;3>e6}o3}ZgpWiJ%f4Eux^N%OXRZ5vheV9OQDKcqcL+Z!E(Uro62dSgh=V#^6a5(XDGUb2D@duxl{&90=15owx2- z$flprcy6C{ohU_<>DrXVXZzm$(lq-%W3BMzJm7Mj5!7jH&K&L*^!Kx9G(8F8DfgkM zPEJo32kQ{z&DLJ+hNubPq0MDqi)j<*6qk@2qw*=H6-^=LOnZYdndnBCvkm=j9=`Xm zamz_cs~F2{^BM((CYeEBHGy8j@@q@YZqk3-TdRW7NQ0-NAq|h;*r9H7T8*hcDUqIe z)$hkC$-g%L|M&UT^7p=XWv<=xb9{f>=MzrvyF&P0H;lDcbK-3);8d(74aN%};~9Eq zlz}QX>~jml$eV*20^!{U@wX0eA_4BBaBU7C>ABX2qL?FQo0}Seb?w_(=&* zC3FfMe~=?7Aj%TEygb@nj`s7Chv(K_4BOq)wpYhxNqnW8H)JasZDdFeFqpPSpu zbnTOLoRB!f6s1}x5m$y~U3$jC)HA*rz;T>f6Kz**UA8?dCD1c7@)6OvJBG>75kwEnmA`&9T``b^J!<v$s8LW5u9fx*wYm5>Y{b{ zoc;|ycff@; zqG{Yw9%y&l5w>y`J*W337tWY$CA<-TGbE{j0fW8f3mL0tBcK;&jz7FxPTyZGmmiLY zDKrg{k`Urvn(xVntL5#7+vWI-*LXVx%db_SZpHwu)fD`VQP0TqeTu_(wIZFWU;|E( zLAF{FUfq0Nj!$ovi<>XeAOYz+`x%n`(EH;jo870mjv(6}XUmO3n~m`t=h)O}>3CM5 zJ6^Zrd-oC|O2C2x2N<7_Qf_D&i?;^fLUR`rJ||dmM`N&!Fn5p99ebKmnh$_vJ%x-2;dRRK>K16>c|hM8*nkVexv^oK_@p}l2Q9ecV;x6~H=YH$ zo;PM^-A~1Q_ZHTNGzN`AlXK^Zcb@?gJxes<%Wv0y z>EEAP>)Uu(pZJf-(ASKEXWjRm%bxf?-u>6s_g_;;zou(0`^=7o+2&?#=;!_dDBcr! 
z!;|zKpaFQ}IpDIHO(=FZS(|?O=?f9T=TDkh*O>EmO4SzI9yhW?w@E39ut_T^m~$fK z<;D4Ub+&>rnDVmn;KP^Ae|u*mBj;G^3_<{jQNX*yHvyT0{lf+xP{?@e9JjZhj^c60 zgH16x*VrRAsQ^6@5y0)VFt+ixj2Q$wQ2xps;I241ag>+!v4n zEa+jLGeau>Zi^PZYX{VI-ZoEteRe>j@84XVFYn*KYYYrLla$`rI_N&xk-Yw?ivJS- zMaQ+aAuyB^0&Mf-IU8$N!PU@4q0ucs+A65@1zGXWcrtKBX6O!jfL)WPk4;9;SvxOL zK2ELmlT+8XgU+!P(lNj~w>l1%tw3AG_G|McWpqp26TQ-TOZuc?NT~oxACBLx_A%K9 zR_yzqlUt8oq;G&UEyUG>-OqU@fIpyOfX|Y91(&8pC!L(0%xR#0dbKRs16>Yx`YFm| z9fxcBUlhug>zV)P|N7tjAOBlkAmdJ&3>66lr|f*c z^_b9xJpP);FffbH6a3sD>@9q-oSfZ^L6YP8um1Gi^8K5mQP?J|NrW)r+G_UOgyeT7 zhrcq!>~)TynACpm?Y)gfN3bM!P^KaUp9`z@h&%joG7LKlj!7Ja z_{-ai<>cbJdwiK_hE&0;qcyj|XnOWA7@|gahHTAYGdw);P+&r>IS(fP#Qt@_gSUqs=A-awUuZ7_Z%%}14V@={T}!ZR zE3r@i@2!W@dv_@H)UUxh+cVk@upQ4HaCcbWS!u6|o(qcqwG_u`agd5lV*H`cs?}qE8s0YSsF{q#Q6I*P>Er4NU-vo#saAP2r7N zkOU2QeUgw-K)~G4&k*__UJ>DFSkuS;>UQNthqK7l|NZi6`B4w?F8&B~^}PW`WU5Ku zwRq!A%-jMW8HEX@_ZE;yrp%S$o}CpLmlbLCmFS?Tj2xn$-xcCDsFY!*yft80fdiiQ zc=)|P!rna-)pah>0sUBqbxSH9q7tKhit}r{W{cVsDmC_rUNnR`bG>I`yJkJ(ivj5W zjKASz<(?nD?Stfy^TLl&hyUCzr{;e%gdMPoR^L7sg&b(U_f>M+MV;{ z{Co@zFpm!}&aalOKmh)o>hE|=a}!Ou0QMJ+O*%uTGkVl~=#qN zV}6~^S06#Qq8%eEim2KU7@_YcBGm1qgJ&HB(5H@hjXyLa0x0pcKO#>G3AgtGIz8h$Iw};l zEqm=X!&zM&EzQl=t${!U-C%8jG@TEh1sJ|9XPPsuLOHZ}XaP+eVeLb_Y7yp?pl%2a z`r|W7@Ghh|4O{xMW9sHuJ_@3TAn^9qZWGw);TXyb0+zGM`{=LGE`=&Ye3;N3z1W%U zb1;yt8}@Rm2K@TvF5z3Zt*8pOShiyT8FYs5|3=>zp0u?N3Z>ldKRWw8#%=6!KUA2p z2*=S>jFA(lzD!8>%b-kq8*d15^n!vhBn8341$KsrDIZ?DbcW)v)zcOBioq>-GhlGK zZTDN2)9bZ3N)Famzr#HDXHTPpCOsLBM=|)$fg%=NpnMTfvspT9(hd1*zktV0~?BA6upgt z@rvPo7dYek-@sGP_hNVn7$rZ<*-z01M=h$okfM-tB-oUY?Qo2c_Q1lPeKUbkeY^@R z@FQh}UY=?0#%`S*jxmser_2NB{W%RWFYEbpVMURs-#}7|c@%;%_Y}P5S-_|8zJk1y zl-Y-X?B7yCe`ziMV+!;Cm4N>>KKMDW;ccINXdSNlrnXI2-Rm-dtk{lm=PBPb=bP?z zn@oy&Y3y>}wdYOfGMZkfmTv?+bt!<^5)^;mKa3s(Kl$I_&KAcp$|`-_bt`seq*$o>wRAM#}wjSV{(X#1A8tS*Hu5SqBQ^` zG|ywVwU1YS6C${2ytjFs?=mWX4Z!|S*O$wG?B{<;F(3Ebo96K)BSt=wbB^{zww5uJFAknB-@i%+;e{0Hx;<=n_-Wne-Fmz~ z%K+U@PF>+JFR@3B(o~=+&1JU_WiHojEOY^7t&T!*Fuf^ypir0mYa(OE3~%>U9@=e2 z!j6V*ojdcyfOrN)v`jwwA~`?3%(&;YIG5vjdcYj*d4AgOlsH(<3ua9B_YcN!{O`3a zw!yYuvG2z7ozc7cWaxe=^g9N;@dE)#!03+1y7GJXJvhj*>KbioZI!0aHj;PVB-E)N z2je1=ad~wS572i{`<}w&XcFH{hr|ZTu$z}oY(4|T*8*pa#d2QaH+%O1Tanr%w)wnB zmw#K{z5RKPodv2LSjDg%2YfDXY|YcqJ9=j72>LD$TM?V*OIgrovu!XP zB-H*md8d<-j9F8T9@)hU+#3gut&!ODK!2tmOMgd)#+xE9_VfcXet!_1^zT4>2c`jj zPPhZy*`F9$?mvF;UA8AC6n2n`DNw8{b3=yRK{%Y|wLf+ZgDav=rm#3Bx}4`l$s< zL{b7a&wtB+-iHeAQYcW;Z3B>Bg(ezhe}3DN6!8g)H=!g7WeiP!KOAe;A{VO~acg<~ za(9S(F=3kw)753hS3X7=Q7A}dX9QOvJ)Wx7?Y>V_zRxqhawCLnJt8xn?7Q+qeKo?^ zcYYTgQ>HP=UBWcQfn8`Dn}P@ihJimjy&r==-tDHtN}e_c#bLtNs$qGIA`~shym+V$ z{kNAnCqFHqR=KycweC6v?^GjZ>oJ3#VJFnvJA2c`$zD}q9Lfiy-hW&VnYgSUB{=F3Eg^_0_E~N#qo^tONdu*ynDBnUEMPt#2=p*O+$bZ zidG2A%`h{iiHD}SL_Z0{FgkiR=JCou5wI^Q;3vz={k=e-gEoXP*P=*~9G;1LISElJ zg3wMOWy*IB2x{yU>PA|PpiU7`OC|b3kS9HnAWXl3hR#q!*LqG{7YfS0<0)nw4i4P( z?AtkuUR2_7Ue&Xiqn#e+Ful5)h~sIGN=g@6wM9z z>7Ic6ZF2+|ZZZZ6{anGQGw$}(gLlBfUG)Ck{l2vJKD&v=f{^YZ6$e=8BFAtuuIdVI z{OrApXmZx|Cn>Y{_t(oW>5N}{@0Zr`e~l0RyLZRSe}8he{13-!q0jyQGQOAZrh9-?$6NzmXL-aAlG8tbf0)sKoD5R9 z4>R^JW?d;`VeX&bo&{1rFWwHzF-O>XD|)E~pxxEMo*YrHzl5e>9>R=dNq2S6Cd z&*=xPR8{2PHy*;IE2S~H$NsKDxWGe3O_4H3Wp;!#r@O{LuPJ2P+1^O+>@K?pd&|r3 zj>gFAanF+!qSsdf2IWxn?PWKapqI$Y{k}Hth?ECZxILhcTxQ_VNS~jZ3 z)MK1go`1*zPws~{psyQ52t7?J_2+V`q3$Xt_T;|`2A!lJP>4>#3)@yWRW1u+I1<-Nmwdy-wajV)G@EQ%(ZD&k$p97BAx#z95{`j1_DMai# z!NRF|p%@!8@U+%NMg4B7Se4dSr@^3etTeOfm-Du=x^R#VzIp^#W&rnr$u{DM-h9jsfmPuo!n@JHX)W)W{uHRlqf?ZQgGSW6ti7r*l&COU0wTf9-XaVYo{^C(6)w zqyokb>_0S4RO6ArXzR%kGG4QV=RYKGhpm15 zkUV_%YIk|Q^<=zdpB0C0zXHVo|N7iD#Or(x 
zdEr+*gXgAE&WSzR1FVya=(F8-^f<@Q!uTR$jzK;7a2j8U5JbZqB$dy+@s@jWboNHy zd-^=^oN-@^a21-_^7?5x3nU&N|JodmN9Q_%+Zch`m~8Y29l4pV*x%o3?t##N`B{qp zLVuHot#&e{UPS~|CqBlXw@)WAwDPqlXU7Aw=%2UmeoCj=f}1W$ciU?ZP^mAN9S!zR z66RLp{NgO0O;7c2c7%SKgHU?O?UMxt)!dw!K4(WTo`D(*eHF0K4`r&|pRZl=cB2Q~ z#}fv=E;ybepX4j)=+Tqcjwif?yh%SJJyrURcl1C!{M2}(wLT=q776>qp#kDVxa{p$ z)ZI9#6TBIpaw4Ru(R@Cf9+#~4dFEYZ*@ zqw!O=8&62BO0hoNwymriPyXoR^(Z+?e=Qy(XbeV6mjcC*iWD*j-8B5yq&A<+DL>8l zHEqOUTtY7<`g;%F>kt{wPj36Wqk|C|UC^!){L}O6sS<9yku8HA)7d}jyZss?W#HYf z1yB`gAsy+dvU0*E{mu~CvL*cE`9eCOi{<3~$uegpH;1W=jyYAE%k`h8(SFS!A793V zF~Qch6hi_nOw&DtdKec%sgMksR5zgO7{hB{IfUIBN0V>MyYmd220qC4Ew3>psI%Kf z3P>Jjd;4k1ro~J)<=-(mBYoHSZejq6W#$nRO=M&iJ`RZtkcRyH!v{TV5O7Md&z!O} zmEUKIF4QJ;G0so14^j3RET!{^yiK_dU8OmaVNa9|24@$D*%+9v;K0Xw~cwa4K~KL@J>C59DIg8 z@Ft|&gQ7zM(0aP}cn1x|T%;2EukW=+@yl$de7e#|zj+D*t#&D%gjI^i{JcjXC(qt9 z$dH_wGzSHt&8uAYeXq9}$W*xH@?fR|Ij^t>e`=s{q0M}JWoy- z{xn+o{5CpXHWu|V1JvRdd$FJPZ$CFSR(YV*<*lcSVrwV2J#`z++m<)1!P|5)<-~+q ztF1hr{XjAh3H;lg1MJ~jdPCS*K_}y3J$CYXe|h_EB9Bh-nqy?UZuCO@2leAeYlSDC z1JA9$2+_gb8uDihULIuJ8{75W^_*OJ+nC0i>w9~p**BmTzDCImC+@=QW=mxY!Z? zC1?0OdSzg*FOy>t#q_%qGo2{6?nnQ&JPtHVd&t*2cLP1vBU87=a81Ek`dc#?fP?41 z79Iw|11g?_-i)tfp@p)^l-#?s7`!K~;SYnf)?_^#J#9-W>(KMMIlBi3;P#_NXACJB2SCz|JzIL?=0*}YyZuiyO8{SIiSp)daCbe`oz=L;XMguj{&8$qW4J(W?K7AdZvzJeVM22KoA*o^t_^tRzgfdRiZ0M9oX*hp!2nHw zCxaGG+}&j85-b?I0jvhTpOI?*J-b2rg)$JT9|f3_o*ZBE8t>ruXEr83p?MyJZ7Vm? z$hEuM+r2iIcgH93I>loS(YyNsE1u!s=HKJ{Hy-4Xkt}x&fv1R7qw9Eg6)*qNSkAi7 z`zB_;{f89PS^qxk9=z@Q)=kL%w6zfazl@(Jbl>;aj&FI|@r^wGWiL5$;dMFs+1l29 z6s=u#{aH#(eqI;8&nYDBF7ZQpviIWY?mM8MpQrBKy-s@WyMDg!_mkG)egFP3#djJn zeGb67*Lmx%{)JwkJe=ZYcfi($tw4BE0~+3G20A6q@Ke4GO)w5K3?1Rl%hrW1!N=3B zE&X%G;Po6{+4o0>%R#zF3t#tDOw0M;UFo|pk&wXXcEzB#g)_CvNS9ik;9?Bz~ zJZi5=^Q70vKlz&UO!L~)VzzrJG8J~6QzY?wI_~X<^KKI_b)9}0qd%gF@T;aeilwGM z`m=8t$L*LYg@0#<&H3n#jpvV+7e`xDVSjM&V%a%-6F;xs869`pb&L_?!}|qh4i8_< zSV`8LtI@p4n(fTwc!i>(NzXFwaZH3oe>F}*?&Q`&tL!E}YZpfa(ES=8&Yt;nFqzgY zYXuLFmshV|whlQJ=f|zh?ev}Zj1MPg>BG;Be}D7|9{<=D(Q^@^~uzuJo`BfujlQmBT64@4rJHacW+*gF2pYkv-_LFMKac5_D=Auab*Y_VLz3}M2`9J?3{@e42 zJ%iZY={bFCZkBS8_lvPTT!kv5I?6wWvftSO{j-m_e=}GX>EtA%+9W6RAy-DFHL!np zeYotadJi=+tjDLyjV7ov3nLnUFbtQ|#YQ2<&?{qT#FL@g(GoJm6q7FL!kgu@1(9z^7PH&f=j&GK;OF7A2jgKerU;pV9Jv#vMMibu64V9<+(N`{cYnYRFt@{1+ zL+JEl#=YNFK}sD^SoVzS3oZOj8Bv53pFNxuBAUGYDFYMuA>>o9@DS>2oe}`(dcQe2 z3`d@5%^Na^DH$lQb=2U-ALEuTVLaU&eN|Fl-lSxq$iAx}$yP#*@$rt*>Jb;Qf}b7LCE!_(+Iw4YNPVQ(?SUFh&UR{LC|2_^Ch#+GaQ@l*!@a z#S}yf(tE4NVXF(?6?W9>QvM$(5vq6Yv~eq>10)nKRbYdUC+S3J3u)Aj0<>{s=1YvPNdiD?sy`jE%x- z%We|6emO?^9gA<}Lhmd%wJG4d3^An}Gk? 
z&z}ODZ|~yy6zy?q_&%DyjpyG6DnInRck$AXt=Zdn;g_!eYX;)yc<-my=igexzsA#l z$@~15c=T_{$=m3C(s$3|)6>3l(eHpQATRHGnNp#+H$|+H6(I98ITuZt9_r0U(e4&w zn4%NSqZcS!AZmc6wM-va-)ZvGVoWou=qgol^Euv%FBqZynef5~w< z$a^JQ8ex50-hVh7`PkbRj`s{qzk=M=$w>DmtH8i4=*aHq6X~q*x?Ff?>CvbGDdjWQIhg6ElX>a%b zljXSeA-j74lcT-eKIa+JM>#pt27yMl?)T`S==51eT(?spas0s{0A!rv2MC(Gf@u!U z&bD(4q?&Wk-o0M|XFB?0GNzWQ0X9sht_J$zA(05cZg!G2t0#dqho(J^M~y)Qm%Px+ zY!^D%8=oZf%=G?E7r4Q$Q-=~(MG^?WiB!p_NKU_TMI+YGZ|y_wv2)*0UO3p)W)IEj^glMceUxC(saxu+*$ zh)lkmRnPnLo4sW(Z$k88&dSb+m;>U;+3DYG`;2q(Rc)>)QZpW*VbEbo`NU{F`_tFE zkl({mE<2lzdppCHk=@-1{XW}`7I`cwI3d9Mz=3UT!YPavZ&_Q~ZW{eDi|6uUdAciO zN(soTlZkh~yj^auE|90iecn$rp>9ID-ESjCeEL|lh2o5q9`MI_Ksq260 z=ifiwE&tEs)8+s1>xbn(o?R{fYks=O|Qv zMlVE1ZA*o=c{X+jkQcd3#aTP@tG!;n*vlwv{Ni>Bw%S+{TJ$kV24aaAZD*YtX`+2S*1LiV$+U6^9 z!4q=O%H+wrpUU|2yyyYdea9f>IDxf`GoF`tLEwyFwUT%fpYJud*)7w!$v?V?I>@^- zRve+;=ys2ncX_~%L<{31`Uk7?;MKk839q+%T#-@T&R}ijOk7P)f8_I%ZDltDaDYoS zAy1y~ESq}=%ZBiAI>Z4pm`M7rTlefw$x&m&fAPLCtF?OeY-@noP_(%>yuOpn1|)>x zwHcQJ1E3hW`+G;D{*>NIk9v-0117gRkltrc4q8W%56+OZ?#JPIgNS_nS=XJ;iq*%P zo-sxM^L3vOd7+}bBAURxS_}nfT`)4Arkou=zZdKJ%gDPwgF9!@_ZdC|coZXY7y%Lb zf}G8D9JtniUH}x2vNQkgyBEvrgRS&NbWAVeQ&FM~De27}(+Kb60N-8Dt_t)c=YXM9 zu2bMdwr!*Bam$ON{eaft77^JkQw8wi@H4aH1`JsTaSL4X`ma za?OhqO>U$srLUaC2Lu2Ndt329G<`_1@7bH3;+j){&jjaAy12xoY%JgY0|gBK=DJ(B)4DnZBj3F3fA6Xc8z`)ok2w=iryk>_BdPKU3EKJDMT2anjJHCqd`?q^e zPLGOg%S?I#Hjhzgt)w3K$Zm%2&38xB;nKk{x}`z{B- zX7!KfHI=cR;y&>OC=@X5Ko0>5LACNips*>ByWl)`InG z>*<*);_-y2q~9Hl{h-f3eiZ)Yq%q!PdXxmtXfB_Zvy%^jmvt_)BDNMcW_1AM(he&f z=NJ~DVG&whwRzJkx?FPJ^dH%tHnt9$%5ltq8V4Dsy8#D!n0Ic>9G>Zuz)tr+d~kYp zxx7EQ?z&{aSdv+5AkxEAo?a_CFmZF=Cqt{AGdW@ZwGKxrV%7KfA5GXN4u!lrzM%)u z!)^ff$PNhS2k^di4AhjX;U|lnorl?oPrcF|p3(OdC2p(cPcQnM(bk7xKlxXD{WZO) z`0>Td7t7Jh!|1h)Q_I0RJUCij_5RMz;pElNF4!$c!fq|E-+b5ltlRT$a#(-*@!fKI zelpt@udW^X!`595=tE;$6h$_4Aha8T-k9B5I>6Z{+LF(nImY=aH}`8w+d`hl$Pi+5 z?Mn%O>c?)T0lx_fi@twuiYy*?51!h;5@-=Hn{IhD?1Q)u5WJMrmKXYI~URf{a zvojs0ZpO>N!RG$szx}dYoL)2^4BNevQ_TMR9&@=ebdjSI>UXw3uV$@OUj?Np_l#8_ z&_H+!!dCC_A>p#k&w2NIfh)ObCW7Ei!CB80uCsm+J)skFQ-LnWI)=8t@<)eShQ=7s zv1eNX9u1%{6Y80&ScUc|aYDHozm;!X?{E)wB5S%vZuQA>6biOQMp}j;W+dE;p`kTruQ8gsd~C84v)-v z=#7hFRK#}}t#<+*8_{?te%>DAD9qfAyRIQdZ$@^6=+m zt>!VGH6Faj%L9PN%kPhQ8>c_3>}7Dfr}w-78#`L4DOd3p)PQ)HF>S^(vpd4?UCWS) zfC#y7hZc8ri=;4`29UDXnD_He02_*OjtFj^?xoPtt{H~h)f!d~)1B9!*$Vt<3M+ad-2I4*~Hsih4L;*2+0u#U>}V#a3WM6DEyxtQQCg2z**j zPfi-|3RvhLuZZ9Jre7Stoi_>W3sd6*J8vF7R38PDJY9~Cjt0Q+*eBH881RMm*W`Bv zfARGfg?Lx-L@Uz$we;1xnS-Z&t0q4Hfo3+gsquKfe$my<5!ZGx{kCq_J}2(r=*7^3 zv!V|@VHy3B5v5mA@!OxrkfYzhp*{XNg(6JR|8ufI#;nuU zM8UHWPjWKTEg}w|0#V<718e3x?Y+16vcYshP7}lt56H(mj7Ah*yKa0O6L()g{c-Yv zA9@g@48cvW<_yp^1D}G2{MI8XF;7e$fnKFpw|TV(ux7B2;5?)04afW3sI-4_?vQIfG2;#Yma=u+0}?@5J#JxDEbmN!wOV|Lu-vy6PBuahd2a6sIqLvtAA8@kh!7#4pPZ)24 zEpm~e6OxbK7z_o=*G++e1=Z#^H5BlGD zD8^0E5!#)UjRr_Sm|{r0loaKCdFIo2O2HIBh8B5r87C*>Jqe`kY~!W8o|Gc5)vkgy zguebn1Q8j0wt|58-k|yq4L`+W!z+zj=y!G_WF$6w7Uj!e?{2QO7uF32^_b`=jlgj7 ziU~G7$}mIODenZ5JmD1rIvz{^AMd@l24J+Fq8ZaYu=jved^JUcUJ-OW{HD>5QJ4TK z<;Sz06TkYMX9=v2aqnKS&a$6^IojV{-t2ET{%5mQbZ>YnLlhtHZU+*3i9#3;!~gn7 zHTIJ{HbH#pi>{6S-83+r95y{6X!B9+zGEBM(>zy-zjbvVJTj4+#_m{IMvC6~J?#?p zmh1S7+M?`4{qQwfN10?67|sB-&Xn56fTCwA0(<}dZAy4&Is0(2{QYky@gSOMDw8tp zxzm8;@@kxy=~CABC|G=me+MjO1o1v4`{L!nM6xu>Qr{s>u>$G<4lk9m~`(X{JZhcAHDe5PQ~v##d;yjHY3yEtoJEAQgx>m03j@7^{?`uA)4W%Vd$ z4)HD-5^1>3BNZ{#w}7WT04JK`1$*e-8z0%$rvJvbfDgVt%c=P|U|(uBde9m89Fyaz z7$3OztkJm{OY=ZKV+5L2;j8TuzQ?z-#rHRGh)7bV&!6{+@n;}KTs?~;Y_4QeYv9YP zoUe4b1JtCWr8Ax7@+281&zu|J_F?O`A2^}A8b;SiW8N=+`Op7p`RiZ*wKb)W0txZt zVW6_fM(4}nUoC{~reYs89(w$cUTM66Eprm}k(zQ(sT=^3O)$SXKV{W9_LKMEnBs;o 
zrHCm=9kJ+G3@-rECkSa}vd;Z?Ck%9DSMLYCY!<#YV|Z zPB&Qc0F%0G9gJq-xx1KvcicvLs25UO^isvpi59-<}U<1u3n#G_D`Wq zXD!<*GF~)6V(g;{p_p)Q;}c!Dh)IRa^o}MxfEcnox235ELbOr^s&Wadyypry2ngV| zhRduk1t_F9haV)!k7CvVPBEZRmmcyW6UNNg;2+9x7LtOc61v}77{}0TP9f-C-iz>| z{h)XE)+)wMpm^gHrrg(r=X=&xp9#_1>ZDz*idw~2V*v4NpX28_c%pHC=G7&1=kc~@ z4`ur~huS1e)=yhbgw~wWd%A_@j80=PioxBp~99 zbd8g!2O#57;A3MsOqO0Y=A&NwfyBeDN6X>%ljUXfKFqN1Y)JW7o47STQ6awFxE0ok z@>w%-tng>;^mems<#ogXeXo1&ro8q=b5fGcbHdc|gLICf)9tB&A;HUp@Krc7=yLd1 zckw_UGFYCmm15oy^6fP~*Kc2Z5^wFb?mLZF!zkOqc-DX^9idJHaD4J~Yq=&%IR)ea ze@lf(GXbyKNaJPUf8QmG&(b&elmV04<9YFZ$&UA@n`Sb>;n)iF0QdWQ&zC>`(;pXy z%c#t5agyN-Lb}g3_SK1bupHzt9UUGG?a=o8^f(}YnSRiO>vW9_=;-;hG32ZqzyGl6 ziRhrnYr0E*d%m2ViEggBY{jH=5ZBJ(^77U9dEhF>txx){aqe`#0h95R$ik{;)T*c* zh-U(%4CGBZj~@CQ&+7poT7%!nJz&A{T2p7q+HWD^q$7`Xo5~tFe)aNAyv^h1P0Hcx zlDf`G9YvxLY2xeFlCcw2A#)eEUzW4$ly_^U#^vPXEJxxZny)EKMZzLSzef-`gA;Md z5tt@zbca8`fn=#iYXtO|FZlu9@F5^@m7e<0oL;|rxqScT)fmi+92g+d+Sm@xu%emm zuk09E#eM+Mhxc|9hz`)JITJaqy0W&+0i7?&B%a1oYqaw!XIiT7*Pq`l@7}(hw2SVm z8o4P>M85O$^&EVTYwl1lH9zUp?4*WXvt>Oxpf~!7C{106pN*xFTCX*Ksy(gA=%N%I zV!- z*EWE*RA{$HsUnDj7YrCuq}Xl0+6)0dc$Cp#rJFGZ@N8lGzSzCGw#7E!+ZJSuRug8p zOi$?h^T!ZmV^kQkxAAp~>Rulm&faz?{^lC-C`sWP$~4a|<*l&_V&Xk}G*$h-J4T~# zBhWv-J55m-tLLxEWq&tD*xnu?a34Q08bo!U*MD|$9Ll>HCZ?3-1N%0f8L(oKa$U}m zhX`kXs+@FVzr46!&MzD!bDbf+88kmdb*&#|&Wo4g7$cmL7nykcL>otRi}D1gxe#0j z!OXu?%nwtPlaEaSI<}Hxo9#skBU5&a?pmb}rSs69MQdR)ags;N8KI8e@}s~Q0|Oi= zQUlaCh3v^vucK2aeE^K+2a3+Y@x_WSE|S z#^+&U=-xt!Vhj%fpjc~8xoYfR6DHx?_a9Eics@&zc%b85;os&UlER4K`>DvCwXryi zCmxM%{oB#T)|*E>^F_yKak%5qAO_5yRSGSGwFPB|?px1Pad)I((CG1U;QEXvXI?X= zG)?yb_0X@mpnbm$@DW-vl%7Ai@MpajO`ZoH=X?YJA!Rmn>fZo^PJj%KmnJL#_xwp7 z0GjO#;K3#X-_K;WTV*^~V8qjZo?`ReZM@Z(0kfm7J=zOgH<)g*@>{%atek_htH502 zw5IauFBK|1k00|C8Dx1)>mmXt?SL=n1EAA9jq&y>;0Fi?#(32$7_iepI&rPmeh_F^ zw96n3?{&{{C>oC(sC*}aot|ASCzn?lsPq6M+db85NV6&2A~%4NEm@2Kp5!q(##yZh z0n#|;*Pq`mZ-05eI2xAD5jj#!HiFcAIWeLYJdIC)kPF*a177NITsw#6w9fYJ1A#hzO3#_0NFcBki>628wbn%AGe6DEcNI#~ zVfLW2AHadM9eD!)*e!bc<-yUc;l_q4etCW#bG-0WWJ$rW(ER;+^lJc7u!7gjUTVri z20ph&XJ;pY&7I|k@7^@NkIUOUa?v7fk)LnwEc^RM%gbnq7vG;;^vs6?s8R&2SM=$= zc7BL{N>SPgAZp|kLQ#m*(|1iadNpt3k;ZK*1FVvU?wb?S*dGdOt$QTpGK6z6uh51N zSVZa_Ax1(qoW4^qqn2Ay&0IAqn8{13z=PbI;QmUCFMQf9>O6z(iB_!O% zpfYa2)5nww{|J$78vzguNEf(q@=mbi{lY|hC=oF zi5)Bp&=wh%VYRyC>coSx7;xoNBt{}~fQZk&M9gz9_VoXLNG7XW0WdG@VK*T}{8 zh~>KgqMY*6=!~C5QJy^dy6A(hIErT_szVu!Q5(LG$NK(+{ab~D*6;hg1z^(qnvTU>#8~7Vtql-ERM*LZG-)`=&lb@G?v{!k}(~F_? 
zJUZZ^+3J-i7GvziM~8vv?Wf60gL160J=Q>AOO?m}sP6)K4>CN!P2Wj*TTeVrIm*$C zjQFFPUB&-SJiFBm^q25_!3;(Y;GT$6>rT!9K#G@v#hLha3izUf>zT|6^Y#vTDOPU|}GW)=8;I6a?En;Q9uP&v_XcjsV2?mxYI z+xiCpo^GY=@0OjT?Pc#^J%gRGwU=Flln1RP^3+2-*~lQQdgUS~=2^dAW!QiG>F3#w z_Ws>3z247wtcRmSRdwrmesy)ym`qijWJ zH@4EpFJDcq0I=4e11NU^6plt?YHc|_&W#|01LhLIq$z;$b?lGv(rvE&Z0rFFJRwhw zCx8ldGa?>q8Z6$5u2LZrDUL1^x{n|11i<6#@#YE=ja5-6kI0&H`iS7tL9; zn_~rVKU;3Dq&IVZ0vC2=y?m9eY23h$h^<}+uim_A{dx1Yk*|m6NHcX=05*HUmZo>q zdRR8QJ=B?qj?BY*U0$6v?)W~NdwZouZtW9;KiO_|S|UZ?y?(Vkv^9g*{4C?Z+{}K$ zCah5b#KcIy3Tf)a%#>*MVQa_Lm0V#1h(Htugxpv%%)3))^zvwbs!pe$dy97xATSY% zj^UbOgeHZ+JT(A8;Rz3oa^~F%56$O7z~55LO~Rsm{B#`|TI?2zvAMWxaX<2UwuTXz zCIp`DZ3OUn-qqpr$ux<8J26^l^3JWztfQ`1L4d(YqdZ_A5MN`U~)U&2FS z$ak-KXpE`5;Dq)TJ2f6Hn*jrc4l#u5DHL?qZ`tvm46lhYl&9zC!+*l%n=0dz3<_Ap zKpsynnun~W1kf;7M$1vKA~8FgDWCwT$iZqjzKlTwUKmhSzUu)>qB?8606qh}2;2Uy zp5)Chp5Z|YoB8~^7f#IL&8GCXGH@A2f~x&9vPmDg@2Oij zRF~3NjO%Hy$*reEr+b^CSRSIVVb9xW41R7k_npS-z>xiZzTAGg9HnRh0ch6gc@d>& zt_qD}I>_?wZbakmWiEI^DAW2g>U()(fB3`e6xBiUkSp8?~k^YKfd0Z{uSm(2INhJynS!Zb}*LYMp&87*xgffrs>Rs^uo$J`nJ4ADrsx(d%JyqH=gL3pEH2;Apr3H15aFMNwm&^vX@-OgFGH(Y;&tdbsQ# zWsEZLLdjCCA|%ecI6FH_k9{6vuCC(qr@%?{P=B?XA+#p=V>P1sV`Q)=R-m3p-A$e^ zAQz2*m&>cu<^B8jO`pMC?T@H2`6CZhs2wBZoNjj?j+4kF0WKqMYhV8;o&IJ=(?o~vQbkSC|J#SSykX}qTNni>MAlbv9f z=qTGj*#IDu2R(H{pYJA`&AUH8J6}#tJ~Wf$zp*=w55VP+N1<011E&6RFC z3_A!Q}&_9HBcs7*3>+HFI*5|h0#7ARj zv_ahN`?f!6oMG}7iYyB`%2UZDvY*0J>E_-Klzhw^mxpb^qwg^06p0X`etJ_=hQp%- zZ=3Pl7<6;yc+klWN9c#Dzc(``a#fpM197e)euaT+laoMC4D~^zpoQ9rCc>+mdGCLG z4e9PCXd)k}fi#X#@8ozhBu+`vm%l+ZZU$AzTwj4TgalALe#DTqpz=~FovD~kr9hJ> zEflF<^Rq&L%@x=7eM&Fg)lUeR!g|^~cB1u@Xl^T#?PC$Emt51Q5VxX?IS$gh@u;FR zpLO9VLeC-O5utf83@L$()$R3po=Ap0`pveY1V-*{a>y|r6tDBt6u$MPbqgFNkhdF1 za4mjO!M!{%S4KHONJ#L7{pZAFD#W`6c)-k~yb>--T=Ngx_aFmL6t! zqz$x+s?5IlgloU|?6Ws#=>+XH_4Zb@3LpS$%EEZWj~{$let5mV?C;5`ew^#JJV%tO zc^>R!#CLf00f)ZxWE4~0SMxYb+3m)6+ucVP{dquxfw!XtKk}yKAEzB}?2r2&L`%7T z3V$MXDKfxRq-H-adbjZbw!q==5*oIyc#DAmz;r%SbO^*9<#A3yR5V~%N1+WkW+xKRSCf?kqFLwdQC3*-$=VkS|JoqV3F_CGC@z9sy z%LYDkS|bqa9OsOIcn@GCOG3o2-W<*PygR*Yee))pFXJjw2X+FQ#&Q{G?vutZbWHBz z;RJ%-p__=9REqVu4dkC)1~lvgWKr}&NWef#!(a}+7~T{l=KTdnq&H!t_n z#|lzEMsK-!^)%OAm%b2TjlkB5tZQyH4oy=4RLQ#hKH5!X_kg|Vsoiz2h+_d-hx8%P z9M2zrI37^+nunS$(&)?9!4>d+3!HykPLnx$j7)v~oRk`OYoP97+G}5}JCT5~GliOs zXPP0!kK;9`8$7Fb)6+cXHPSQnHoZ@^a~Nm$OEyCudLprNBe&)d&_C&g-_~7i5&n0; zlzWLt@VwE(P7iB-dVVz#20U%NKq;VcbFcrv)!df>-PjPFqqBgG6;zC4*k8?(?(VO9 ze!ulyf1E5g5w3vG4}bVmpz%#362(#!3AC;Sk$CAKFeaU1z3e{VBpzn}bg+}U)|&vh z-9OOPaC0I$qOclV9z!duQB7N&Z(+~0%(XsEhU=c~J} z$2gR?doOv@Gz+zsk5@^Tzt(nm&;d z;O!HV&vTBkh97cNikEEdIXS=S*#V8{BxfrOg(kxfKu$vNXA!}KZR&mi`gne}6QFL?q+mtrNMDna1`!ng8K% zcX_$5t71ICa5hoi;2J>@ad@1uRmk=&5OSN5lYXIe>>$Z_G%nFFO1-a);#Cu|6KFWv z&9H5^et8`q<2&{hP~?3|MZ_3meCQcbk0)KQM$!Td8F}vm)V!WF+3<+=)sw4_22%vu ze1Sz===wL>bpfp&)L-LG$#o9IjLXo)LguoLhQWqJMb zaQX8;{9$?Z{VQsr?`6p19lAt8m^YnPDL*~CVzTa&noYlL?=jMf5my6jjh?rz8|k3i zyQ_4NBe;{a3>BHDXEgQt)1Uq!*?AGqt-uM5RJhAK3&(4W_3HKO<;9DG(IwVuy!pmO z244hxfb`R|lcCAFMYDOw8z&hoMcJz!2P_n916(TWbGQ^KPdb3U7Llbx*X^1E{IUmt z8r?@y(NpoE!Q)jlVzb8k&$-aIEo-ekhkYMeNkKuYouG+C0Di{!h{7yWWoGeZ~&q$FF(&B9OqgKYM{* z=YjOq?)x-=9`CevTibifP7bAU+Y&yfA2wnU&%NEf=BWPd`N+aUTZsZ1;~5hk#z2@0 z^X|1+6Q+zQEaFM%h7yl1zwLER^(9~!?faw6#6`!H-4@8sm&pkOc6#Paj; z=y;G9dG;9gb;_9czL{cH1VZZ&cvba^r|gHFZ3ubl&?sccr2q2E*&Lu?iw5N5!J4H! 
zfU+8|Ee=!25ko;sCns0SukVg0M8wmBj7?@E^cy`|sF)~&&$IeuJI0MroRChp?w*%< zWEd2JTO0mtEZ@I!K*X0}I_PjCq6&RM^MD&u8lU z+9R&HMWOl2Xw)Mo0)V-MqEuu;-tOsp)2=s01c3gx!y^V60vZIDfrrSKF=`AcDX<-G zI(sGDLe^3P6z8KywzRE?-gQ6|#VoJ)_DpDAA>NY_3W|o*hzC&2Q(@eLlGElB;U0(% ztj1mCr{X;0Vzjn|)l(?EUW$=nloogrPi~F!%%}%0RLR;!aDL7MjTntczTG@EXqlX^ z@ig}B7=QAo(Vs`dyO2f^QQA&v&Gr~%>sga3iWZyk2ZMfoV++t)w1@wMig~LH126bH zd+!^!JU3twk(Z;rjpdK8kH$#(-nWl=q$$Gv4E~#!yM5ksGkTQ0cE^8+*PSl-{CT|E zbz2!{bmWCgZGC1$QeK|GI{{jDnv1#=wJHi3MR0T}RLr=kAhPfC+IbQbCAIkCP@|pJ zqxmYfvo9Szs65-&{``7xA{f@2$DFS^p;3Jp7(w16uSs5*7o=tD9IzC>2>D9CDZmny zd-+OXk`Vrb<;~vK(8QW*=gb4q+LV%Yire#y6GI|}lvqsj!k4k8h4Iua^J$FF#G>frs8m=9t=kop3H2F`g-{()ZU+iCgJGed~Etf#450 zH!0*_^Sl|Gt?p~=B5e*F`R@B4CfDyQgtzbBruRf400sT#kGf}uLmf^0d3<~v@1G6t zZl))JDFtY=^P_dY4s=Ts?Ccyx-<=$>%f^`OMi<|EA2^^}V*4DG&zMOv8uAv>5DjNX^KhRiW%T45hKD80EK}C&2c9jt9{@s{s}2P`qW) zzOIYdeIXgB^WE{=Y4X76SpY&W=RTo9`QwNJnPE!C?@RPZFE-DI%Y`DC4BHT6332ue z^R_LA;7>=XM~x@JfOer`=o4XW)4S1qDCMIOgqjCP4@#CZvseGyH;OjTGJ_}oa(Z&H zoV-7cK`5Rz+CkqjM1(~ZD}zmt7U#{|>)nE?o3JiIs*`VwZg+tX^dxv!SEqS6ft(8k za?%8%2r1#{D3YPy{_Jc({hlZCz`&cw$?4UE8m-ZGhUr=B);|Y{+cq{WZMzR-OYsY} z*&4@7aUbI~cE)jJE52~AHT-HEs&@4lj}CZ%Tmw7JN5o_GKr_4vgyI{sL6AdUtmBjv73m&c<`F2Vbf7^zlh{d)q9xzVdq1@Z*o~m($aW4Bzd@w7QPr{qDP~0oH~_ zJKq8VAJPq%2}UzacX}%0zUo*_U#^#*e>n-%993r(b#&2OxS{^qMLr!CG_(A zW;wmMo?Q9c5APGu__ncryv^9PUJT;Rowa*3sRvtc?=#T%H|M>#+5(~E=>?Ij$URR4 z56jji%^LGzH5GJs8)kTFEzfav*qmKw0pH)b_j^>q(AH! zI=jfJ!xK$V%JcSKZ4uy+E{k6rBBrt&k1DKlUnh&oZ{t(ruqNtm=nXk`;A6hiHO4YK zTDsQwFOz#GUjn4&GV_8iyFVZR{EZ`(tWYY>3c+iD(Gx##K!K)!a9DKa$O_xhb+a&A7n zKM6ElOtH3m10{?A5iSqocsf=z(d3rJT_?01#Y*jRLwS`R`osGimY zb!O5b$hIs9@8f-*T*RcDDXZJN2g}9r_43#vM>-1;>1IsRB*lXC?+G!c7_UN2fBV(y-k!Dx99fr0N+)PMf7FpI*G z3`X~P`QmlYV}N*Mxf4BHsBt>T#awdIlLPAEigRqk(XfVM-N?A_9~_KldUHjn-%Wv* z;tqg`A!c`bTr^GzH$EY^>G~D@L^Nj2qJ{bA8Ku*8z)S`(hS^a_xV0A@@Wd;^YZ>odrlrzG)#%B+7~8LOrp_;BAkgB^i=}Xqv!q%4dJof zgEzzb5poiV;Vtyh+)v)<#fuk#2MXN2UUeHQm{i40F+-{v%VhSKy$tblK0lpRINS9=DcP%|ri$DHLl>9!Jdm%|6TE>Vq#N_lNIaj`0$0 z;z^56086%LNfTHAAcnD|T@^Ws5@Eu=bOQ2?=k14+ zkvTgWc)FjyJXzjnFmKXFbRv zMgmugyV3&DXnX1%M27C~)@UfeBo#T{b>oe6jbG%hJ0*~|e98Ad4~RUAXRQ15u81}- zUphGsfZ{zXBwp4UsdNGG;0=InS}p%}U*7zr30v|HIWBY?z=QYq_6`GEN6kT+l33D< zf%VpLjD7d#yy1z*8OumH_nH3m0b6uOgX`PtcxESibeIlkzR45%vq7V8(p%l3nSYM1 zYj?~m5sc=<7THH{?JomdJ3DJjW7v%@VwZ3$XNR6}_?id}$4it0ZDvn4W%6mhfzJc>~^wuL47Ea9DVZ@;a1TF8s= zTwXd;`8=M!%|M?eG&jqyzZ_5R`3c*JIyCkP%Lrp)*o>+Cld5((jf)ANV>TWEq%Hg` z4D~6-CEQ&X`jT$~27d3o{2*n__z~VYuXv36?sTGhb@+6Q2?MT|JmbxX@wg~o`gx89 zY!!uxuP;&%*UnMS7&uEfPc_odcXszTm!l(vaSZda{QTqFy!YerfH46BiP;|ui}jsR zi`~CV#{+)waNVk?#{eTg>5AXMMI0)87oSYQl@tafZ7;466urr~G7PRW9?F(bd4^7T z=yiN{K2enE$KK=^O5bPA8B5Ay4bi1E7?X?;g)8s&`$=#Zl8_Q!f-Ss;=cH>q5E53@ zqKNS_k8jRDfZ7@NzH1+|ie!qF_dI~1-|q6|FGKJD%wf1l<7Jr~oTLxVGvL-0 zSY#;37rKiCxiVu%n0QvfuwoJ)uK zw5;i+C$nzA@7gpvbm!zcVIUjc>eT3GNHE%hel+e-=i}y z1pr9-kf}LhIs5tWKGbuEX_AmPN88Jr*BRRud@71#YE1j31zq>#=2=8&iu*v`J%Rq= zkFQdy3=Dxz_yd^w2y42rn=;mcY>H1}21Ibcie~u`;&|H`8pe??DNOtMWA|x1lnW+- znt4W#LcEukpO;^LK3(pw&l?}lcdTDa93t%=6NZj4`h7Lb)6z9h zHBOI)q6;XfyIWx5S{q#F%lFO3PH8MLiz2=+b8So?Q?Ft9#?a@Cd<&m|QUEIR*KmC^ z8L%cq36a{Xzk2Fcym6$I-GlJ0j7{lBuXxlvC;Z;OjfZzQ^z41#qns&6%ZT9>P@ZKh z&YWD;d=l&Vt8!k%VP8M=)*U$7!E@120S*H}2(^H`xjA1%r9@Q#(gA;66RF1woce|y z=#;0U7Y7;Kugg#GPR2+BW=?9eTSK9hu=Af1V(9|+`5pZDZC4pU4Sc-x-zMIYqd zi}3{P?70@&#LN5p`?KD^zI{6>9Q%<0GSMW(hERWm8RO1Fmw?Z zB#+9LxV3PPK@i!|)hff14B;Whk(Z6OJ!1LC@Aj5|`tu(Kcqq=n6Q~U2PglTZh&+mT*)4u z07~aXyxtSVP@kjI=7ZKH5>CVcFKljnU0%M~S&r0eG@i?gvv}d%vX|a?^4MGgr-AA9 zuqx3lVcw6+>FN9C7}&@&1Y)0eJ=s#Lz-!zH)E>P2ZUsQ(u4e)U^fx_tuRu7RaCWX} zaCL6*qpo 
z6Tu34&sqVS1D=|*7f;>1J>R&enGoF{|A|uJ+cCPWr8%+(u( zPc0gnp@Dhh&Fia++157C?1i`R&!gsM)_999xxBhw{`PM_8tUj>k*S^Cwe|pzR>St{ z^^1`gy5ZriLYO9?xAdz#Q=O4`C-RCPkb5TY}Xg_j38hGZ=4=lhs3r3f6z2McL2Vird$ zN$nV~W18_+dT;ik``Lo>Jb2!7%xB8Q;wwHmzfg_Js5Z}tPY|4f$G}ij3Q-iCEZ-ct zm>B1I$VV!sP!M;+GrJ9SMx%q6{MF&cvbUe2?fbX4Jfla;i&uvUC!tIbyEl}lX^#UD zri)UFToJ{_=0Vpz8w2(9sREyrX9BC41SWDcW`|#uN!;MrR7& z8dJQsZYd1BG5Wkog2(H&HE{(Gl#2J!ov;#kp;HVTjideg>@I8^lWT4q6ZYzQ+)7B) zKG;Sh?==N?p;D+;j|B*pk+4%h!P)cY4+j_lK?Tw=#eA0s zIZaaXMkX}=n=})txdR0Q z%toghyDgD-%Rxr(Ai&WhM?u(vMq$X!?`}O>{=+~2G5Od^k({k`v5xVKHI-IUNxiSo zCwlT+&Xcu${K@;wxbEeh3lRz_16=kFt0|H4ILNxA3uVPn9lm@$;Mx9mZEdyg+}hc1 zY=QY`Wv31Gim$ATT7v$${P_N)_g3YGekq;-lq&cYpH2ELW_x+`BAPtP&Kr)TkuHSfkddv*!w&_|y} zi)gI)c?wER7cV5o47~Kt#o2Z8k%7*^VElgX8}dvMX>=7GFqU6`{UsU(`19)2E2(|p zyb;6LxNCs(^j&hVfYvzy^s8-G&ZL;V>AbTAcLU9FKsKh*|70bn!pu*E9AC%m6HDWBKQA z90p$ zqDyPVx9?dZJsSJ`zCYdxnbuJ*USE2S+isvx_f>n{FRqRk^=%xpC(p8;$vO{z-9nch z@0qJT>i#VVV^1g0BS5)>#^!{(i_`9Z(Px~~N4>ArFFn^9+08^R$KRQ&W&iMCwqoK1 z>phWb12{TLk@Zqd-~6Y@>p1i)?=0-`;9-jrQ57w0rda>s)%No0r3MXY7@h4>%+T=p z*-gllhe%;4Zre;4w#K;I4)@{sU6cB{9N8k#a|sv*f`|#D@Pq|ZF=?v>#3EdFdUCy- zo!2*H|1EIwDKA1e_gMs-Z2~C(or5gKY@O>tZtGc39!J;+he)d5Z81=} zx;BL2#VeRgcy6IUN5eLz#-i)jA75=PKYX9Z5rVZv&e$eA+@ey#6LL=QFaYo4Do<3% zhk;Y+&m(r;`tz=*>^65c2k3|>*lqCRk3Y6xpC_HcfMP)5?KLbYVn8{B1QXEy8xZ5v zp)Uyrsa@yT2MK}Dc+-tWzzzEF3~h1BhOT3lxqc0)lu4&w&EEJO*T6%3cy$Il> zXLx;{I?wF;A|czO&@-$FJ5abH>8_2LA!Lfvc)d?ydQYCtGX|-eOLKQmX&HvrxF38I zieB3uD;6@o!#q<$K%5E0biV8P3OnU(DPc+*sB_>=OFTl(%Uo+Rpy&qIe+n=0*}wh$ z@5@iWoD7en8yaisbe!@f*x$c?IXnQgF#5b|ZEAP9!=b2&285CsA|5eAXln~^4lU(J zc`175qXn?_>ecIHW2@l+9f8)1+Z6Hp0cH${oeT`M2?Y z5A=_ZhPr3Ag`Mw@FM983 z>zDV>3yZ$^)7t;|>*efZILL^o=@3-{oX68^Y|=8iv{`e-m3tWjyK@*Ghkl%21pd=c zo+pP1NIvO1qC7w%Sw};>E4>Fi14?*GGbaLL-BKlu!%?Mcj#Gggy*P)obWffeqe5<9 zNJ}(-oeX(q_EKNzI@x{J1BI-O@ak?qHv>9>l;+Q~wH|WD_JXTT5;*`s8F^c*Cc1h{ z|DB~91KD(0v;xlPSSd{V=hvR++L>M~CxJIKy|c}(MUY}67or^w+p)G+fgQZeiQ#CF zcXZ}3kw1V_(-off!BGaIYfny&r>+8cC9Cw>+DJ#INOO}OiQfYmPm(YD!1Yw8sCb{B zL}ueH>n^%L5Z4r5YuB3p^7Fg+<8~1)? 
z>6F)cO(UMhC+ZAv+TlP(wf5CFfj!W8&3P+obwr0B#2Xr|b>s9wK<8SzEn9VP_*-@R zYK*DZwC4a6;L194RyZ;|W=(AV?ZLt9;5 zAFZczjiLtUTj-0kwNhB9jfY4g;AaJ;!;$--j=Y5u;3uRqj#XJE5Xii9GbB8As z!+v?x`)hkcOu4sz*mpI@&oD%-Z$eo@t09)0U80tvqt(95Vlldu@z)fS!FSU^fIc%x zNDSlfXopEtKpN8MVDvc;Moy-$jNq8CZArG>@$xC7&jcnE#A{czcsZxTh453@7m6Wz z{zhK;s{?yCTZqkd%N=G@5$t{%n-Ct#6+)M><3QhJRd53_&|Z< z_44c6$Ey`Ed;s~I#(Z|m_?_*)62_f#M7X8yo@O!2;4>T`5vL26hha` zp%&4VVPY5o2gXDwX)0Br`vj|7Hur?h-{#3kHOzUBU^+qSGSCIA*?(?7Il@T=-3T;r1qaCm7wnd$z^|N654L0SUI4NEI;fdWP^5gQ)|MZ8(w7LBK=l4T% z;rU}tnIfv%il@x>hiDH}_-^DJ4aDz>;w2 zq~4w?8Ah(wnnRuqC!vQ;&Nd!l-~dADAclsA zH{)y#7ztigKL#M8g)K`!=-NA96ptJ}9OG^hPog7W!lOP)@oT#E^x1=q^|R#_JsNKT z41k(&G-JrXsO$%Pc)w^*l%_90&!6JS^a_JXa2Q&^`1Cvvns=E#S=TJ39`KL`p$_cw z;2}RC->vI~0=ur)QBHJ#?#7QCrpL+0MYID5fPG!qjN56U!0d*ceTGdTl z*6ps}lDhy^I!ixxH4LJ4QVVoHzL!Q)Co)EU3_m@>xW=LJrS|}LQ3wu&GcXi#J_5=W z$R^)*W{7~%K|=GNqr+K-{*OPrT)umAF#P5dkxyw=Yhb&vZQ#4Rhu!;NKt0cx%%imw z)c}qDiqxu#-`v_;_V?}ce>$lOk)A_oj@D2pe)*;^4E^aoq6_Jy=rZ+A@hp9|vlp<4 zmZGQ|fdsZoBna(9*T=z9JC+`xkJMtVXWTDi2QIHpMiw5vJWMI2xEM9nhPFt!*>E31%xFPEDm_1Ekc56PYoi+?0vZ&{D5$&&XR8ZU+e!e1D<-+Wy_#{GkBamA zk2ewO!Gtj7;|BdDSPnsX@(58^FX;Y0MbNXMagS0^5HXu*|8{N-% zundb(2}Y+xgdpWn?BNzNo_@_qiSRE}Ou?8x56`u4-<>V*@?IWv&qpZ}ibfua2QnBVpxr@B$hD(7{m5(*v~wJwo%ayZ+n zyn)!5_&l(qxN;8rNWdsL2^HImRNLDZj~Sb?uSY-12~cR2QoMi>T1;rX?=cVJoF^sJ<WDK&xL@-HVmy^eGBm=&Z6`F{WXi!l&!|0tko1Kc`D@lfE#YfXdzZQY`f zbecj9MvzCUxO6)&?fUv^=C4gDWiULMVo;&32nWywR)Cf9Zq3kZLdWs0 zKKcxrbqAyXIa~5x1V{|(CLpax{>jN@3eB9B<>mBfe-U5DFY&O$f$V4Y;8>(FwQdYc z^pM8{1j%>QZ0sUuU&p(Vb{NIn92G;^)$qsf4yR%teIDS&9(a`zxe6puNDATrx-LkW z-nO<|Lo>$RlFYW!hmLP;E%TB@0_2uIe%A4FGntWJ#DBUcUIzwt(<=ebp5b|n4I?qT zE?V~qeMc|5Vh$Zy{bDbD6c}rcB6<|2XdBU%+F@{>Ki!z3U4}woBRSsL-dk>>t^M!P zMyZ_v-%^$*=anay&)wPIo~Rta23$N+Ba@8pA8s%I{=fQjMrQ4NX5MEf zCmBlZruPFB8}V(jlFsUJ(Rh2-|Mb$SoWk7!-2MIeX-(7xh??^3q^P8Xwxg3dJGy(e z8@5V7i0xjhM+iWtGaFY+8A!B+3;mN}x`mNWU+5yJsMwZHAo%H%?RbFhxl6z9E)G~y z+{l}rf=e=)9-Sjl8QpQtoR#q`kM&hA$J&y^baux0NkCWEmf6nAp-e^@COWqPjII$) z)#Qr|J8&iw(l~9E-0I%yOX!iVZz8jOZtiYp%tqsrs#%Yx#_Q7%se|@wZ0E_YU0I2}0o+D&IN<{kT{Pc2~qrE*oPOiW7{@Sm_fj0CF;3BH&41JO3 zNlkE~(lZqQ{iozSM~i1p4_KS^Sn~%H!L{RIKw0mR}8!tZj7o;=xEUWBfn=DB~% zV07=xVn!nEn3K1A>RI>yss#Pg$PVUvv)Dhzy0lP-qoiTXzk>t&@lbp81str zh!_M09rI4OImMi8%=&a2m(KdpLeM7#^>pKD-wg=FP+BhvnJL;~KzPMMRSGdESVl;9 zLB{jRldqw_%jNxt56fi=BeIWROjx`%a)^jRARV#@D#cDHAzk@!Nd4wcLkqpAhe|Cc zq#9rJm&ZvcTA;rBWjsp7T;{0A$(FOnt&0eaO6dI84rC1kjl658K-vK(ZrMH5^n6bo$qAL5eXCwx2Bj?vJmQ zH{ZR=0Nf|5@0MSF`K5UV78>*J^Na+=nCvAojbB5nyWyRuj}*r$Al+Sd^Q@gDCR|Q2 zQ<^U_YOnLQTGT|x`r<^uTVx`Gn=%rbozo2agh4-;eEFItnXZ)?E1v4l?~W(^<`#~j z1}dDZPnK=ja}N5PPPxA`igjvVUcSl0-&xL+wZH%Pw)ea5Q~U7q+~XN?`||bSI30kJ zXa=K5m*79%s*MglK?f8`P)w)CLKtJ$oWo~ zJDzrLz|wY0juIK$c)qhZ7)#?CG}N|N6Qb>afoP{tnNHBeRh2OuJ;(Nq)11$4585aYLwEG$+Og}l*&XaV{WM`x!{ZQ^x(y5pRW#q z9ZT*df?`(y4^#uI`;Yn)B#*9pqES*tH- zzr0K?I4nB3^{ghU*wwn>J3xLs@8-%$_{Sf9Se)@N^-}?Y*%8)#8B~Ux{wEXf-XE`E z{9$&dpOdC+zP57GaT8UEu4pd02p$W&Yfv+XevJlt9B|z4nzcqkLz`6x1lD``+<`Uj z?L@){tv>;9tuz47-Zn+T3M4#&gL9RIsT5VoVJTktwA{y-w_{Z7vCYd#IG|32 zD>@NDC=~8xj5vy#7u15R?J7PXL-yiQr+%8 z$AtG83^}HUF$WJrF$wUXa-4ym)7Dy4G9HVL=T8yc_x+Qr3{LmoY0R$=wwjwFi5M;~ zm4IQmDcWE}FeGG{P&;|9VfJt7ioCd(mLW$o3Qg!0QuJal7{?Fq2_xovwCFa*PN&uz zmoTO6D;T4{anE?DedlY6yML~0H0!6hZZ)Wc{87v;RH`RC@FeVYa|cQCLeUS#=$VZ{ zb*JzutF9 z;f?sz4wAiP_waBIdiW+k*xKu1z*Cf3VzlSqaq02-acl9gG5I}X!6ium#}kWpC>Gw* zAAWeXyngj+p2LeqbAU?9!?rhuaSmf@d`}YSX*rtkzj~>0&Svs>JAy6zFR$3}Q*sZN z*GF5+4}bh_xlo*SeLkr&Mr1ofISodnwHScpMVS8W@!4{f@qTl3Fn#BpJhmCIuoaD0 zBR!V-8=$(szZ-vjYJLh^W1!?qM@1F)DuzuF*icYpY}(0c{*&YfOw^%lNB?EE;<T%-%t{H`co}=LpMGQo0+o>uY 

literal 0
HcmV?d00001

diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample2.png b/sdk/ai/azure-ai-inference/samples/async_samples/sample2.png
new file mode 100644
index 0000000000000000000000000000000000000000..59d79ff28fc57923898ff20a911afde70994fc6e
GIT binary patch
literal 264509
z7nsUD?PS^=KYR}3(vna3>aqE#hEP<$y_a$Gj~IiukJsmZ_;c&;y7zMZvmgE9`n%u% z<#qS9uSeit70(uZs#@4uH~r|#zu>TdIJbv#K5{l9&k3gA2G{lo?rTBf6^a%~3e5xdK13hI(_QLPx-pK-ke;9r)B(1;g8+^Wl%g1lMmm}Y8 z<`Ffrt`c#*%IR*0nRW+<%7q{BckZY^V;mW)BL+Xt^0S6hE^SIL&vV!*6S!w_q%%4~ zvl*LHCZ#X5iSe5(=H_0$Ocp14r!t?2|INeo{W*VbcYl4)gUEiI>WSV)MjSOtDNlCE z3jUWx^9UIJ-@XIh(P@fOJ@`#Ia_Q)Ja?+#VC;tqr(=hAYZ)br>C0l~oR(Krr^ zDMc_ukL94NtfZ%V$MGM1LC+drv!=lhK9FrQhB3y|u0wmXQ4qd~4vf53FQ%<+x|*KA zCnNhlV-WLc>X1?Hk>NAMqzfE(ckr-@us8Y*Hk)ZoKk9Gy3@DsM5C@0uI9vyIeG4qr zWt@QAv_Nk6;3%*TP3H+Gfdz-g)PaDFUcaqV)|~`9E;`OO!ejroMwP8#6YyYdT<~H{ z+db;h%5*c(1hN8NxJ8dAPme=)LG49;gDt!iTJ|~ke9pm+2GBIX`59ghuLdV<4JWJp zZ~p>U=^b8`r8zr03m=9@Lbv<#5&#}X+AaSRVlQ}M3+Cf!*34=D8qy?Apjw_I}%|w$e##dvJYE=85cOLrf)kDfC@D} zO1)^{UI(W!B8s`FdX1?I#(QC8P6VdaW}T3Xg)ntRd+zfHZO8%dHdgQ^HqVsb#QUQR z;C}lIv3WAM?Bh4-0ub^3_ z-95mA+Mdezz5NRwUB~PB**a8;Ik?YVUar6IYtPrawj+;FR+)u%3R7@>xp^!4KY|dY z{$DuKK8aESe{ENb<2UKNfD=69j5vYEPuAc3yWUy<>}OxBzxP-E;yV6I|JgeI`nM{e z%HvarXGo`QKj%9o($sr@;j{GEAHI*N=yTAy*!j7fJLu|r@DB3r!aO|Cv|;K}Vxa4D z%lWQ1`xf9sJm{$gTqD{#3lCZqzG*EymGZHu-itlaW-jM?pZ*vG^s~k|Fmxy-Mr5~o zXRVhH_SdV&iJIE;^siiHHeXuT=vr3^eWpzN%8_=aVfTy(J_ZkMt^MU%@Y=G0r}Rma z^jQskl&I*v5jS-N6mdrivTE28c3 z|ER%@wmUr*y>fPXlHUTC@I+vp#cvsV+AMFSX}VGmbtuML&38vLRu+FW=Od+ zteQt71jbAneHl2f)iYJW7XQYQkl)gB6R|4?;`KP>rW20(}`T2lRXK zx^{LCilAHRmuou}sbyf3HnLI1R=5Z{#u5oLic%3uV?0M+;85dkfP~wlSTS9wAbVhHV7sW#VcJ6mg%@i2z>9rE)iZGGSw#8C@kt%+Z+^|s z-+FBN94-Mzf6mde`46CO;6=X=-dc}>>wo+wpRWJar$1PK;^i0D>)(8Dou9?a2;ShU zn^pu50CYf$zedu?TT^L+vE;gFcZ(O}BYWk0teg94)^Mk9-#=cP(DIuv8zuBHm~LpR z=kTG|CwsN&bv|DOuU9b?UOHp(7UgH|a_bmyoSG3#b9J!NewzkIXr@2M&?@ccyKK#Q z#(5dN!i86dzEUK$`q^RPb8v+xF?F4<-q>JXr#<{qbINpnyO&ZfMqsLh3S`N@eBZ*u zIFL61MHkRj4&ao5oB4ITn*Y{uBJv+&oYg7)e_vkA|XOqngKK2gisN?h# z=e^?%4Db^8CM)O@htFT_QgLG(6C4?1jdAcIQ_QC9$I%caV-}0qlyivEWm7DawoBja z4{ukv=-$BY5k0u^y}?P!v1@d}n|t%b7`o9EM%>C((7Y!54XeOYIbA;f3NoClOKnEGom^%c;E(I=(U{SC~ndR z+}h)z_B=dqrG4!e!N=5;bC>!DAujBsfb`qEI~r5203%8lNit-!;*duq^?qb!W|VqF zJH(#0FtZi&3`v*`}olLOR-<3>@LD+6Zn&iPp); zmEJlgeG(eoMEOAss=uA8)x4-mi7?rRSf&q@Y&hTiqh^e9;F8#=BTVo z;Ou~BC$R8(DOqEKL!X@%2vZPPSq_Whl1v*$7$%Zq+upPfD!dA zwMYQqX3~4q@;tA5e0pdDXs`* z;D1G+Avm9O^N2qbQ)IrOOmGoCC}nbKj9KcmWd6t7`CbE;Z(uVl-`&|sKNgPuo~zgE zkN^6MwRcHD8F$L09jEz!FsAW-ccxUw*1svDoxxB~gs-7#pTl_>1OM{$YF)^@q>5F{Hd-0O{ig4Q zXGn6ZifW6CQE=0j3`^;U7O4v&6uil`^ZYv5d3(LSXC4%A=|6B6+J1)bIewv=75Sc_ zPV2o_BKh3HoDU=D{a(!lMfyu|a+Is&Nf99V9h?=1#+C6ku&6$$DL*=~DfyHU2*^yBTH{Puc(y$l837Xb!CE6uO`u1<*hip-GIhNV zzD788`o=enmr6sSF^rSJbZsY*zxnWS2BPP~U*xBY&G19Z399JElnGw#Bu5VIyZ?AE z_DRqZnC;4^yLOqQ`+KCEu`}A|(PQTqu~E^hveWdP>4R~!!&A}K=`}bR2Tpb%1v(b} zUMB#O1-F3Q7-a!cA;I%WaPqqCD9hOe0v!vVBQt3#*YJ#kGsC!hk4v4|@0j__-afXQqY>NDC%S@$kbvM(I`Pmu?>k-!qhXi2M~@;>DOd)y8vF6y2x*MFSK&br z52}wog=v6fF2j3n9Zh$~01VQ{`xKq@4RMr187ssHV&|Fg7yyt3G=^#v0~k^^1Fg`_ zxChx0v=O*TXK7zQ%84k{_3A%JcT~xhNO(Kt(sy4J5GBvBlwhxA9_$1(!d+M}TNsGo zV;)R^0AmQj5klp&N;aR`nK4WA^Yo-}?iELbOM8@YTcf;lB9wmZC*76Dsi!?kMW4Jw zMxSyMQJ5b$_wErr?HfOjzl#JJ<@^yOk%*@j8c~Ig@W_j-*U3@J!86wjg+-X*-aWYT zDTNlG)5iT6Y2K_Aq@jz&g)z=DAJ$=5JzFJZp-TSXqCy;4QNRWooEhmqP7DElaW?Xcfx*|u|GSVgO|%^&D5mngx7^E>reO1_&AF4`xrsg%g3$Z- zeepNf_q_LVw!_{0VVMdP8A0ES9j!-fQX*~WaiE#QwH_(&*!~{n<#+xI-&i%SbJvyW zV$z-c_2m0MxBkJOeRuuYw?A5c@c;W4*X=KTah*pllFyL+6&^kW_# zocIhZj90TPDTV&Pt1|$P+RimchZP8Ka{a~LTkG|sD8#h!rZS{b{zq_wP^olaL`D|; z9uAn(@9lWDDCw_$_s!qv7)l&owhmeAfe{|U5BI~*TgOOWj2AiSXLO>+dA&R0Fh=Bu zPOjDu+&@@*_a8U9)Gl2}mK(LgU5q|i^;S#s=4jeLqH=ECO2!mGFB2efxJ7uC-M}?# z0ep{qFqrg1e&E&1=urAn=FrAsHoFp#(c6vcbB}&y=;`E1%O?mZPCT8nS6tPxD=6fB z^-*VY+v?F1<(2$ZKNJsepD_ei9!HCUYJD`em<>!K(lL_JN2yN_PAYt}ZUG&FAE1n! 
zrI{wLgO9YY3|^eIM0C>VOM9wx`hg)%+u$fn8ywN!bg|!kVTVPJ63#mbltbrVkD z3*2~sZ38P~JTGyf8v!?gOLbUaXGbQ(J=tO#E4gM1XIWmf8yw4O=u^U- z5PcK6ff+a87tuKf2*l)dc%cj4bIsUNtn`_wP-Q}&v~|AiB+wsmJk+QZ}X5YJl z6MY*i_nD6&@TR6dh#$zB zMyABlKW_}s+6F2>XYm&2VFWoqq&*pdNiQ4hZQ5>nCP=M;4ZH}{NIb-9>L?6TWk<86 zeJ8;+m)^8_oR>aoQR9^|ITS-ckuf+i9)^WfUT)JNF|cqn_K zP8F>&VrpgLse6P4qs@XN5mWAGY+w|jUty+ zc*zu%$AHkU8$F;&cwp$MY_K1vr4B>-CGY{c`_+3J(%qZjEbZ%Fwj&e(MhyHgIlT8M z-L_wS4}K`@d_@<|={DsW5Q;xM5C5aS-J&*TR4dzW?|yu8a3RST_cevfFWJH~)$sa8%I;2b={6fuAv}8{Q`y=~jdn8=_Wu zyHHj>a%jlEoP{`TXxDxfRX%~2;;~h8$h$7C8JQH#e+O~)rb^%n46+7Q{)r|A7iqLG zweh*;x6bnUI>5Ph`R38Oi=+4E0eEwdQYAC+l#dN?`GCVjZuxAq?|Q$d-L@HcI%a(v zZ9c(jjMXJt8Hb0Hy@jg>x#0{On9>hQtKt*qRULdh2nvKVn zPBCvi%pk#lFieeH8a5B&5sp5rpZX$53#inyP_0Pnb!lC%@Vg9xErP(z^H@k1DP$n{ zfu~u*W1`fvWh4Ve&>^G>2?Agshk?0P&(44%RGm`^Vw)bT(aW7M0q;u+9z&=H(RE%z z5U(xdl_Prl&Dfj{?Nk9!A_1b*}r2TrBX`>tPGl45GWD`lo0u-ne&a|;UonZ0(X1jy7Y@S4Q8M%t^nKed z+FhenJ}GXCJ-wDxW(hy5gMWgcD70D43?KfQvWBnp37#!(G!9pGCyWQRE~7WDc8+sB z^hkSEdH|hDg{4QnmrRT`88kL!;Jp>5cq{jd6TTrQ6&WmLbFYrX_E0uTxk}f4c!;bA zVO7!;ax(|HfuF6&_UV1}kK#-p#7X~r`03Nl|4z2Hx@#V87*1|^$*H+|RO2Y+Z! zzjh5&;B$-~0(TtVje#NK#;AzXlw}P{V0@gmeKK4m!&I87F;TF!I`my>jn2`t-B~Ys z;q>IBdSS=Lzp1zU!HIC}Cv@uh8qFeln3E(IEnfu17jQ(UWM~_2>lznKCKkb4+;3r#Y z{Mf{M*k3$GAIzdLb`I6B`NB5PW1gPUNay4bkVYx|{h z>m;S!2D9v4uIrl)yq~_RSasUL#IW0y%EG!bQ5iUmTnUMOcL%%zTKz&^@VKnCC=pH+I{o$Dxt?o^ku6e@a%49t^l6=)hR zgX}(f69+6tD$WZ3cNRGf{hIy>fwdb9;g8&#=S0shP9w-?jRIOeFn94|=%j7+IA)`G z9+WQ#xZkEby!7JqRGMuZ9Iv7!6a?kv$!Qsi7tC0T-Z?^j6s~P?z>GNKNWVOOE9dv) z^b8JeySVf8oNU59eon>PQ8 zhco*VA#eEB1Kgm|i zAkZ-A@02%1$fkj#Z-UEbSJKCMGq=tO;JEau zr1WRMm%i72XFqL4Z)Mz^lX*Xipe=Rc^g@rlVmKYLtmyiNPy|kJ^$~7#c|#iFObA+2 z4{QS6&h>K)BHJ9LYNz|DpN4I?1rHNAl2MOv6k8rp8_6AhOJr z4l!6vT*@%+-tTTv8sNgb?xjD`V57EN^B(gn`_G@97kJ)CQxfSMgWhr&Qm@`n`l*M; zIHW)6DN)88VQ7t`^X3Y~7&L~1&>FZ#4YOBzpVdzv+Uh1Ogb2HD5Zdw)03Mb$;Gsw< zqXn56uNCO}X+4&hlFJyZcq}dgZzJKf*Ys-+ZJnQ;Wz54w4)_3fR}f>y426)$Y+PSY z^y<6E;|Qg|Q8jl^b{YGVJ$8XAw z@+#b+aX&N6Mr(mD#oI=x-v}SrXJ_+_t!REPFv#@K%FjN09p7SJQ*C>Z$Q zsTz;IwjQl%Amt}@X-nSMbpmAwY@s2K?VgmUT1tS!_~yHj zsn5Vsq;H*`uK((L=j-Xq%g*&0YwM+q7Pdy&|9wSpTXx6S?Pnm{bpP@3pZl#H<9E&X z?df3JLde`uDc7_cXRt%7vdd?T#(J=`{>blmvi{tUT&_QL`IUA5SH849{MPgJ>^m>k zS)#;HGi60L>gbDN0kZ?HHn_+yr(mbX%-ximg|wXS;3hQNI;tN>wi|f+k^e5E=+3WV z=r3Ncm*>~(y^l`T2QM$mBO<+KUceK6nGIHqIW)*8-a@4s$9(pqBZzF}URYsWi;4pa z2xqaK?8=5ARLBXG&PO(KGh){_G|crd+VnC1rv0=-?sKDcI>y{{;QwiwLLGh7x_B9X z1TP8{uE=2*o25*l#Mf!N*-imWzVC*Qzw3jm^@Y2%6?|Ew3O>#@ z!Qq2Acw`vuErfIS@!T4Z1V`b8K+(P(%|NDqIGgbZ%tlw04>PvTVMl)$7bnsE*KZ~x zKzFNK9coYrkpA##jg;}jt^YUpHGbdi@89Ru11lTGhIq-#$XePe4BYn3kv5yz-55a` zD{V81uH)6NoDY0#0>p6&`qV+n)4Sjl@FvPk)(erqY zyN_cC)3HSXJXMWX0CFnV6_N2oG4cMvZnriRImzJA(`>rETc&x{)9+ zMwDyNjO6CA2ErQw3g6|o)NVW!1~GyJY1j&_$Kk*OGhp@mPU0?lNWW=Afxb&VFsalu zPsUQuPUW^mnSnE+nR7hS&Ek(ZC)ytdl#1phMc_zEs&`HW^ARq{&h^|bT%&S~VOE(p zicSA^Lu$GV!BV7x5h7d1V3Qb(g`if8Vz4cIm`E0Yuz+DVZ5%}r9LAeubkXQ_#(&FW z+|cYM&eW{h#RQxet7qffG<8pjb7*HfLAnfva|p6klGpO$)?g4mSrsbtCq-=$mb*t{)t)cRv5_`cvO?v;L*qFRefL%`dI%?|itPy@)e;ex9-vqmBm` zZ7Z}c-gC8K*1} z>qX@Jnu8kUSqE8pZG?86dP-Y%!Rz{xVhSvJo%W*v3IjcGUGFYm&?K2tvYZ^7Em-r?F0zkk<0SqGKAg8xUmvwEDpphM_F z({ZLYWi#oC(kA1!CZ{R$j2TZFkPHdkCy+uP`i<@2G<1zX#;5`J;cy$U$UJr9GW)>C zXnF?#TyuVy2y)G-u~vvK!f)W6>CpSNTh<=Q4*1PkW4}K9=!3#1Q|y>WE%E!%vHKLH ze4VW7TWiPmkS%&`GT-_V^uaG#?H&Xf6UM?b`;l=^hmcqIIaiYj5YG4;eLa*A8FL`0g?rsGFMs+hs01%?H&vJe!!wLE zz^U(^f;=P!Rvlhr2^mI2K#Im`OzJekO?wP3#^5jqe)Enk&<-lMr=juYl9_J$M2UQu%j14_7e-)!FLB=li*4Ix) zXbz5ooAiYVEjn>v1UU7~Cy|Habl7%yt-iyV@?9|+Wh2sO$nB~m6R>r3wiyrtB_m?H z;*%FK()nY^cjK_^?LUohto6}{FWSF;w!9>gD0AlX?EJL!KzG96B@FI+*+o7|AA(Rr 
z9UOG7KK*xUA?-Qq80E~@j$^R2Io$W*JM`xM{LcTI`l68D&Ijs&^{qIEfA@Q*>zxly z$E>vd6i6EkwzH|*(VV$F?QUA0W7F5>>y%O7@BH<;YyQ{9hEH78*Wh~k-JUA<)0grV zqcODZma8G%!{g)ixBc8_*Ps8H2kXCa^0jsJjSts*@4Z~#{?} z3XfWD)4A?`hSFB2YmowFM><+t4y&o8}@D!g>z zF-9)1r zAmuYoGE65LLhiT8jHhvccKrA8@ySJh?J{k@Nw9N&CynHM6C;1`aq#dsx-o$e=hg0$ z4@76tExpDV;1_>g+(*BJw-b@=$ASEztF?ae>09eCa=H_^iue3Srs0ZFDfp>JuQdf6 zn8`8S>wxPS6!|n&#%X)dJw?bUTu%^VmrQSz+?(Jk?OXo?HnbrK=9uf?N6zUDco_bK zj&U^hI4Ftm;iaiz{1*_&PcsJQ)T1BptC!+f(AUR}bJC*>m(wF@m4Ez~hP&{(4$$tv zm2qm5KCW(db}S`Z+bCb8ALa13qYh|v1wEr6hsU(A!O&G@zp;~t}59fQy^29g1Q0cM)oRLV9}{bP6f!u6P#A(T zlX2J$zAV5f{Hd2hbsi{1i?&5H6d)s{KDg>F=TFp%;dz8R3~Q9m;gNKVO)#YV2=xd6 z+=<)+)F`vG2`-FIc~~jyv&@q^RoDb2xF9rWgh2vynH;qC)raesF&Y@$ZScwV5RwDg9Xlh(5*x_jh$7UNtQUPgdV+?;tqg9UmPXmY0s79Dm%eG<}+^;zy4VbLdta z!{g9ni*tu^GtDf-iF>k{o&dA8$S%4;)rn)@ywP!>WxClzW}V> zwyiNTe(CSGALYjs-aeP#vcVG^8z0Q0F;BncwMX!Kx-e~;vn-;*LtSXY15~A+;6^h9 z&l$@4Cx7a}`k(H7b^W1dUtJd;og|vSST7S*H;WKh{5AW5AJy9)>4WZQwjm7g>8(JhV%>fpsqec7x&srV90%A zFYYr&3a#_);mrs{BPq6;k$2u>#B(y}J;URST;K5@*yt_#tWH^Y)xkkxbarEaf9mz4 z^&|U-U33R{I%Fch!KXP}Mu~3oMn6G8@m4)bmdO{`Eno}px|d&-hYmRW9XMvCc<>&N z#+Ku5iB`vPxN4w9mkX$5Km{@!d>OmJ4;hRO6{$v6JmzO^ngMGzExg3=RA)|w;=Qt> z-~2AE^P3*89Hbo?5l<^UY#^2TjCyoejhJ>aj*)rY;{Az;>ul-)&*d7OjUO3j_;S#5 zC1_6erK$Y%y%XDJU6M4p?Z*|-2b{qlbQN!41E#d~_R z4iC%`jMs%RW+%IOoTCwN$}rLzv8Og9q8-C9!XD&f%-oma;*eU9GsY(f+<0;h?+S+K zdUZzAlfnssqxc{UtYEIwuz?ftbc)#+VZi8D!c0GY~oE`EXAdB=XNCPYMuH;~F95*u-IqaVKn5jA5>uTm%2WhuMR> z;%L0My)fwR&b=7JdF`qK47C-l?a1#izp|Wz!h?>fZ!D7@3LWwrJ_zP+=y)h4m-G1P zlQ3n*7up$~$R0y7(r2f(6TZoHci2e%QI=(lB|rEQre(Nd)wfDJ~o5&Enl5}mVTSx`8EIO18t8>nDBO$yVw+ zPPX%2WFuNO-G`C27*81p&ihus z&3+-#7ooKbCda$0%_;i;3jdV%=r5qj zvZueXH$9rhd(Y8z6OrGWs~C*Xvku-yrs+pKAY&__=+taJ`xHj`AUqOStGfb!4SozV zUXi&2f42rtdzY8oZHCO%gW%ZeZVnf7393Mj;G^s0^^V+~Z>dpXd!?c_SDL5-k(~A4LKn9ucDI z#9#@G)P#)*%r#pSpiRmIgCG`!aS*By^T&YBGu;eN{sNEwoRvsrN;ph2Qv$gX_-2a1 zczCg#0p3@YY)TmGMB+hq%5o~kc<5t0hsDsQQGvlHXKYfplBp!e>Z z78yK%x6kjv{I4chj@_D0`bmIN>RQ4zM^a@##zm?197iXQW z8Heuvx@4Jx1F=`oS7Gpe@&`3?8BfVbD94F`0++UmrMEX_;eBXu7e_V%o9{pR8(&|4 z!w2!KQ_wjIPC*e~zUOzNj(mf``GT|D%QuZ~kKB{0))&Rs2Gb(E*XfWA3*T(-rC2_5 zKR?HaZ$Dez76$EkFZ6VlR%!pu#vt3;7`56PF~*x4dEUx>{|8TsfkYc!>8 zrQh0+TupO!n9y`oDr4r>$;5a^BDJXO=&RB%-~;;vJDg&ZPURs{VJO} zhF)Fr4u|-&Ptas+^fB67nXZqS{-}m&7KTIBMUt^e+B9zOSOJrRAqB?-8pvSG))_tS zz9QZm-xtE9PxpaC$e3n6VF-y2+>k;xYZO4aj zxCu;%FTKs+K^P>=^X`K@fesjp-7u7j5=bp zctwQWe(ba1fo#UigGTze`w*x{69x|iPM^M z_xgPO4PW~DI{4-XakrzC(!Q}P(^k`06${Ij&wQ&vuR#j^9IfLI0^1R6Ia1y|*Efed z+q)Iw3UmxN0~+41F-+TtDl!l8*Xu-AcToZiazFb$(Nm24tCKh;@z&p@%$#u$Jc#2OnL7xtcc>Xi zVKx4MlVNm>Fkp;Tnm+S4&M-x~9Va=bJA#`uMh5Y{_Q;CwIvMJ?n( z+jozxMn=;|@z3tnkcQtlnc?GmQEqp1mAxLSH*&x2ItI1@Q3L)*n|zk8b1>=?1!e@f z$I)t^PI9hbPIZbXM!pquy6>7|`Cenr(1pjl^=shb{BCs>W&G&jz4e(mSwDP~NLxEO zMi=274zf(xRtLqokFxeCsqO~%VAFMi3JX@lGoeQ7wvF7R6KXg%?TQ4eAMsdPa*e*A zqxSY^6*<|}F1hU^j%imtJl|+OGRfiB&dj^x7+bRnKNPMm*O= z-X7^L9x_gP2<(1quR0*LjQ`^NwBtQFIqQAXHq-+H%jSVAql&(iy@>3k!PI6)QLad^1fCm7IdoZr+BO!O{i%Dg>X%E;oat-UfH>jx%lFc}zG01l)b6z#j9 zt*%Q5jHXmQ8PQBG=hFCj*BaL#SG@w5QFFSMwrYaVffD92XVx<$|3DFf>l2wD|K>jBIO~d17l)zOx*qA13zW~ubLvw z{d!A*H;Ol89(fK@tH3s#s9iMccyr2T@a$k#5#ul@f@@_bkK0Be<|<1}8a3rm!OJ@^ zB)B|^cCeQ-__w-svIs*ji;T8-L-cu+C?}haf6^pjn@6ouCw*W7c$}vfB2+iy6;{Ee zu4i05AI=HzuH=`A{H(xL7jtqr?6_yuJbcIk1nt2m!#9j(;S5l^c~1G_ew=};&du5w zM+9BR1btHX^87XtdvM`TnKMNJ0mYzzl{>Oi=5QhQ*!zJ`YeY2 z`AN>xbr&b>4Tte%z{h`&2{vj{rr0M@-$KW)%yCkPSn%T5BWU(KFYr%%64wQ~up!a}6!ZNk`FoLD(R^;ks2r}{7|QPs&7No?cqXS5c*mUU^bh&5UWp#uZh&LV*1x32 z1}_4Ya8+a+K3(H*cN#75F?OA@3rvD5>HaQsZm{rR9VTFV>+QGFPhhpmKK1CRG(3hi 
z^=jDT_}{#`%=Kx%8Ox%IK_7i8FyW+wg_%;OPIKU3GbYQDI%t~8^fw*H;WXw{88mKa zql`lv<9q{aL-+P0OA6J)Ta%fwK$|=(&pD71pY(P&AA4+iQxqP36TJu4QZX_>hvPjj zJM;21^V!ntd;5>plcUkS&18ZtgJ>Ip!$v(e6GJnZlTK9x7utoFyEn4auj^2BS|B43 z_>m2P4}B;)R}a3)0sSZnr{lfD!!!A4W8S_C>(U0hX}nN_Zv7?<#bf5+l z)ix==lvED^tgiP+6vQBe6Y}R6r8r2W02xO@PWc4J5Mmo4a6;U#=ZC4%o-{@0F9>cA z?FAVbCnD<8v_hGc0OwzK1Pb!D&(tzj=Are~$Ue{;i@J`1Bd89HoEOomDAw+MTgSnH zVYe$9yDvZpVIf)Pt0?|*^e}NBE5c|I>b-c^c8N3RetdM?>;@s%4q}xNVu(HZ){I5k z>Rc5TO4}C35Kc_SV}5lba}pY0Dx8ouf|40e+A=SuO$J&D>7y8NE2Gg? zmnw4;1)yF8R6T7FuG!7X0=c-&>!3o+ybD$UREe zr!nBmfYLU4_88^5>HOpV=Y*1xpH|tH&Dr@vin;YPlLFCjQ??iSrxDS7)9!d2{fz+K zA|LK~BI8wzex0=_mAgxZKZ+=Fc>ChTy8ZA)4ExI)zxo>=t#5yL9{k_LRz=~2=l60xVtBVQ2b9`vaF5bAmTm*w1AgJi zct&oG4L^+oJiJ9tE1N0P>%~#1y=evxEZVB_gu{H2jTDRl=X|$N4m_T(3I*o_TKeAL z-!+D}9jCuHjB#~O>TTd)d}qts1QNct?B+A)$DgI)z|!_mwuXI-ySln8YTN^h|G=LYyqXQ=L#`|N?@fzJ`EDnav zy&Bb=b&vu=GatUBuNr?Rp2WM+agQEZth?7mXMIjP;qaa2ra z8aJ5G-ojjE$TfJOz@jr{y{WR?4%v$TY;+i8VDy1ArXrc+8iCL1e$$%Tp6O#Yxxs51_S$v~3P+vd$rnykltY@iT_-2c%bRH_<#UsQ z-Fn#~*;hfj>*nVV4j5l^;Nxlaaf~3?!UB=IhAif|zL2(!vQ9^s7Bi5OzE~l~pNE$Y zQa((XYxQOVCnsRu>=OpO3jt6mz4;vG#+()B4i@3M`ZCBS3>`Rui-Ibn#?z7sKtxKi zDW(t>k}ywhbne&VYY`EN$g8&E|g$3F#Z+ zrVoUc#n5(@W)!a->8=Bd)yE92y5^ld*UHQ59?toVamZ9tK(nd$AF0UlKiSlM(c$4#l0uvet1D}ZB{2!-*Cn?qB{>~0&ew=9UV!b%= zn#0gCxHLZnmUPgxlIaVAj8t3Y;JI9p!KB{^|ARV%Co$Rtn!}Py5e#bp9t6&tb&{yl zt8uMuxQyUZE}RJ~-KFP=?>&PJ4&itrxe_2ZBjf=93^-3qW=1dn$-_R!__y${yy`yZ{> ziP&wCdOvvny+8K@>!11Y-Sx-5^#5M>{>m=|$jIvLz4hx~eSiJ6U;EB_c@o)-T#G&& zT^m9vrhl^Av|-0xtON#fnh_MV$ejSenB6Zfz)KD}ydyJcN3NimoNb+;j@{R`9OIyV zaHp)#+qA7LgRj5N{pL5_&?ywqs2IJ_)$5h-)FG$UHO^e~J@gH8Q6jk?h@$Trx%X6F zpE*-r)~UNJF&g>};Kmd%XRP4tcNED%n*#?hzT=~mH=SU$eb)-K{j}jspL^GD)@S3m z@4tJz9z=GmF}Mg{cHwLe2AM${eK?Q*a*#yi<1k4XN4Ewy-JLVnyf>tq+|;P!yO~pG z&Sr7G;J3P=U!zzpMssrQ=At1M8N3v z;nL{zUTqp%6q*{N=(pK2XX;sF;7zB-1jf=dcFV4D7V=5EgNu!7mNeu>lTJehK3O+B z2aipY(_>yv;G*~J zyr<3%`=%^D%pv;6h1_j zcOs5d{nbSx`?HHGrVcbnfq1}~$A|a|IYA0wJQ+;L17yrF>_pz?iNmav0q7wsgKlIU zVmAsg2ZD3_8=Fty$wE@}qhw^{~1^|)mq-Q50?Ki!XKFkqY0HF>c z0Stz7p^;(A4zUM!V3;HKQ{32FK8Lp(zC5k)I5?0dHOx^d`G$rrqueqebTJ4^B334H;?bB8BG>Yr%U@wne_H_aAAU6^ktU;vCSU^lEjpgYVS4}hy7|rb z*3H*GSl1t(tXJRuXubWJgY~ce{Xe<>cRzKr{@A_mtkXCF2cLdteQSN zygn+L>(h45^o@QpCHHRf{WQ8EIEkkiiyEMeUpaNu-Sq3y$8ET0;QEOy+>6XTJUm(t z9`B_J$0BkSSv~&Nv-R{d@2vX=yP<2kV9m;X~WYNPg)#pewJ)9z=8#MgUj+WdWHAMuu&L)jf?9!$-8aLClEsu3)a9j ze623tWwg@RCLjQ3#uFNJv9NKa{OAydm(!cRIg0ty-)#oOsbIjtNB3|F=rm=(4`yj- z^V9T*X+!o)R(3yjsRlIlJ9UVE0`FuTALiO*dyRu_;H1bZ%TjwD{^iL@<(N|qZcajm z5#1YJk1T52R3mu?=LE%(*^Faer)!PcMR4!wve?7m%{XsG*kZJ%<}Yv4A>)43j{1Cc47@j!bvLy)ydm>5gc~CalLX5_ zScD-kS~2e$W73z=Aan*yDEFmEMDrm4B!h(!SD(>zKzfxA1Z|jSYvAh`WRMdv#~_9X z!%%8Y8|J4lj{COJy}YvPxn~#Sp{Y+$9&0m|%IU;+ygUck7`^cv!-Qcz?N{tmw~E)8 zbB_@4vM&==pC+QZiUO66tnx~^K0BMXx(9tuW9EgMd-tkI^Dq(XFeL};GPnz#?-IQM zEhizMjwsnJz8QXsw)3C)oT(Bk=q-ft>?SX7giDkrYPPn5*J{De%d@jG{V2=`L&uls zBG(D8d7-Wa$QUbxxo>WtV$+2-rd%6G@`KS?6EJyE!KKefh4)NKiK6rn9&wuilqpUr~7#PE+ zZ83aP!_$UFfTlZ!B$vVy6YZxAo^)^ff66$6@;N& zPS)cmhwBgh^!KlS?BO@opZMlqTHk%~ej?_`W0c-s`IYz9*S>ug8K)?&O1nPkAwtSH zx-c+&%lM>9&t@Qc_#9jtQX{mS9|D;r%xCAMbaT69<6f?t zjFn?66W8enXReP!wW-tgq9?4u2+udozA@;GEM?fowesl%%v_ikY zv>!+M>HYBgcYkJG8AsYAQ;Z5+!F`QhXhF7XY^n!2K{7ETzvS2>yx_+B@XfeULpzD= z4ihEbzwdW~o#^<>I4v(1GM?$%uJ= z+y*WUc<{Orh7R-!Sm`3Vt&fa_#|0bR!7cjF)~enYQhm}kog-J(L2$dFvG1aP4vY76 zI2d|zgEJhSFtC_TU5*i5)}Sue>!79>xOejVCicvIHb@%(G!C+bgv#viafS{f6W8k` z_`1sYY(XrIpYAIQ9;?u8A2(l-5noPhq+}k@J-EVGh(*Itc&!>YMkfz>QjJq`!931fN#lTl*Uj&v%4`&x1 z3oxvr#_2jfNT&=xqMe>zMDRV*u(J*m{b2=&wdl<%K##ta@xu9&A{DvY$9)(R@Vxn% 
zS1|7$BO0Z<)71P(7Q@RUX$mUkAzAxPX8?Ubg1^(CIT&+E$2NujOdiPmH8l*C59w&i3W2~W0*R!I!&8010c+MOT2j&}w-rj57Tr}2; zFpQ{Z5+eg^nN^diBMRsAsH^YYMD130S~bsWmR`rG()Z1_FbMf)Dxz;^3Qjr?qkjSc zo(vrg+E<{d1Jn*N9~~9=1KULJug6J>73IivU-L9zZgw^(dRz~6`+8Ye^2y<|DUf!E z^r`JN!-$Rc;T}$6M9o$0>^@jeo_gnd7(O2E$?4U4LCM(LKI8GS39#;_FAlx&J_sxi zkTBPs0h?qtMVkR^F8gmj^D)=pKK~mdPnqNKFjxNetB31*Kb$c*;A0ef&fq?%YuyrB zJE`mU^wHK^4;ou0NAKk**5foxG?ITC8nqOfZQCebhW;~#xekWS*Va|9JUgI7v4tO< zumAvj7G-}ILv{z>;e|IcZHZD|-wUI0o1*j{r0>_yo~?^t{FQa~E5Ekxo}aBB``tgd z{>^{rr`JFG_?zqPzw)1~ciw%P2>+e+?f1{t`_C`?`qvXi{vy$1m2S?+bnyh1oQ~2V ze6q#C$M^Fa-%@@)@wF&t3{&XA&>F9@y%JD;&ku9GZ6}|u-7FcQj3zv0Iu35e;fVY) zCP2u4J=-Qal4U33!1);`C;}6s%0T7Xwx7uLz%rx46X0UJ3LOI2jVih?i0Q(+dolR2 zO03+BSH&DN%@Z2zhR4J6Z;p@Gy|>=U`R-b8eQMo%`gS7uDDoJ}cmRdtQFP?P;Q7Pv z``r5Qv!7nqJNvU~PT&AYGaNa#*~RLZ^hmC*=?%2W*tY?Wa?-Q{g4wi1X^+@?a>Y|l z(c8fzqjnQ{rk`FU%JZ6M4jjYkn$F<`9_YsEZ}g5HwgC?)@L;#Q6+CdD(13gfWcrid z09V_IuH^jNWTzO$m!%(r=XZzMx~(66@mXj7jdcke3Ry|cV@4lb*G|w|kqht%1Wo&P zv!qm>cHA4MsoA8^hi;(f(r|v$xwHArWIZ#!ToXNO*R+W2vmmYOBhm+3hld3!UbJPC zF8$eFc>8ubI{-S_OEe(%%}z%?Cpr&}i&u`==yyD?EMUBz`#SWiGZULdN23MY_0i&t z?d;TIx>xJ)_(|H1^AbGDTfTh$QTgX#Y&XXv{fN*b*o<_LGsgWe4JD!t($6x;i+C~m zK02Pw{RzFsy-da1zeMky-v~iyN-LNte}>Cp&^+-;@!Ash@$&w0kZu)($1Ds4F)<G0qQ3)6{URj{Tcr>EHp>fSw0 z5CdNYobh9iv%vZ)F!u?F;JZ&w1Rs|Sd2j`H@a~OBmvOQfcMcM~+w=zWneXam61g|! z@5Nc!?_x~%WPBL+v#a1SXgQ0}p9cq=2>^P8kawI;*TV>9Kn{L5Q=?EPnxA3KqN30z zznjg-Mf;sczZvn`(`LrE_nTi`KlRsteeK?bMw>{dPDtweRG)I{;L;;^0#7u0*AeBm zp|^WIi`N`zo~r6IVspAT3Nk-ZK8h+lfu90s+mp@=QuBSBO}MfCp?xY6oVNMU?e3m( z;c;z22-%4Nweah{=qzWAea6~u(msWS&Q-dh$K3}}3Z=4-?wg$5}A<`PEf8h zT4)G|&2*-puDY(=!7=?b!4WyIXUA>o&?#gxoYerSj+U{etZT(xe$e6PXXl+7s4V9< zzNBOH(|O!oHW4zE;42iBr0bn>Wl=^Z?1 zPumlm4E|?2h8`vV=|6Shp$5_m*Edvpw0zc-@IbR;Plis)fs~L^l<6Pmz?gE=D=wbi)e98EXGrs7_No?{- zoxf(pAdz=vaTM=Qlm$^d9;{VfV2`OZA!1$|Gdnbei=|OhSVkCCATo5D!|G}{%72R_;{v7N;F*WdBc<#x%0PeU7LvCuNBf7gEUv=f(SWZ-$ck_xhx*D zTD~ve2*0*&YJfFA`W+L&RT-3rmrtIl`7&^VVT9wZ^XELqu2D!^pBxMXp{3J6YVgC< z+SM24Zj_$7X(|k2u8i{(Cfkjt9mDKh^rtbr`oe4;%@BPXqj^{zz)e@B=C?(3gbG7b zx_yEm{DF@w7P^Q5=V2R@d9ycfc%pCC%NJ+e9nXB&U9NE|z^*==bpAGDuqymfoJJ$< zQ)sl)hSo*8apzi4KG$5yCuK!ZKMGxU10R0)nQwl3{f_sZckaCoU;2QT8lRr|7kn{T z1tupasgBnc*X?KnLvW-Nn)+SC+q!`Vg2CrArCeM&wMV~oyZ|R{NVYz`sz2abN|`nL0;4=7Wo&2XGr|)Dkbv}-K^H+y{dyPBegKQibJ3g6NXrUred z7tLq8mb=wK=m}5w)_rn7C($L=>u}fwOb&RPbyCv}TATi4yy%H;=#zTN(Rpau1%I5> z{7F4Csx#L78DrX&)}}++Z`vGQ;%r2>pzFrluR%{;j#1?%4oAnjX^V^qDvd2&w@o(4 zy#^yUS8GSs1u|2IUbBx2eMv6};L1z6?L2wuG4Xli+Y>?EL@aV=VW~xPbh+`6kJ{37 zdP}~8OEM20_-jgiH?)cMF&<8W;PE<6q^XA57&J&*^pWY9ae#(5=uGyK<6-T=@zb}{ z?p~bp(>iYE`R{8`Wh()Xj_oloBD~+Usd;0o`VQlX9>ohaNRc|v%u6kJ8icy(b*XJp zKZZ95jGR--oX0`v&24wF1HjP+hv#TioHj-005;#;$TEcEC5~Y;m`Rt6Be7{CM?FDb zf@5nnhXMCnh4ghzgPewJw4?sTtW+;`$phOq;~iLLPx38aSJZ*hyNZ#@L_)D7gMD?Gfs~~deRP9`PXCnIj9Id^6lMt z=70Rn)3tlex$BK8DSb2=#vmL1AvhKCoLgh1FJs25lTDgKRBPZ9xwqk7uc6ED8gkdc zitb?mbJ-rLyfe=i`gfz7;Kb+a7+Hkl z>+JbyPtgPigK;N=#x>C;KxeFQ1>R=G(w7KJ#7@@HUs{u~@t*E&njX^8v<_T;YaxKn zsgLj_`r1Du%r&^*-W9)0&L4$=5UNgXmdVGf^=)cHlugTfF6Wor8eWM~>4rr?~Q&HuS|fRJY~NyUH0b5gk}&dtfQt zw5^fgjh=JRxpw7aY&zOFWHJC83Ur6h&i$sJZ7@e4HfW0;oHavl>LAc*%19qGkc>@N zxkoQD_yV@V7~C^t6Oe4{S&Q^|9Yya4$euOu(I?91c3@$P!UNX2lW^@2jYPfllJ+|^d~d1O}q330OmJ>t)_*-@D_qu^bt_v z4b7@@NUZP%$}J;n!%2x#+)~;52UcyG12LB#=SNCC^=)QDS@zlXoZ8*i#gvo6jF1L? 
z1{LAGb9nE-ct?QYh>)1;DgogrW~=)Whq*4!PQrXPB#L23zac=zJ$RGa#Sm3kWukbh zVPMl2qVQm`H1k3U&uj8n&8v#YrM=8CS=?c}Ms)KV-Whip6?1Q3vlgQ-<;iJ9>nyJG zqK3Vu>yG0zHq90BuruE2MLf>Xii2|=9#go_;s(E40q-1F%0FNb15+cGeAWX_+s%{& zr=dd5jYs^7@`%{A!538zqb<(}t3vgSM*F&=-C+5nR>yv~?&Q z-Yo6|#{htLau1*Q4YrJD7NhahDg9CYU?upCylN19rX2!{?{kjQ;ZUa0C?v3y8H0xh zainJ9SQKAy?vbhpQ-3Q{_|i}16THTi?=x?%os28MmfL(rdEMuLe&gHg@>?IQ+tBOX z?>=7toj>rI_0NCmbba>QzZ%}U7zK86wr}tCD_o&d8lcTo!4~+xm_{rS#I^F)WBo##_zux z*6?N{Z@=9`-zi?cg$w0M>Q)BXI3$i{7Ut&W*nA(rN;5y`4{eub?QF)>x{9DRofdg5 zFQ8+}-Ng{S_nG7Mwa>n@PE+?~+I${8`zDSx+&DZwO2^S*_LVRui`3{ieNEoc3zR(0 z9Er(z@gzMn(K#62tgFznnHSluz}@Uzg0`;k)=zX3-i1a_PB3cpZATRE>-;Rv|K(ZR z-^y~QP}8PQx&<5%ERv;H7;$sZQvFR;bBL$Z2FK>YWdP?^b5n-cwHQ6Jfa@6JI7 zxH``{t3&k}{VV(1olevD$T8kx3q;U(Ex`$6oAv}==W!O{fUK#TN;Tv@)>AnM-9vtr0DDxoB zBg_>0qX%!&=+g|^LWJ?!&rfC}0V{B=PVP#-FyI)t5G>bucnnNaFBvQYID6iY2UcV% z7zXn}R1H7`=odrhIcY?A+7FAe~O|ganFgGNXsnU zqKuYFyG^5~b#Omd2ao$r+oY^DY>VLNwYiS);|=bIuEV z(#O3R8slx>G4Amo^;toRpn9W|@$f)jK0j$TfC2CBd2up~`Q`a}KPT%lNb7t*d;||3 z!@*#pZ%<&@7>B3nUGKYLFpqdp>MR`ko}9#K42h+_>ineaUdnP9Iz7Q?W#H0jJL&Gx zD?*I}GSVtN3N@c-pI;?T4&QMK1ejmraO|e7{RqcTy}Vve12<0`Q2?p!xOjP z{PsHk%GcKAw?A0d&t9xQ`Lmx}f8lTcuJyaGzu8r}99GZBzlw4?eQ_4UkI&Oblvj=T zrayZYz-ZBx|0wk$INvk+mYK1JZy5Q)JhD(cMb0Q~-x)Wgvc5vy*VbR);V4T{R7-JY zTNYmo9O<#`>$ZRdpV|ux%yk6_J*Dn*>-dx}6ml&%g0JA!J!Lix49_$Dqh5a zyth~0ZZXW zaAzcy<87wl=*iP{cywIeAKa#V>Y$m;!1}I}AgO_CZpk#i3hpf^uOm|#nCTeyM^*ql zX!80__5lpWK2BiwggH9i$>2oRjL2HH=zDqkvK>5ql7Uc=ozz7uK)aAW3ECjH~4=oFH9I$c$% zT#hlgL0pa>jIV^MAlQTmcHk_i~&I{os9nng(aeIZPYyRjQ%T8(ZT=YV&Z5}SS) zwtO0E>i4yYfdy=sKxE8|lM<2Msgb`9tf`L~%e;j^wl?rG@adoD%BY#TI6Sh-oD-yf zj!=kIwi*V-^ajvXryvVFvzR1wOZ!!tIlEz2wl;vcW3;c?4;)B@J*eje5cZt#RHs(cTP{&-+H&7 zHlk=gX{G0kDK+w0!LuvPYp52RS>I;?ma{cRK2KM!a$gCf~uC+o=Dr205~#c$z7l z!#A%2J7eR9EQ#;cE#aRu(RTg))U>XLP{mnN5M- z^W8Xl&1Yy)dV@Fm;9mHmCx`mt<+}GW#{Hv@)|=-atT)f{8GUg-G=K5-!TR;jy|X^d zU@v0qJWcXE`sl->=*itNj2qV27^&bmefl#$U!xITWbpPPujR#!Ug30$1fM)PjI$g$ zPtBsEI?;NjSu3N1K5CUe`bKY?d>5mMo0F*Mhhj-yU9o48?i__KTALDCd6l3tn z1-*+tvWm)RM>&q;iv)?McCKUy1KeaSXPk}J87O>`11>;FH#g|Z7{^YS#}_d~d)LXOH7!L9-im zk^_c~%-VkLp?P&zbik9h-fg{kLtn~7cOM-d+P(510}X=0y$qFS0TKI2BKL!X<09S4 zW((?GeDpjGOI5@;iP8g<)tG&R>&|s68F?DsSQLXCrs}@qxo-Xmk6~5_n{=Hc|C5bv z>IwbAv{EVvY)Zg%fx#$Ok2Vd|pyjW-TWKYPcS0&;%NR%#Sg>9lsOp9d2RPaH~uQ5 ze8GIO2af`)$HP3UTE|Gc4e&F%z%An->tT#_X7U?AJZo+DPQRy*mK!HUS{pNM7t!&i z>&faN-;L=if@temeR-~w;P34wqPV;XA4tg={)j*3>>`)l2hB3*ubG6@wNUOnyhAV! 
zW0dxHAFZP(DjQ=M6S#Tn7|-;@St3F>F%{qqSPTOjMCqit;!0ZfQ3S~wP>f@g+Q(-8wppW$0PmYWzh`05@xJufdZo$G zpO!S2&QDFU^zO+;R-YC>Ca97l8??LJ-$LRN-Pwm2cV7yDTcKN|~*5%i~ zu`b{HAd&j{`Wt`u=hy%K@A!fBUw`%0b@#zHW@zc-^2NzIeSTI&`sS4}=`(yF8xY!4 zdX!Hidx|#L#hdGKga6+XC0+vlP6t0-zaFyajQ zsB19LG(2E?))_~1RbZgcf~3k~ckqmydYYzA_U>loIM{Q{6F3RBq7!7yIETCY`xyru z(0!?IOvRz$qDgS6zucz}U7z=MMBdFk!&&qUxxS1(v9PXdNjSbm)T|=6KI;{o6n#MX z(3KzK)~Td^@GbiiyrNa5Gjy-hUR-k$I3gai)~-Ni+M#DSWY+!IRL3*&1^Krry=yV`~`}@XA0S8BEtA;M+ zCYm3IML(Pz_|AVF7iKn|I!2Mv?vR!8@I(PvL`S$7ZVZemW^OEk9Dt!B3tu`gPKQ;>kHdSG@S*D{gOa&8Na-rz0GW%e(sIpC1g{U{r2NdY ztMw$fVC)>DwuVQlkb5!sOdIfBgWd6_w|x3t1EpB*R|#Z15Ye5^@P5u#0ob-4wWO`K zxjnU&f_SJ3B=x`%*qXH<6T#mSwdeB%|kJl{RPXg3}FW1PWi| zZ}e0f9K0(7kHK|>x%6w+BshrTi|3r^-RnaW6upUIzYD(591lEreSe)sF8|7>_t*0T zR<}19b9f2-8P)oL(LXxq-Uj+76IM^lU`o69k+jg713FWhp=H_~T@?cnx@b#qRGtmL zlH;vx6}7<+XY6&Hi%5(M(Fr168PUjRBg5c3?bBsG1ry5B0~3+c?a@uSW=cR*Ye6wx zO77ELaX#t!>C^VsJEp9-M4NFgfE6j4CTfXNs;{F!zn!_~?4D3eu#_ zjiJ+Y^hu%)w;ltg*D>?)pqIX#3oJrH;4_N??i^zR~s9`#- zair-?w1z_gnk~#ZY>sU(HEY!#MhC~qcpQZ<#cJac!n+%$>o&9j@cPwdA9<3(wE|l@ zMC!n65OFzNm+{j*qeqE!@9GFV4~voQqYD5{$CRLXfv&2};2ahoNGD9{CI3Pk1BXCP0DKKGFuYff_ z%@7_xIau4r(afiagql_&Kp_&Lz+?zluQ+F04_tLtf^&`)@6z`aF5IqJi7J{mk4hQ) z&XeXiSdd|@1!cGM_W-|eS*{Xj1QOxM$S#yR3zB*N*P=4E-XJ_|e z7pd^nDS>Wr97DpnaG>d%JC9_B2ZH~gEqt>vbWs-Gt-jsIU_ZFLpYb+LUj-WgT+7`t zm@1FbRvTXU{o4JmL`gsQjStu3^WZEvY40T}GHA>1)K!n<=3d9x#?W1vD99)~d=!?L z9*Tz0yAAlEoSW*nH-;!ziX+W*?M*H;;uZOB96fd0C+Ch*jBMSPE@a%JL>TCYhmoQ1 z3hmNuiSc20WLv-c(1$ldj9g29cp&WsJq0U7q+2_mUpaCyAFpE2y&L`w-jCehy}Al< z5|N#ptn06TYrXu=2kX<{^O^Ne{E?qnfBN7%>j%H{%k%hsWcTL$rn{kCo|%8mh-D*; zEwa!vf-q?098WZc?`sgydC-`98Gi~s$T$LVafKfTXYiCZJKo?c4u&)aN2UYNfih^( z&(Jbu8}&ri>m1}-E)~ac;7aKnT-LCVmB3T^!~;DyeBi!HrFmdRBTS$bmwR1#Mn3w` zch~hBn!JJEoEFm+Zk}+6a&>;PzVSn!USI#hXXhyayi%u8_515Z@cZm=f4vFqMc!S< zQ^2*IGO1JNDRmgZ;)fBJIgS44I;i0CQRGXOwJ&`-u!G)=8Jb?1g5}9j`6~rcB+gX4Qj3EZ#FOO zRcnJQIBp~!=xK3iobhp9GG=Xs4{Rtk5%3u5>Ud*GMZw@0&xk-^oGdOQJ!D~XMD~;p z8{>&FaN1$iR1`a?E!T{X;bkN65T~KeLv$J&DU028D)1K^jgH4VA$Ob)HV5C@V}{|C z#((`L!78I1onoA_!8VF>*x}CNR%?7j^X^~7$a5e}Q8#lQS?()gkDjc9)M1CcocfOxRpBCC1W|Hho^#dU=EB0_8epH~#c6JkT&M9lyjfx|pJ zxk0veyq9(iZs!X^2!i7=1MRy7wO+h93uBm%8il1_?euY( z;D*4p4}t*u$4Vkf*qA?&^X!Fn1arD%WwYR>-GF@bM;q90!&9-zj0 zjB?R8$cHxC>1WOw6W?>>Tffh}p?Mvt;1+K=o1zDA+MESv&J<6ZVXU@M&~-pO-~85k{DuyUO`qUI{MgtKlCFp%G1-ulj4F@)jRzDFYCVHEM)Mf!Voe6Zfc;YLTmHc*N| zqI<27rr)-_$Iy{IL4!V2F~@ARbtBuzakmRjJJk>Q6yZ5Ep`E@=CHlj(C__Ydh{C~R z%-S~&`cpc)X?V0ns~ij=S~9TZ3-nLlKB19uvS17y%7YoREs^OIj&!qkrBjh!T&p-k zoUu;eHTI^utXYCLO>w7%1~W)`8yK6l2rW1@g4LtrqxMa&WccaJJa%vpI_mc-hSJzN zR`w|ln-^i38cyl_gTD!~(9*j~JbhFLAT-I%27kF3XW=5v+6{Kz%kFmuo&Ijt+1bn3 z)Hr_mJ&!aFKW!ZV`0jL9_(Cj;gwn|s$7LBCAoXHLS3bjp&T zby1R(45Bc;0ouH{<2v5Wy$29B?Kv@_GFOQ>Gu9j^!b6Gy*gRW%n?6TqjGWR)870$a zp9F~a$zd^1$2002Hh9B8?hJaBuo~3=*we#<3$dvs{fg|4x0xw3Xgb^l1p9V{BCDUHte5YM!qFC>Xe-p>dKK(WNfuV9p`J_(A;akRP zJku1ugzNBSc=_h^WmEI76HQp(P~HtK=7|7NeH8f1kJjA>AFP`XpT)>utsnpVr`A9D z*?a5nz5Dt)ygW_haIwywJ)fPoEVv5{kP24=uiyC|_$a?o;K3Kz(x*@G-=nDVyO#r> zu@~l?odOBm9vMm_HSA%)TywTU&WspZ37!Kjw=}_T{Sg}H#(ZyK&G)nTc71zp_{5y) zyqG3uWT(%$=YH!c=R-TbN8xt=d3DLIJLJUp=s^4(oSdGWtS|kZ?_Xc}p)ahmd z0a0~DP8^@cL3b0Gd=(j|d#anlFG{?+i(aaW$>kRq-{_A~jGtNG>>wVw7*Iw&c&-^@V*z{%%Gu{aE z%KFye;-{Q;>TD*KQ%BbaCOcDBW&)FFxZA0wpN0_V1w27N&XocJr8yknKvre_^W zXQkkm!@B|-{D!aSV@|bq+RL!oNpEhzrHvE|j9@8$<&zV7d2v>w4!-!y7Nn4rmK zD+s#u#6-U(R*Z(sfgS5G3xk4qI>@ML;|>yo1m){lgDbr1B?CBkdO(n1n?~)0LDPou zi@qK(3PZiq8)Oitfrm+bWgLsJ{B;Rbo@JQS1n4tr>}6EGWW2~KED`o> zH~mj0w%Xb2|+Oc|c&&VGz?Xmgb)`_=2yz(1pM9&TqU=X)7DbvN~# 
z?;-*R2ZvLm&E4RhvjLAyZ>J`c^%yuC?Ah(zdjy6{99iQa|hns_gB;ayL6qj?X~5h!W`({ zNXNaL`U|$GlKhx-Ed(j&YuCGiaS>Iz_z+s2y?T{s%Op?HHsd<)X}?G4$tajqw_*$} zeYeyNjb;R+Q|T3c2oKsOwnjcU_xnCZ_s#iL+wL`BRfYm@;PBT;Q#l!2t%}?`(W}vW zlr?&)%kwG-LYrx5f6~^)$+-UZx5oARAC1$GkH_cVJsf}GfAojO|6=duc=Gb25t5FJ z)AP=(zcR-{Bp+JO)MTRj)Ki@h)S<2Z_bp|)PbkoV*agZO`gv`V0?k+PmPj#jS!X8C zDK~ev12Ys&{;LDwd3Y$U=Vxh`3d?J~HgC@_K67)?Xq{x=eTD`dS7%Uav*AKBF(Qp+Z_MN+gsz+?%}u!tf#^CZQzpa^JKbNr{+HL z)m7*$Ne)Jd+!}tkxr*VDIZ3^;q1M0)Y?^IK=PfZdzokw^klS(j&;iczm4Kr|t$C)e zAx?WHS2I{5ZjJ(7?>apJe`Eq|hT7;j@}b_D(IwBab}>Ye5i4){>Q)W~JS_0V=%51_ z1X#wI!H+MdZi3HwRxi_4fkz#(veu_7>z=gr)s?N)C*A;;o&t5PTUR@xYncrG{B_c895O<&g^#R!3qH%DPz#1oZr zFe9@&6@_^^3%*8Z8rf+$Q$Ce%$_!X~5#%#ecI~13?GLX^-#`@LAd{zQK0pmHMUaEX z$q_)$?!#N54f7Tr(e6w$^>uxeMSeFOAB;_A1&`=Ggj7$i5r~cf_Io;)e;8>-uOi^o z8<;Q{Z>;c#$X8dVB}@e(1Vuoe+%@7Z`r%%girk}`x>M&y>em-usI+>CeS6gjXf}!Q z%-VWj4Ly7(q&!}DA@7WNpA7@o3BfcPS@Z`(1?|NLk6M=DAzpty^*RA_VjaACo`XOz zb^rL}EU@Hy=oOX>6H~-faTcffqfF<-lR3$S=gxTZEcf=Nw~sF9KpEIB@gZKT-N?{f zHpduP9hcH4bq7Dh6P>2A<`Uw>hp`<*tOgE{cT6^Adne^MT43t$TxZ<9!HgmP!He7R z!>=fqC@6}rC8v_AVTPl8pB3jz>xReFmyc;IJjDRvpLxISr@u`l&VMPK|LvC^MIj}< zG{S;T_v>J!JiILHm&$zzo*%*wL0%^Ts*_{f7a&m_adPh?gKv&QmzPK5A%^rma`C{h zpZR_z1vc(ej^VSFY~e1xw8u8S3ID_ikj-LnZ!Zf}*Y8Wm2NtQ2Cmb^{1%~1)e1wlf zq_HXscJyl8eCNY){OynX?7#c7?~MP|J7?qFt4|V9M&Sk)+aH^x)HfyuMGl^rKd>rduP>$9OML#bmF7^IP&=zy%j-eRO_U(Jf#nHuGOD@ zjp#JaT{@i$ffc``&DHD6@$$*d_?4gg{o}je_kC%1V|?QMALJVd3%;0R@%TSRPW#mX z4DB=~n3fS+^0er&srkanDGfY%#@XFKtH;)=3qFjXTezfgAXs#MX8s*@6KLgjz*jG@Bnvg(;>%Ls{J zt2%jMU+6|zRKu+0gTM?|Ca{Xg>8tC@1Wqx&t#38I$ylDHPt^}!qVLP2`bIhz~Xl_UHRP=c!`iQ;#oa~{X>tMk

|n|{!WdC2_GgKR2ZcKLnw%qQ)Ku6cXK^oO((U?NYzVEWdJZM-I$L` zAqJB}WFz|g^SoEvUD!k?#NiE<&|%h=4$(~Hn2MeQ9D@^p8FB}&r2?K~*2}m{@tZs` z)9yX{IHatlu8BDNF=Hgi#&DPiGb3XD<(y;9a~yb{`cCk@+-0Wq!)Ot(roPhe&K$j+ zsT$2FkHO)zUa!A!wRE&Snawh{tVKFp-YD~Yz3?;p z#c_#ZCO}eS!9ARqKXDVqUu9p1#EdH5SF~0KTzXEb(#+T&Pel3s_wnM}4~jOp3Tz(s3?WQMb8Xf) zc_!DVK3U*%!cl&6(K%afo4w~6V^oz)5c55KWelY3E?>S@d6b4{HI?=$J|6bL?pQHn z9@2srFm$d6r5f3LBYSpkqE;N0DCpahlksqN)<`gtn|3%8(?E6tYhaf_kY?v3bPszO zA&MZvFYCn!8N+`FeP)zj@Or{+P|?r+PXEYA4P+dH$J66D8Xu4I?|d-!pY4x-|KI$> z<3IS>{qgPxUmuroL=1Ns(lfMXoxIA6tJEW^lPM%GoEO`)&N&Gw1cfPQh)`;`7d#A} zDCrg1hSo#I@-4V;rX+omeaOY|<&=@O7~pF#`_pyb{X(DR_uiW4>~zX$8&Bkn95p46 zE_fv0yL2`>D;8>TFK6LTjXd()*FJb2;C;d49FgnQ`|ViSb06WV*X9J(K>3(%# z+A3K?S9QC_32;~j%(htNWG7F+;|&|C%_ZJ}sr49I;7p{iYgQi^f==|G-wmqbFb4a* zzFd9aoLi#L;0*kiRuUbbroQQv=qh~4+8Pdr3&EQi`lpfgW+T!cEW#$TaVABhp9o(N z)jbZK@FV3lq^cytlm4BqF~^n7@KX}t`P!hk9=;;CGmLyRbL zs~x9II>Rp=M%1RQiqxsNJ>O+tXk$5(k zSbLPK4gRHup{t%90SL2dqmxnc*CiPt4(57zgmj)w;4PC%X@Li`FQUlwy~-qi`cls< z_ZVV=#^6XHzX>u$v9?~F`=aNWz$!&l&lcl`{1|Y1Q^Y)-2w4z>_mTjfw*K2{l85Gv zK@~RChaDWAf7A1X9}`5Ro_LVcxOF@x@O7qUylfLY?s;#epGhES=pF4g<$>Q>f6!m! zaFi%fq_vhdmf4S`Ljk|=(6ODst~@2)arLyny}cF((Zoz3%&6k^tK)HYauLSl2*lY) z-8}v}8L3~_l%44^nU&xxc!TqRG`2#I=ZA52LbuH@zPHM`j;%b4TYNeQmwVRnch>Ob z#d!MP{KK*G`aCy-OE+^a-xc0xQxPGX%74}eA^PLYf2E06RnCaky_D?3`COds3Es^! zQ?UIEJ*3{?Dm1d`NpGYrZ){6vy9DY<9L_Fdqp*UT!WldnPk#)J9uuuF_CaW9lKuz( zwrjno4AMtq>5VV~gH(2%@r5kjCpzpp@jTzjXi*R%_&Fyf^#t$a%n%qEW+9+J{tex7q-X$>rZ`V{Q1L+arV(i&DhMeE+;O$ z8F}UqH(i_02u^Sl{%!Dp#}+uuDP>IOOk&MX#yu4TM+^M7=;TkC5=ZH!5BZ4LLHJ1>Xtp@pPPj+=?KMGZPXkGJTRCivVz{|99ZWN zNMH*RF$H&dH7vbgAaTGn$S97CT@XTa`Xz5r#1eUHxuRIAFVhB@) z-T~FJU*yRTx{y&56m)HP;Pm3Bz5@Ev#xo4Wlt0lqeTu)AdYvw^cZqhEW5}>aX6Z!q zlaVW?(j@$Mrg`~q!V;?FZmw~F0ykNv_XQ6OIv9;L)5Y4uUvp;2p! zK%D(&S3B!IeMMWVz1X|Xh>x6mcf5(v-m1yY&=+7ZtZ;a^-@eY_VLIC$Foqf9BnGBc zJD0if=5Db`LDNAL^7PK6wXF}oW8A$cBu1?UH;7%nF=!ff9!(e^_edJ$vy8kQh~YJM z2*%4}JQxjLJtc#{HB^CR3Lsr5BM@(n3}d=<1aao9Qv7ep*;XSr$n3aZ&Xf)sbEF=| z8M7mf=SA3vhv7;PMm}vYURC&c#&R`vF;Vm`pSkIfEN-ILwN@k%rLlp4B%q zAAvW_uCLlJ@_E!Ltv{I3VObdm1F`!@@G(Y|04ASsVR&gO9v?P0y2rbPpDM!Cw|rY%>-E6wVke$l1vU(|p8gyhJc;)h zXD!diqlz~ikNDCo+mz35DO`dMzs*YIIqf*mvhxu_pDExJP-q%q>)3T1iLJo4ojTUs zhxj}X5ROR4zk(CbY^Lt(^!*1OpNxNg|1-rE)jkb&p2-OhDVqN$#pa>Yvq*Wb5&7co z@ZyuKO($ zIcXM(ac8*kTE17w=X&Q9=tOwADdt7Lia+(gJ{gx^|K_;(&d24QpZjNCjQ{=be=`2; z?fc{Y=)-Y(bTUp-<~lrJiKLT!OMTvpXhP4<8E~X+5$;BC5I-__MWcy!-=5sZ5r{*+ zq(@-}dlYt;cZIJ!09^ldGN^y~P8O%5jBfLJeQALnYW1D2S}>&cr(($`es(SYX~U;` zkqM4jJd(n1t^32#>nOO3@fbX>E@;%b4HsAAD9ZNl{QkGcSN6A4&-(cIDv=yLN7i$; z*+nM;X_{zb<2JD35pbHjG+($hVWAVsrTOfUzRsb`G@W|poR2wH{rdIm>M~w!qwoAJ z^{qKUi^uDjRo?{uVrhE8*m@eN@I`mGOTE|8{b{K(?V!h%7swdCTrLr_%HsNp(>IsM z%?!Zzb~WwMuBWd#1#>f?oA>nJGxqV&=Q^#qmZH^H4VE}4!DHtz(T(W`eJP^X*QW9F zJIz#22<&Q8@~2{3Gv(ijPn2w?E)vlZXYj^J; z&u@h9jjLlB0o{{v`24Ln3&9_~&9($T65ZX>W5&&4}xY?7iP2OI-h;lc>s2ZA|?}0!|S;j+0A+!O96#M_vg!J z`fD2QwB0!X>9DbkW&olH`YPHeLi;pJ%OLJ-V|s$3y>1K=BjxTBHh;@PQ)0;7N# z#HB|7SwN=0&C9LO=YBnJ`^)E#5R2kPZu-Yj;jK103QQcXC+p@&tftP#@hV2-D$Z1p z-?;{ugYa_Z;Ia-#PM?gf7c`ip0bVo!fmh+0JHco(3j4d82nbJ0Bo01kj%3Z)p@LMh;l}6h6kw zH4d~rjJlU^`ZS#Z?TW|~$pubh@|WNE*7*1<-x!~NaWMWL|HB_0|NifbL;UgkNy{0kqW~La;6UWo^DCe1##;1vY8u&bo(lhEiQQ z(}6tVetxGPt*U)l(8zc4qeHIdySBOtgyGi6;;6y*yJwzRT%U)s1+e#VvR4~F#IcEq z>Sa33x2yBh@he|A7{Bt*{=PA`_Qqv+;zSS?TFuN5Si}2iAm4Iw-g2bLc>;s)nKf*1eTZRFsU4@bt(eWOtH_vPGC#d!jt;26X!w{ZbK?Ir^pN+bceba_WWp@t~1yhiMaO; zpN+TPdMjkU8<*jc%Q)@omnB*50a94=?BwgKxk3KFp;LBLj1X249f}X5xj^{6) zrT*Y6_a^N)`vSP=-Lzx18liuf5*uPja0@Q57;P^NwXjjq(ZJ98|PJ# 
zXu8o^`YU3ranJKr963bZEt*2Ka|*xB#kE8h`OLoy%IMfy|28Dps}$0Uxu-x>XdmzlGXD+3yQ@IH+JVI*yM zG-M4-)6sB=0VZ7V#AByK)xitIh#Ull{p_4D!Nh2qOW#ze55~<2L;G805T?y{zZtIz zW}dX3UAmotH&gfdp=EJfn5gYrbzz++*a_}+@BnTexW3K#2{Usuqm0K2<<$iRHKUiq z1Fvb^E#IhTKXl(q8Ij(aGjPCQqs;tfhFr11&}#VXC*M37JJ#K&qYOo7lH)gqCG{~Z zRjmD(I1OlYVZKgYQ?`=hf>9{{R9=9KSFkXM~ zap-e1{+B;;IR3qZ*W(ZEU5ukw$K&s~IG6h6u{tb)X+9U3id^HdC9Rg#wx9W@UJZ~T-^q$gj_=V|QH5Qc zb){fWFLIjKJwKl;jv{w<15EF7J!QiCFTeQi_SObkn3yXm6S3btV2r}wblwg z{phYLep{-+8KvHMQfR9X}LV0^4y4Ur=@PnYv<%>(})Dz)Gw=E|D_1iqQ(+*w1p`cG?Fn6Ql%&zZzh{DMR#E}S%Mbr9?7p6@x z+0@cl4o`y7;Aw7}V}^g=j8YObS`CGt%ozfYwfbmge}HL|%b}%*HqCjf9FXN;#>Bq8 z{k?H$#(nT(J6vB(xbue7{q(aol?QTr^Ds_Njl*6CnCThTXhf-qq99w!7PIIK;v){_ z4oMmzq~q2-bG ziI)qH>U4QB+dMAA&suh3h5sBnY+}utd4sw1bc;AVD*t1-kzDh;~`Glbq51!Y@v-7hU__zSEP-d@- zj!F+j+hznLcrd|hM#R%Npglz%z}r{P0k9d9{?7k_p~AiRJ)Hw^dsM&rt#q2zO}?;J z{L@V>9BK0|GGUq9ec-*7V&|A$*yDYPivY|x2gbQnbixi^qXP_g$D#Elc)GnhPZ^n$ z&=_Cp8~DK!T#(g#EMUk9C;IME{KQuEs#mE^`FZSLsY;J*E&8 z`K~me6%Ic~;5v_^9RXu->eKUZ*UUlsgqK$0Y<+NXGXB;NJs%%`{}0X5Wu7h90qr8IIH+hN5-NSZp( zj$b2_5bD8mGlDaynQiCu){J)VrSSGj!=j^3j1ygKLzHg9Yc=ZEMTK+~aVd2`V z1aUk2hhuAZxBM{Yn~}Y{I0^Lp44AR=Q5i1o!rj#jQT;It!D+|dL)YYu_gYJ_E)*N)QJ>5Oek%+N-xOAcjW8E>?WZxaAf?ARfV~DdG@@+F z^VJ~vp0b`X`DxNMmqQDTJE6|$BmdEB?U&PE%Fdy*d~dz_m}`O2UiO9p-o#0{{O&i$ z>#u%weDs}fk1t*wjsN7&esTP--~M>qef1xXjRQ8eQ0x1Jca+BW0B@ zUg2~B2N?B`?h&-O0Z;U$=-xy-tDU^dqFJo`tMiqz>ZNb!j%GYsY2eCtW7M352143M z*~=KlW`4*}_+e%YrS&Jzc~*IPk8UZxAtX_8$Ce|%qVi?RemWY`+GHOtqqpOPh*pi! z)2${d8#A6W`Qb8s3=SL<$Iw3Mq#pfOw-o%cD91to#~#q-jBSk^qnolw3m@wD(w$}5 zmW#Z=rwwPkd`5Q;4-7M!3uNDlk)T$_OjVj} z_M6=2_0A)LWBoIZ3p}1*j*qPc!OemMerftS5hHk+V6;8VgOD|-fnPl~4uTiX0i{6* zU0>eUj#RSD1L86AjJvr9-HJ5$>9&)(&hVM+X4g6bVq8wD2@cTpD$LQg!lWFvOOs;= zacI-tXCDK1~$`JXfpB(4%x`%U^pmlzF)-m7?U#5*21*cz{Pd^1=GN7qGC)x@C zZi+MQbXjchEO_H=`BW~5iQEcq;L9Au=&sb03iB_41iS>^?n0M)GQ)IG;ly(RanfPZ zhi>j9xb(M9OWK$YrnaQ41)pZn-=q$k)Wj)?o-hR5ecl5%heKq~AhIQf$z&bqz1kym zg`b`Y&f)}YC&&>PX-}{lPt`TX=Kdi*SPvU}@Y zJ~uN8pOfHo{ zTE_-2Y!RtDbKQ3xFEfN-m`GW5ik`u~j5sS%xq)EzYIli;CT%r^1A6p(J}+{GbbV0< z+|u52)}<7ca+G3>Qy%CTS5C^e>o~QWHruDPWdfr5omCv^w2!{_HtmS`8FtL*8AiEn z2cMpqFIR|jl6t-CUD_@n)RW~XHKlWLQKo^9?HE!pFmUh-XC|3+Ms>VgII&61d==s9 zcoE~icMyh`C4l$8Ts9vIwOTKV|;c)WFfGq%Dz#-KaLp}2`cPYr|y!{2}Tx5xTv$_Mw8HyNcUhO}J( z3h&}kzsr=9kGhPELn;cWGF}+lT{`UMijR%HcEZN;Fx|?Dwm$ ze|=nh_}y{!>h<{KD_cR{rt4}*gE^L^;drc3j8whHSuG2KTeqX`0*3}=rNB%9y;OaNv~kM^!ooVJC9t;byq8Y z!Mha?dDw+nTIbU?!!(P-hah2PT*is{==Z%h{^_6lW8fGH+P6Kmec_AO zJVA%4Nt=ZsaLwgu4B9m4t{3J-zZvi1Bf1Q}ry;~o!5Klx!GY+uuqnSAyHSIiXV@0$ zYs=!i=GNQ=EI_9=!yGxKb6Q96>$?7U+o$KP0;}^tS0>mo#!siuy2*jJs2V1l^0bt- zzEmrHyuF>}MfhOG2Xj7~q1r_-<469@?aBp=R;WFUciw$_Y^BfIt~|zO?d|Q4w_d!R z;wufPaHoJ6KJgj-4#zCbBBkv#VJ-^43`qeI{QIdq^N^^0xp4vpfaEHM5W{$Mqk|w@ z#EVHFj~z1v3%}ALi%|cDd)eI!zYjF_pME^?^Zyiz&v&fEl;Yf$-#eQ;4ZR`7$XSr0zNk zxrQkb*>@A%%r}^OvzxdU<`dZmAN3x=9)`J(w+2RDJ^n=eS^rFN>mN_q!D*;>oWJ5B z@JKg-18y8P4pN5T^(}xA?4UD|d%U~-M3O0MP6dZR8k^FfsQW6{C?c>lot1mWtkip! 
zdshr)O?!6fQ?c5JWX{QSI@=vDZX|CU%oJ~cUYmM+1ica5p z-6!6FFtqT+@6ef{WZXMhjbU`+i@?$LLJQHRsI|rytKgrs!+tolyJFr)Xs5h&(~V%# z&#U93arVJ?Q}5N-jDXfi28KFHft?(|umcF8C+v80 zSyw8dM{-cAv(L*;j$_9t;NwKLcTs+C zj$V(mS4X+FGXBjUerNp8cfLJ7bN_K=iQ&+PWNcOM{^B`|Zo4@#Vvf@gM#(Z;f9&*d6C_QjS9BOY%TIzzAD#nGhv-Q4taqNnQ~59W zrI*Qyc_=2xMa(*%z(McVfP+5Y)jJ)ydVVEOwbJpJmESIsi&m%y*@ z4?#Ehj?5bd0D~Z9XJ<}$6ObqeKDH1)oG*5RmFcAKIX_FCJ7Ev40*NJg=%cM@iw0UU z4u?+<#_qvk?Y8Evj3V!{XIE$E(F=x3I=ns&55lTcYJ(@j^JClr@0^>p#EYcRf&-sR zMEE2s7)#_RqC=4FoxLdQxyD=F(gu*#cSq-w(yw98XQSFMhV)8}ZR)G>NS&AG)?Lo% z*90m6K$1E!6=TAH^H|DUffwU1iB-=g6mImq@N$;SC!I8%nGi9b47!ORX3j77mtP`& z_LmGQKRrF^p{_me{UAE+5K7+n?tU1NXDDlsK5Em^=Tk3_)g`#Bw}qL~z|0sJ-iVU# zeGD@l3ovpHOf1n?_dNp-#`M96280{Vb|R$Dp6&;Zcz}i+f?M?zo|MJdS67LwD9GT0 z5HQ4@ah=~+!39~xAJ3m!vNPusxKB{vdbu?2GWB1>Lx94_P2n!^GTTi*;39PqCPr^3 z-mmLcaMDT(32y*HY0q17P(y`V1qPQcUm85J)KJ)RCvit;%D=oVzsFyb5SF^++4xdPzo zBtm>1rzPKQI1rBJ=)Ie;tGZ#5wRgUzJ^c<^g@g=XhmqBITS_s%aGt<2T=XF}@u>ei`{Yy}qo{FE0juaxXwB{Q8q~ z`Dt{MZ;_TdbP+R|^5Lc6g0p{hZ9E!`bl#jCoT_p}7X}Z@ZKlZ3B2g z8GFMa@Vtzkfx~&cx=y4YzF$kQwbMBxQ@`!n=`@b&SfQ6ID%Hzifj5|@qo5mH<;h&1 zjzsVjK(%M=n6V2P`x}gPNav%~<4YR32e5$FYXvf;IKb*TJwC16dHI(&s)>e`f-8ni z{hR~36x!;n;t{kCe2g~)wmmDOLMJ9o->P?t+-W=dD%Z4mc6wI*sISd%<~ac#SvN;) zKTbQDxRwP>`y4Yo=oRbvSsgz2ML`p_2T$~DTH4<~sBVQH^z$Bl_C@B|yIC&cG=o*0 zoI8DOW+f+U54C6oAT^eRcq8u2eZjz7=>NG*0%N?c@(g-V_iKRg4 zUOQhxrSxPDUnz6SVlqUd-VhoecT!!-FK1cSp$I}}S%?lEqPTCaqtx{!eKpiDjW;~$ z$21n20=MtHu(Ct&K=_)Tk6^%C892vMm?j<~aE#<4B?Y#rpyz!A4!IV_PF7<^_L$1# zCc6QO>@DG_azLLLcLdh2M&w~gjs~N@wY}Fq!hMB4_=K<0JBD#d3-}}m6p^8tDVR|l z(L8S@DtVgIcpM=K7SeVY5xoL#R~0~G>Lvu}Qb!?uQ&zSDd@?dpRi5oK!r&0WEc>9K zs#xND;33QI;Koeh-6!kg+3nrfOFuT#k0lYFNX`yPZ8eD(GB#{)qKlULcN<$zjR zg#`+CZuC0GgR*Ff7f(|PRob!IR4hR$G`Qtt?@5B^&*~+L%TSX_#<`KF-n7rZjC2zuEyr|QRT_+nSO-` z{mF+Dfc}A*ycGZGTjqwt=wBfh!IeR~ysSXSy8Pn%TJw8d9y{+-+^}UH) zYkh;GLIV-n3SX>57Oy{iIllhx-uO@d_#YU*^mKQ8?d)=Vyho? 
zpmVHJFWQM9$+`)=vb=O7o#)*eTRWRg(VNR7$hZsc6qd1+{wMF1ha7bd*pmQUvZM|~ z=S;zoKA*&CJdX}!6PC028#tt|s4Doouw*yzb;wd3;ZJWp#OHL&rRTw$JK>J8*J#+A zLh&T~5xW;0kzqDrH9W`&%aRIc3~9P&P7)j;q54DU;3m2*ePXcT=ki>zHJu-h@wACj z`ZuMeO@OXqq)py#yD6jdRsxw*W&$!;;LiI_IB94jkgl^GTr&1n9+;zG>7H$J7XdT| zPlqBD^{y}+#O4oEoTFC$lstZzYD+9DhdOzOG`C~YpC(_v8}7&?(6-wA>Gy;y-T&RO;9183SRvrKQC z0`DY0%DT!etdaJ?fXMm><{HGd8sixT#UMI8jkWPD(P9`p*Sah&48@B!E~B4w;v8H} zfl+tc&lNCeuaQrF%M@tWbJqE9Y))aU!HeQD4vG=LBw|+|W}a)-rD|qM`|RB8<0y%A z-q=!#6;AYpqfTUu!SuX)4xXVt!5?Icj=%6H5%MUy6v^%MCyeD~HePwCZ~A40 z0fuATVa&5qg>y{pp~8?$byNai(|O(Q5yJ()*1vD>Zibzvv*|g*N1PmZc{}|&417}b zoy{EhYUFcn=lf6m#7{>*1@Faac%kqE5AGT2&@{LO3z%Vj;$UH1Jm)tfA0=QmJ7=`1 zXM!8t+Q!*tFI|t$m@MndN$GbI+0A9!Qr{Xmh(OQiGw>9a;1OVp?|HbexYTHzY~WRR z$wj>3(3M^TYp?k9BlVz3s%jniPqn_!GH4b!Q!cEQBX!S$y%`D0rSNlp6f^k_e@@$U z;$E^B-O@Vxy)FBjjRf2If70IJs*G7~1Xotn+>W<5?#KV++5LF$_GOgbNoSmsV6f=} zxbMR+>$g{9`|5a6564?8a5x>{g8qBj34zi``p4jKfKo61?6sVZ(m8zBu~CjdBF6%a^k9M0 z@KruIs3y+YpTaU(U2b=(S2+R5+-rx+}mkhpLDErA^lWIvua{JI|28>cUyD|D$~a zH+>F}$}r|rUZv#cOmW{kk1O@+llwBbogke$IU!+^MlgN-us0kd_(7=`AUrX)2Gj(OSjUtITY}N zAA;MBb-NR#LBf#>#Vx|6BI!|4B9E{SVK&}ifHGl>fyQ(UJAgY2J7p+k9vm-4s=M!s z&aw2Q>)nT_03}lzgeY*#ad#?wG63V|K(t9{Oc>7uVe>o~`WjmI(kZZb)+7`uksfJ- z6JU0;W|qt({qCmzAgH~IrR90s&QIqecn%@*oq}tXfn&LJ%y&`iT#_;0^KR!UDc$J^ zEf5}R)WGKo!;BH6bL&+`m~MFFxvmLB;=%j931S4a zPC$%1CyIgp^f@Q_h(Slg(47Nt6L@ZI#p$%`oI16Q#`q?(0glQQSaZ*EM}ReYD zlK3pYLzkxQf-iST4#V0bd}t>vJcPcn5W&56QnJP`^+b74L1@|9^NgsWjzjS2v)7fY zSW@VV22`4V6p%L4lfcUmsy^kx6#6oE9_^&R1?&YO?N0|bwT8AGata>mDCVLSJAdNs z?eVWY3;tezRQlm(^aY0c0^r!YJss=!7imQn0AT5(FTJgQDNlD$nCTB$f%6FCc#KTk zhesY__#V%%qb)_N7tXnSWG#Y`%=(T)zKk}jBMqneVN8crnRZ>(5FEOorMnih#ij7W*9YEX8sd#)+sq_-B_1Kfu z(3Jrflv)05roH9m?90jNWoT@blwe7aCY!rmoZmmB9;js8JL{d0q-smOq*E9N4?!R! z;i!>Hr<=k}`KHx_M5z-9l~(R1D`B`g4H4q$?J=m+xX-lSOyf<%i9y{9!D5u9pZjj~ z3IL@Q;-qZ^g($P8$vcJuV`prJ7#%tY(mMtoxJB?a{CyUaBtl5tC43oHe^X~*olF}d z6ZX`>DFGLzRt7u!n=niRsDFf%f??b_6+1hd^$@{Mcs!{Og(0wj7fcuq{xFC+2@LQ| zc}-k2;>fv7qcfE4TNsOBCK&F2Z@l_)oSj%>YOKz1#N0c;J!Q4SF*2D$DjwbI#ReV* zze+Ogf-fBmt>!Srb`0>-!$dR;_rgHHtM#*;5XH&K=jlc88rTg@s5c~8Ij zFZ}3_Ct6xKJZUGLPA^e!($oYxYq2{^KKL}lJiz2Pg&N%F8Hy&IuQ6uC?-GrR+Rcd& zso3O28PR-~hsDU>r+r(>whqsxy*d&M3i_r!D-zV(IVAtBqlP&GPGD;*(+B78RdV<@ zpjJstnTc}ANrEAJ8oQKF{cz}LJ?Pwf>X45fjWOa=lAZD?i<|X8Q&|NsY9maf#Ye(qh(=dm*X)?u%DM#G4xks zg`6=eXK~6-PD{&F+AW1w=)35vB0D^|A|sG?8oh^(HnF*W|AX<{d#mGb#<>5+XWtsX zb#gYomT2sXj$F!a>SM6M-z9y%1~(i&_-KHXzIXfOob*T|_&6Dy*YU>s?%+NABX|l3 zs%qVH_c}&mqj;}Db-Bl5E;MChw?w7h9tZ6-U%(*y|r((Y2ZG zeuEQj(YVj%H{Amm#`@!Hs~hx!!*ZAz+yKyw#Ree!k3<>58-V&SIUH8_>d)K z=ir5|a!rt7nAqw9*J=|5B=8sHXb0R=XD9f|cU$K&_%f2qA&6&B55or!057&!xt|aFn5R0;EQu`u>Ul=Jv!NQ!K0b}^QAj0;+4r#PZe4{rZ6d?%25W(=XBn|kxy{+xwMy02C5Q5 zsEPnYn;uLRK#39uAkS6saxN7lW~dh#LADE!-}+~gLdV|2+}`X&-g2qN1LJw7n=0fw zuZm&ch?gaLH^Wop!Ru}+Gxg=8ut+uAd9JhN^V<#MzBL#Tw05?~`~tU5@WQP~x8ptO z180O6XnCXhy(R8Aip&;_0bnv#n8Dip4`=&} zJqTc@xK8C>9&vH%HH-211K(NNK8?cUVK-6$FJa2TQ|FX`wH|)1*`aQ~S%Ix)9ZVuI z;Sf(4%Ri4I=0u394WPi{n|o>`75&tW-r zfe8$$TGlKy*T0FY+Os-3<|3FZ^!(v`{M%nV9B)ovg?2Fr z891hMqo3iAv_Wj7X(=;0p>Hn>b&F>Z1Qxsp589V0!Rs0wYLw(mvjI9 zOJm`jH|QGbg={sX@GbibPdwh_2MySsgC=qLPEPW%( z8T#9(%$*ddp=WrTZ9>8+r-RKc8b(VhkTv_}8-a4XTrVf8bes1F+Tee4U)xi%fW?%(seNTTw zFGi4~+l&qwMK^y7Ihgh$?hi*X^=sd*dGvTNS6-Z11v)&J!YDGBZ}OunaiT|x%ao_@ z1s6Me+vD(XFVXRC`H(}Uk90bxmq4*$meuKN1EsX@waAREU(zr#y9&;raU`&^Y%uf;?hk_d4w(c_nY{Nt`)*ekU7elQ@-BMwCB9sw` zH@xs17{AeI{$QpC9-;%YvKR{Org~tk5LTZ>tx<9mbiTy5Zohece)FU;mII*~BFfdc zJA6p%={un+qNnCddx3k=KMY3Ld`@GTqe>v{1)qhwP3RXm14Ur4d5ztmI0xxauWPeQ 
z@DMV_%?m!vj%D~5ah^EAurD%!F~aCm*?L5J!d&X$#EPq&P-p35nNHq9eYRH^u+`i9pfQu6V50p!e-eX$3Wzt!cA+z zoykBZ(9~+BF3OCvvYqq$FJk2Hu4CXM5L!sx_o36x_q{zHA{2L*7d7&fXPBdopLOn? zL{}plTBg6YX6{%zLoZF2?{$2X!wkX0Orz$jXv)O82so<-r(MFBPcL_=rAv8&Um^zP zK9_pBJ*m2Kb|P44JssbCPub}N2FvNHM;t`0mSb|MAg?rj8+x zQO`Yt5##c9tX_p@POrxK``;Pg`NBKnm;R-n8^8VecgNS`*nVIR4uieG1)Sw;TZ5ls ziJrqJc%yX6Rrv0wz6Ua&IiIA-RgPt3(vrBQiG%B|JWAcWaf&Tt;|&PR3=w)Qtv+_i z7CsE}7z>BJP0YHcE9WF>@`+~*LC#@6hWI*i*tB&xf^HY>!y$d;9UC0+xz|N(L}Xv) zPCv-1^6f`)4$HY7*$@akK1L^o4)lBtB>m1wpi9uFL{TfN%h z@XI*p=L{C3`{TUP<8vb3a&fh%-6J3brd$EV}`fzG$#Y?t5GO_YDT?S&0E4m;Vk2;vwKLbF{0epIVH9e3!%!2U zV9!VXBUH^i!~pVSJ82Fy>PqrDGWQdU(0US+RFK2F2LtXHk^# zDlry=Z4IkA3WUEYvpc%-ARuTNJgHpTwcty zN>p>1YrBLX*C;kLGsICREZ@PYU5dnH#H~$UKDwFpX+mP6gj}Z_365xAx{wou z2YJGrn>uT$>-(c%-b$NKLKCr^I-aa=jjyb{8D|NB7}7gqYf`|*{VKdsQYAxw6`e-^d#A%QT`I?cRc~+xZ>^a^#N*gVwW-bD{I+lwa=)^)_>O)e zPsZK2-tB!H9rvZ-1CwXc1K{@=aCGN2OiT~tYjwc_7DqE4;h%|uc4YBfe4zZt>AQ?k z-MBp+|Hc>Mw4S|=65=4;jc0Ez$GcbG8k=dKlCE5lV@$>(c;mzr zCbUyNJ*c70Z%zQGK$ehFhbuau{E&KAq{P4Z(YX882jlk3-x`nK`glAZG4_e_<2+w~ z@ZtFSv#s&hf8n1Ue`oK>_{}KZ_jB(ma3cT>3IoLg7}EB1?(j^EaLx`XWDqZU}e&j67RIHl{C`kwwFhcXAA>-OU@G+^LZENP;WH*vb*mcF51MAkF{ zUFupWnf~g&3~8K^zyu!8q8D9Z;w{6h&N>Du3l4l^sItKmnG91{KI>WO?G#NQv_6OP zqC;N1cxxOSKCMG38;ezBP(igRa9yL%WL{-R1bXB;a7Ay?@AK_#fjRx-JnmTT9bF9$ z{k*wNI|&fjjTwCHQYVEC4*Qat-QC!vI5OeYjkHHU%62g34Gseb15ZEM)Q0}+8$ZK8 zGA@?XvMa99i%$5D_;fPb_A4DSGpEG*9>gH18I_+#te(#^T@#=0+T=0SZ9%A_60Nlsath zt9;VWz*X@Jau`T2Y3Ueo$}otW0K{n~BSPhss=K?JO=V*MQ9;*DM@TCe8}Kld5hkzF z&mTvqo4J(X!(3VgZ^~ipi8J-v+0XKg;5@Lv$IJxGec&%+?9QTS+FlgmPMjkzSb!^V zI2cUETM7vs^^;11du?NM<5h6J^s>GA&WjGvaC+OL#{Dcx1x_&I@x^VBCxxZtWb(+k9s#{UgX)0 z(5LT-4-arE;fg~w75p^BRkA(5$3L#u;-m~+&(P+PNt;}DxEM|@~(kfQGlytbLsd+I@7Ut8DvOeC4nA>5=u|A`$uB2Pfn4JD-f} z-}&aa`kil&$4@TC$!~vUeC7Gh_&Fk+weJQL(|PT?9zHTb!&&-fzht10f? z=o2N#rAq2j>IKLW~*EXU_uvOXwpxhI@ z;W_i5j*ecB3-fIPhiB+3Fwm>$+F`^zO9#(o;+7%e*Hop_^iAJSPtUt$EgGJkoCNNJ zu^&go9Gsi01Peii;O*)2gT-XIR1jL)YhQoy8hVkV%fPi6yu#;YU+TnxjFS>0FR<(T zUwE_#hM81==w*D&n@_2hP9_6jkS!ff!BM_w$Y@dGI(2n90bd>n;hC;aq|9Q3@^paA zX8=W<3N8Ul{lK8x`k{Np+CLfI_aiMf?Q5%*xnQ=x4o{o^dz) zfIxStM~Ye63*)0_NHFiEUlh{qtw@&V95}<6bx;C_Kj0zW9D9E<_7X8~1*WE@7~$08 z(00{e`sz8)yGZzX-pblwL8m&=BZ4K4%2O2T&_@lzk-j^4`^(*&u5aKn!ow*rgL7>vSPJR0YkqUj?T#>$MRkX?=k3sSE*Vd4vv*@|)6fFA%hjmTQ?*uPeFp*mc$OtfBYKGoDZV zl`U+eC*d7+wINS0c*|_|!W{fGg`RTFZiTnB$8jyZo=dxGnMN(NQm?xDOBE?Yp)-UC zm%7!FDht2zV9`wzg;d%5?zL_x(Lze)*@vU!8Of(VG6KyCjK~%vfUbkKGn!ZLJIK64M?Sr?G zeBhn?ljjH?lyR-bzDL@H*L-VsFF29~*@zQaBnQqrWAfn}n^F!LYze@Jz4eGj+pph|%jsV1EZQ_=2JVd+z(_ z%77Bfe3HmfimtgAcveHdvVQ(LBqE!XgMr1voIGRh-s1pQ_9JhrX_G$8*Pe7Ue3ZA! zJ7kWb+~_dgOh21he%FaymX_71rz9EG*3vOy$m!P_x7Z0r_2AH((rkodQ?KZr=;02D zRYpQ}nTP1HIl0%4d~=vU-;|YIRcDYI7kT!D-J-Z)BPI?1IWDZO;)Hn8rj!~Cn zz-fXGvIK5U1bs0BSi0jb#(i(>9qyM`^%?%VS}HcF!xiwJd-!Czh8{iyP!8l`PdYg< zbir75wmUs}oqk(;Zg;_(apG0V;U@=t)ZV2Yj*x7hKnhK)*DtaoT$Lf8fyZWDw=wvg zRsRs38oF?xUVi*x^kmu!?A<#grVf4#@XB0h6DH};&JzNF#;RBs_FPGi;f6GmSOnNl>-$5><) zZoun=jrL`Fy-^*r->G>c?QK#*see1nD@wY#v4?g zqk{44o#r_Y>4#_a^+`g4juoRx2*v|#U4i#3%pu*wQw~U_g~;9nK$~P>R`}JA;*yXD z9&ObNOdX7{Yp16dT84|d1RJI^X4{%VxrJc3~TvdM4dr$P%T)I!mmte0ozo%Cg1 z7xjgc>ZIW78F9Gxy`w*K>VKaJKJn+WobQV-E#q<@@yz17vJ~Rvkr;}6)=6olOXVGY z>N(4T)Lo86>tpnC4_%iI!>l)F>X~EqxxYAl2hh?W7H8TkjOq;B@UXzeyrG$9-fasa`HRliOId;c?PR86M5i> zayZF}v-5buZ%@KkE8)-Sm{G#%L;E`Uwb6Y(om%UJN&LQL=rLu&x4n-quFl3?U)y^z z?$6Kj{j6*B-@N){9R0Ij9KZ3e{^{}C?>!%15C4A@W&S44a3{v4y`m@8z!wy??euxM z?QCx3Uw-FaG1vZ2IF}A0>WbW*E8+E(=u!DFPHS~m_*lKNM9pGE?~n=n*v6~HMY0S? 
zB4zxl>HH}}=g4qXzvON=(BZ@wN{L>q;m13Xb&;s&x>arPYae@heJ*|8Nd4P6cO$oU zUfYdv-y4U|_tVZ&?>eT8EVpr`v!JK49NwwHJv1d-}!_6FU92RzgF z%ZK*()$p*5V(@JBe(J!l^TjKM5*gj&1SRJo14qU3xy@!l*b+E^+i4hMYogB#KNv#P zrgmg9=A=gNua9m?d+H1L!C7oZb(D?50(LIaSLf#^!I`Cs7bzEfq$`~Vu&}XvafiwRibV>Y$V+>{5CdjF2cBW1qe*-x9~TV;FsSxHnTYuq5i8dpJuo zayl^FWbjVP3O-7t1=Gzn{L^DK*|9_pv*)oeq^v;bkSHDF>t{Eq2@DM0u@|ePUXhOV zp_ZOe0_TZ@32&7_%5o$egbLxovvkJb`uzNIoL|P6<~thi?QDk0<~!rz@Fom;etsRL zaGP?s<1CDRd=|$q^~z!dw6wJtf=K(0deM1acEQx6qVT=H#eSF$%4w`TpZf?iBFJFxp;iq*x9De&SaD4L#1s`4r-kkVG(AjbJlYR}K-vx3W8jG;B(tdy^JeD?Q`ahqGHnkXRk2iVjCf99* zq&>5Qr{nh6jav3jKODP{QP$?^gg?xkSz*}MqLac4aEnJ)jbmO!VZ0H5*yPt6!67n7-wc;rG@t$o_9ji|}^i_j~@IIcTN32NcGTfaRRB^897<_BV6~5>sW1IDOCf7O+pX2^M zx_xCMvM5qDVJ`A4BPUXfETKKedPb(?dJdKPO;6D!I04D2WPAm7x`ezM@`PuG5w7P@ z2szG=^po+kI!gA&L^*t7x6L}Uxu%e3JFhFh=YAVn;~*OD44s1mdV*oT3k|N~d~k$0 zeVxyecDBMFXdp}4<$}SXKyL;TGNexuHJ?SFou}@Vaei@=z(#N}_lnn7!(biSD1H}v zbXZ9g+A*4)JQ$GaJo|XCNsRV#=nYOx;xj3cBVn$QXy3awRuZt4^!ZO)c!>it@ep3f zH35^L&v6wS9BHaa8Zm)#p zDJ_xHahH5&kP$)W394w2XM=$#MF%glk?vwmAz|QYHge4s?RuWlU&5o#mQ5Q?b%g+o zPVeQP&*@R>g9&1kQGOGkw1xSNb(_sWo}#kJq!F+f6*ycPT`&?T zpy+qoOqb9DX~c6j-cvY2V7u5|m;{#tPL<^qsz2bj zoG-59QjU~YL28!(i2&-or;ikdWk`LAOccRY1m-&5=kSFM9Hd95L)T%E)G_x{z>v1k zyc(xwdqzMvDB%>q3xvJnhQWM)zY_RdwI6ARPz2@XBb`b=^?U4sl#1iP1z0)fIaoAr%`s{$}?%fwML`L z=KIpH>JPs6oVr>rzw1~6qtHQ;wkI3~rTvPLpU~%;tLn?M{#;J^maQ{DW}#5~;Es&< zjB$$7dlM6X8ysGTN8hZ32lG7B3ti++q;~%LxbsL>=$I&C_Lz*IGv`v&-t_#!us<;_ zna}koRQn`&TOT9eXg)F(hOG{m23MDsI764TkJrO5T`dur_MUzj$~yiRr{m%E%W;!v z{@XwQGvlxQTYql+z0W=$U%5OVA6W{=*j8?W!PNadylM+>7iWIg$irUn(nx>t9R~6x z{*^jrJ;v<~HPz6kjjhO7dR!yb)I1uepqI>aqZ9fx_?!J=d}oT=-XC-m+Ts;5IqR54 z`}FnEILT?*7oCU4>Nw{+IHbWR)PXkj;wAM1n?7am74$b9kQq458SLq2a7};DIXhk= zWiA!tE_;YKWl^O~@%nU*rh|eu0wS`l_S6;K;IA^lArFlN3LFyGz{;?C4ec`f#0m6l z{!72_I!8fQ_IBp{q7ID)*7J+A=pSSFJJmZ4I&)7S)zvqsl~dj+@NbuTNrsv^d^4F=)sYg=J>L=D1LdDPR` zLh3S&N`dX{?zTfbS&Qru3j=$hzul8~)dUT2=|?+5_&Ey{x$jL;M@bN943KJbP!tio zc<8(sMC~hK>7o!zJyJ@IN~1U!_RdNVBBnyfnT}$3b&1R=vPS)>U~ZBz6%2x3r$3Cl z33cigRdmxELZuIZo%cmRHzP2c@#eh(aA#+3YLm(fJ0hm>{0>v*ne?`JL%1??4g$x~ z$mN!=(?n}hLV;Vv!im{P8)e8h(*VNNpOHIvqnu| zOdcnOn2yMku5+HtrQm}g!-?oQo@d`AWgVz3hv7BGR1}$KtSOaU=(55*W4&j+Ykf&Q zo#POYC~Ez%=?!CJoG++t#s))9&;GJPva@g2=}gbVkD2t~&`%z`;Rj=O z_JWr833raqzB(J9O=OS`j*B>!Uw#v303Fi5W|o2r_%n$mVP=Iv`%Z%O>A_M3a`dcn z@obfy=ZmS_%Cn*M;$r&jnKXefQx`r^hbS`5_qwA-R~dg4q~E#j6JwK;I)WYwU}ijr zLnqvMFmC3e{ydj9rgJ-;PjHdJYUrHcZv9~Knf}&6@Cf3x;rwH5L&FujUFj#d`~kE@ zW_>^`J^j^4r+4m1QViT38=r*7o^0=pweZV&qLTHUeBapN1=OJ>bB~doXyG^sc_t3*Y8=$Zd_K6Y zukfHFImiG3m9e}JzrweOK7d44ZpZ7-zBB&IAOHOLOMmjm#n2JyOJ1ZZ5a+9&rmr%_*rKht0MU<^gRoCCAu zWiu|qf2C7s;kjmH)0PR?GG`{(U0zrRKO=WRr)XVY>DUH1fsbu8hlq~f_vTLWNOnaV z>9c;aBd_DoYFqY&{$YQ~g#bz*lFJ=Jps#H}w!422TEu1q7bXyj&^u2p@?sgWxex2n zi`%isX{HVWm;~XabCDB+JVSopAu5t4R3@ zLU|nXVVV#s-{-grMMhD@aHV~WKA)M(2s+Ur^)V`puODWTGtwsb8H>OabApDkht!a| z0%{2=@-y4gSVe9WxdZ`12_|N@`&!=^M{%kk%8tRuu!Iw#2*LDHBTx&ty#Z|_UNmC^ zqlAMXa4>k8`biij9y}Hzr+P2|bPDIhk3+vH9fXkzk`>?`if%40Y=4RtsiT6oGR|V$ zUVieb&&d*O2QN<#4$HL0q-;V`k3Rk9Jzhmg_?+kseq%h*KnkiU_1w@uLk@d8*31%A z0yk&A&N25y$NAJ3URh)~yw$SLyRD4H~Q7%!Jl!Y6*%Gy8VmvNz*I z6W=^s>q*_`)Nlg9E8`=JAXSFn&J$&s7vergZvW_VeE#He>?I16DfreGKRd1uLYvTF ziMNUCz=KcI-;RH#EFSb*32mXzjQHkHh86!C^GqifW#$1F|Lvno=fY0`x^?Q;jN1BR z@l0@t=Yqp3Fp8(LkvnNaIuY)b0}Gy_Fl*rRi4HZ`i@!zfDldLjpJZ4#CYQW}2x@vJ}apK2(2C{rI>{E8^dMAI^iFGyXp5=%Hw*22_a*CqK zLXCU0+T3ty()3CA?n&f$H9WCy0#}sWS`?VHyp4OL)+xOpYtjXM;LY=q2fNX&M#-){ ze>xrnFTo856h1ZnAT=CixgLIh5M5rKkMon)<2OI^eEjmi`)`fE@^Ac$e&o762bkN=ae^LLMYgpxV&g}+%Im8}91y?e7> zQ}pNO8#y=OJ~N!ENkWCQbGL9-A4Yo6;yBE3LY?MJ5yBCCVZJoRmx1AspR 
zqfDy4z=ysqvzBb;?%sZJdU1Ikot!@BJ{yS!*B4iH9y+{JJOtP72dCuKH37E?&jMTg zc6|Ig@J82jl!9}8Y~~~Vk}XkH_p*;(Xr?*$I6!1AG+Z!Ae(;mh}q3uqF$#;esA*^}l96)|E2stUY8pNE9 zh*MXco7A)1zScoG8zUxQ9qR}~U_fWCax}H;Il(Zq zEDKua5)}=1gu6~)4o1MZ-(8f3$7*2VJwG&61D(3@Ro^Qgni)ozMt1>@^7+AOC7bF- zgJl#_W1r{suSfW@X2UbRmb%ie#0+DP1M;ioR6m7i3X*b-%JWwTVEIJw2LDs(bS!a^hwXjKjh1wI(#_l(BwxQHwI_%`6PVtM06zm?lV#QlkJ6J zk8c9La#$Wu`{@D1-yZ7Te(KX*VJsIzx9FHrLsPr}bY)GtW*0hCK+Ff)>N7FC= zfhE`T-F^(l;YKGUir1~^6K$Len*$J;#|)c?Rc&7Mr6`_8&0wN z-0RNd4j$a|W=3su3^sQ;$dkr9ah$!8anoDLOB7wuXF%0Zu>8 z5-h+0dgzzB)XVM(?BE%^r@_T}g4;R*Xth#|5tFX~tIO(`u%}UVDiB zGtY=qvYYnVLb&M)rS$Fm#KvBMA$`5Pyy-9qdeEH$tvbye4qffs0YSFiAkk_+PVY?| zl(|HY!x#NTF9)wTH<$f{&mdA1y&Nl{Y@>_=WEmj{vUAaV{b&G}DvLn!ZYZ~U-V3GI z^o7O!1T~0oBBYc(0xsX;gWz*Fvj}b4b}}4&=|Db_9`iu z9$+9Lb~*}DDlw48ObC7Iq)?^bomH?=U=)Hhwk0EQpsxHV-1=tBh3BXb;N9G0RD**E zfj$G2Cx{S)F;B-Z8YiA(C`%7GRZV3X7%1l@ULEE*U})Xp#L%SPjo$5KQ?3zCh-N0H z&1|kNc-9euI2hKHGn_YxmR}tmcWiBrfuT}f&hVDH;b4x-<>ZV8#@W(5D7JoT3!J=nUVTq>7QmJ)Eqdl>|66`mJt>Cjr}p9^Cp{Lip6=Em8dG-8hHH{#sA!4PNm&c)c^gBucv$E|vaD z9L4n)&&Nvi2s}n(25&iIjIKZYV7&k8X#Dj*@#Eva_*Z^meEawP!1!*A#+N^NIbNDo zpL(^X?&>>r28a4G>vwO`9j`Icr9tkOa%rDtn0qz2Mo6ZE?2LxZGm(kZC5m6a41G_c z{K@G;cc_R1za6=@#WN#LkBB%8f!WOr9?5rSUc+&kA>%76B5SWAowLp9%+A-K3(cDf zACZ00p1tVl4M<~DI&_%t^mXS81@H7AN2e3?t}d!$t*)Ub8_9;h%g_2Fc%$RdF9uU| zt1~i-#ZCUBL)Z=dbaEQ!mauPnVHpI`rGm=o@CN^BM}`y)3RiVecu&?aa<{CnTeaqX zQ|Zwe+NJaKxf$B%eS$ol4ZNAiIQj*yX681=ISyv2mEPr*Y$PF%m&YkR|0zP^Cr)GUf^e%MgeL*F3#O9;vIH z2?802GGGwNa9BQeAC1E?Fj)z_UFH@8s?T|}4z4~ka?3QfF=`BhK#f+z;7y%}Fvh_4 z_k2zRgD^9iz@&kdu{RT#vf{M#sy;J%wm1}l#>C!(M_=P)V57+FI3)w|Cd7Ppd^J9I^DwSc z_Ln~MY#iIRHqj*tRO$AUl0hq32Pde5t6V3zjBFzeW!%$llm=rx6?w|!zB2f@R9C@0>d9J*ozEagLd5k#dwvywDhI2T8iPCjz2h9UKKwMIUh2}&F2 z7+_*tB7gZ^zE+2z$ftS(CwSrOQ@AmI4sBI-^*`5K^*=KXQ}OkC+PsGB%y%?Ip3~3n zLe}~wE#9P#{K|9c*RCi`rXzI+whrBpjVRTP4M70o9mBmfBlIUxVxskx4TgQIlkTjR zSdFsFqp2!*C{A*=Z?hefOdXA$!;5_b;8m1!BBNNIu5C}Fbr{S$=)^@k_Qe@K`)7V+ z{Ea{TGvjanp)Zctspe%I%x|3?kBdCB9@s6JnzYiN8S!@FCVqFlpE*MZhT0zUu66u4 z-wUnJoMtmK>Ov{}Oe)rLy|d?Y&lr3Jc|1me+p(?z6Zws9-tVNV=mD~pb~=8Z>(!Z& zZw|bavCW~H*+`k{0Gt#2bj8l1-6appcADqZeMmNpokuU|N}@?TTcPXm zsbjO?YZ*W=3ed<=<*VqJ{Mjh?;;j0C-T-6AoZ)VPS0t@mS7!#_xDwB$4t2X$T%@0M zRMYNsh9~{$3y!0D*%C5h3_8Wy2M5R9&;jSh8|_5?GLC%*LSSCXgTvf|WuI=YrjDi; z16A?YIW6QZeVRLrZZ)Wt5w#m6J17Vs*6d1oB@femoQ#8K&r*@u@wWZF>##}XBz>su zjBE)$Z83ZuKE^SI7WWU{8lV5d7skQU1FSbrk50zL>1p5zPu!5a^f`J)U`)>&M(2nJ zejdw=x*F{n`%Gh)f``$U7X$%a?;Zeo5!VU-F+`0<%H7^B(gEp}2xb}>NTB27zT+Km^ulAaiXh> zFbkOt{6|rkaJa=Q3~bwo`EMi!CbOezP$?|Bbux_RanHYU=rh*M2?(w`oJ= zc)auuGJVoSRZ0|1@Lj&e6Zu`ofD*|mx^4y`&-MPSF4qX<;=4NYgMnoTo#naU1})r! 
[base85-encoded binary patch payload omitted — not human-readable]
zGAekgLgd1H-fc>uOAyXhW`R)jzs@S3Y4h{4?4p!kXs)aoPN#oH=qukw%D>? zQwz3+(dh;b0d2a3F1(P9$&fi4UD#@brk2lhP4zQ&3A@}6D;Ob-HtIK_p@B;5jDtou z&{pYl3MbuMO<~mDGG=3%YQ^A#k#XXJsy-_tSi*ML+-mMuSr={klvFiX&ONYD=70Ka2|GntJyh2;>H3It-jh1hmIoD)n=!vlhD@LEPf}>nAOLeB)dqxY_*ADC0Tz_(zRDGF0I*Vh--Ll54% zyf};9%mPpWhiP8=#$N2WIC*=p>>ut_epeSCmx}~oXg6l|Jg0xs+oh}K$M#fIZ3Q&N z8heiXVYgr_Bo<%S971rNXyO0;4}ZK|U0U=q5h;VFn1sEZpBy~FYK(9gm;?VYTb66v zN3%&zC(;M>TN%)PfOkLHyG1900?gp7%o(S-v=bp>SePm{Z1nb`s2;4(aH5Heg=`$< z(P5lIX!{gH6M5V6z))N#N(VH`%Che+s>|&F$cu4OYV*4oOwg1-Hji;+WiF24d=&uQ z-Cb{FMmd>0x@GLzkjW}Ja|#5`2<|$+r8?jQB=bhIa<~Brf#kXZho~|UBBcX*7FeL4 zF^lk3MLq=>cXkE{fUokNa=-CWCPSKi9bB@_${Q_wWA@o%z!Q z0RQpx_3~G7{J#uuev^@&hwfj6-d_dS&Z3Lw0gT%OGoJ)Lv6+@%zl>Ab+p|)4PHb#& z7T_h9P#GQGi&NjpZ;tZ~NNq%xR&D2RzyTOMvQ;I(7*&f*y^s&5x3|9^Kv5B}f19o; z37Voa1?T*X=pJRG*O9SP*IuP^hj?spod zOtx*Mp;kH4A0VYmKD)Y@^*?OMxbd~~`Dx38vpTODKFG4QBUrbNM9_+E?I<6sSP)TH zi^q1^focQXYWX+}9=@u=g^%G8zH>OX#I{OS{dI-$lSdMh1nBt2VY`2(JSPFMK+ScZ zV)x|FzP`K?NJM|)9Gmn+CKimUB0iI$ zhuuWvUHxLS`3Gy^S4-|8QqXpgMDtB-Nw&R{-vufjV}1}lrz?^!(@XY|7Xv~M+Ue{H z6Z}nn#?h%u$!+9e-kUto;h7E+8!X!2Kk134H*xl+Y*ku*>v#XF<>=^b+8I95NmUEG zDq`8;pWsn)7X1-4g#YXh#*tMWD88g!_77iSl}ZbKsxAaKC(=^hU=Wm&auFtiw)1vc z9gswbm7iO?G9rUeqL}$D4HtCWmuq*EU2P{~-PsY*kHJwu5T3N4*Z2mo2ZrYWlFrG${F&qL$!#IjCA#Va{)ga?z zblnyd8l9%bAw50Kcjpl_J~*oJAlk)u{Ne-!KgUN09ltF*s|o~=@xtf+o&FqT zGLCX-<|Har8m;oDFE5d`>j6{rzlJmPnEr^FM<(`J17il#EHiX@qe<@^7+sM;YNRKF zg-0qcuG6+a6I(r9Yd2AG_amnV4yk*2LpR5R8gfn3)yHP|`Gd^yjBYj9#A*chm!%mo z0|O9ixi(&Q;byJ^NVjS9IiG*Z{g0tXS^hpUxk!KKas20>@0Y9PdAW$;pXTq|@M&#t zXZf6Vo-^)e`V>AqrN0}2NItXh31GIukT%gdcKW%rC@pqQHgXZ&yY}v0bqF{ZUkC{8 zN6C!3h_=JwlxD#z!0Yr!UVxC}m6f_*d*&h8j=+iS$Ye0M^S|LUhn^P8J8v=uMtDmh ze8k!KY2`CR$f%?50@bly8JN=RedI%y1+&lx0~}yQb#i6e&gc0Za6UYaoQ>DFvb^gA za7xPhgE@3VaJd#9Y6B4O?H>eSw<14|FAggXmk!n;MkhJ6mSFRDv#sz?g{0YLK7&17 zUqnBm-(lJWh~`M#@Wm_I)gUVBz8pq8t~J>(4`3)uYcGFpIs;W zr+WeuJlje@wVUzm?ps7WhsWS88dSH~H@?#u_s{v}0-Bi~P@!v22`(gHwi?%3>S*BZ zJm-S#(g6VpJf|%aGL>%Y&gdU| zWV5$77sWlrL?8eqt%gITi zf9IBGkfJdW5vF-LElBCfB0F>Az`VpOsfB3&ICo|Lv(t;tmz%2>VcBngd9B_!>eiFpp@v2e~*!T#>+2tJPhkV( z<3z-b&vjrNlleHokg}1-z4qMnTxX>;y784^b4vKcl*}<9Dy`ON7_LAJP%G`ncJfmu z88M}gNU+hKOyw~E$Pf&xff?HJN2dl7Q2LHSRf+)+z(#X{PlnL3a$XG<@I2A`o>KP) zfDJ%KMvOvxI5I#B3 z0-XX6pLJb8+VR#_g8}FrplK$bmmLMbcQ^cK@Dl(K5RfSy!!HL%n4in&yTia9SzG(O zfLOP$#rOp)#-TqNX&lZv5Kze!(Pj;TEKh~Qw#}ZlmN5z<^-X4*FMv!=`6X?Jmok^| zxmBy^?d+2thd~DchOKmf$X2lGM1Chv8OPSvPC@BGrWYFPI3hqiq~ic`1&GH$FEj#a z0q&ulC;==_g_Px*JEn_duNq?^+$=ifwBoEY1~L^4vmrJJ{4HkGK8L0J>%0mw(hpjI zcW|!!czzjMzN*6uap92+B>m)C+TLvY<;~M}(IbJc z4pJ$}?X3>t54n=n1PgR+f)EbxfzwOBVMF_68>e z)3SdU#SduMYyaWH)p8xM1(JlL=!>?c0++Yx(u&MBU!wn3S6XIB_72tkQ=q8)s2Y0a6!mM2YZG$OMj#6JhnetIQPnvTt*)U}5B*?SG z=HAYA-A6&fBJ*Ujcb_Tco9^KF?;g>mJRdtT;* z7@WP%GM$U_1VeQMv=63hTTT^XUN3U%TdS2mIc)*b8AF#!9Ko9uwvisaam%rUB zlLhR84_aue3`cJ@a<~44Ph^zQbnMz#&EwKL&g1ytaM?9I^x`Zy^8I9E*2woq} z(gWO{k6>}8-5J}lD^Gjdi&Kh_Td~^Pn*iLsz+^w!_g;b_2I3SuKnawr_@$TDOf(OP z?lKvAgZJKm3~&Jzfi^o4$c`M@!<3Zk6v!^}P(heeFvq!bh`fia#+dQ6Yko3b{Teg7 zPooX2Wk#D4gaj(%3}-=SxK-Nq*}3J&g4M%_sSwTHWU?^!d{I4F?O|6E2io*?I{^^+ z!#&?s`UwEhsJhlO)6>OjA>i;vZy5)^?(iU4-9c8SORz80^08Guk*L+ z#6xk{mR3%NjPQ?xf3-Hwi$e^%Ce0{D6P(Kqjww?BGU zU0aI`$t|R{4L+sqpIwA6umxVmPBT_Py#UZcW=DPBdGJp#qtkY|7O&B~7yA;hu}j#o znv$wwgf4hR-8MIOm!sq3ju{raPEaW!u%=0}>1gK4BV)J;f8b<~(vB)REppBd7#)N4 zsy43IuPI#BGrX*W&o%7GG^9WRznnVQ%0ks}+S)se9f>Fnj?#ri6G7+S9Jxrr1F9Gr z?jhQO0V`NJFA?LLH=-wzZ-`BlqrDQ(G-He&1En=ippR-1jas0bCrbS zyE^!Mr<5dWtfHp>gQLCWz~0EMtqQN(f?_y;?djpma(x+R{>3~r0199xa8TB=ig_o% zq%AKBYfjiWlnO`b%*#)i4_7GRyRY9ik~cm|LqULqtWktYIRrt<*&I(x_IZzz$(}^Y 
zQH=0fA72=0fDQ;ce8=zpY-ue@PI;~z-jne<;+5e#zS7|x=E}PtIQQ-*Qxw7HvKr23 zfFFgV@MP*UbWncdp;+lYH`mq0^Mi>f-RM%QwG^> zco{PlqNB=b%GHhwRhpZCCIFlJWq^47cpqJ+8@aA4kV79@LuA2DkL?dI=R1A*yVvAh zS}i(5E3n|?at)}ZPtEYiAoq6uKcL1yJ*Rw1YGj7S1PmDio&f@2>GJ|;`j#=_5t*(Dh}{`A1XKIt#EzBqzb z)@Tc7fjayjJ4oOWXV5Kf(OvTMWG~=Sq0+I?sQgJrbdocH8-j^B!gfHgPA9_Jj6UN7 zkRgXwD?*2}vjG&@3S?#5RvWW(`hvbbQW?+Eo(i9S=_Gs_#}~e^_gptmFPydo6|GL> zr(pi3rDWB_$N`?EZGjSZSwY7Ws05j&d;XHlC}Sl*dLXFmHR;QIH=XAk;Y9|S-`RaY zH?+}xI7glw$Au|^d?rv3Ky>apeKsp&hlt0dl7$*WNcVZv0w%VMaShlZi_&_Jw zk+GO(M#oeYROy}?RNO^=?6?_jnzX}*jhluDvx3vXpNy~fr61c*zdkuw-UVObBpaEz z`Khk1xcm`R}W(0XR0^3UUh-4 zKHMe|2#%}Z9-o|KjIp`s5hqD6XWY^I=p_2c?J>UX(HZ#ZUt)%zrh;4##|3~EICHl2 zzt!RBMeh%PkH3D`^1lEJZ@)xV0zRwrRY;#7KPRbt7ut6!KNXuMXn6~g{VgD&d*sGf z;{8Da3xN@kH0M9TN&G|DV-^0|l$~PmAvW#Au}2XCp!>TVluSD})JVQ8cQ+Ri#0a|` zN7%kMK$$&3$=HE8;us?aeot6+SxKqY8&%&@pQ3PqLz$zQU z?6WhgV$-f3IB+1HfiTipM8c@Yu~%u+x6H2w7^4!P$kI7YAk)Y-3WT=K?FO)S69jN9 z6)*tdoRjg$B$VQN)J)mv=qeg5lc}iz3&xgmxZa$nNMxz&DxSY{- zi_ODbd`lwd2;Qn&B`-peJyCWgJW9LDhQ~RP!GLQQzTkD*pRolr1Cp)-8h{W0J%pF$ zYGik40RqZn)Ujl*fz{~WD3~rQb4D{msYPq<)$jCSn zVb^5goEUkc37_!Ji`;>87d%y8WXEYQ*D^3`7|3dC>-DnF!9|9E9xzU{zAB&P-UK;5 zYc(ifgXge0O~1Ux$$QNlJ7XRoC_nNyV9^#`>zNS#4jn+7{*VQS*F`zGhOAd0h-U%= zZPn35j=rnh(Q0B`Ydo_UW)^q=kwJBv_HfzC{dgdAGJbleU$Q4(j(dQ5#k={* z&OLd|3vE-P9%s27@MUM7(T;c86+ku1_IvurU_eM8=*6lc=v2*`hBU3wAg1N{ z(FZzOC+Qu!v=6J;6eoUlecL|Z2nW5ohP*T&*bzXk^iK);IXt8rb>hL9otJy@+dXv8 z+JtToQ`O*r((as{5hsw(w1$7SH{!qk@U*yYaxg)B%kSx)K5_^iPb$-I=|6n-+StGs zyMv}Grm{lD?tyO{jB$qR^%vyC?ge-aw!?ET=WF1V1nnw3l}HH;1QE6$(#0;c{%Wd< zA_TV2A@dDnjBj)WHXKB*qdzJmQ$;kde{ps;_vN1hj5_@q8dUSn%~?k_DpgOh3zJ2= z$)q_%c)}M5ieQL<^}~nv(OE$sTZ$heaAQe7l1`e^fPM}Bvr|=$*zRE*Zs^>|y{}&< zhze~Mg}P&VcQd~6syb)<#WMDkzfFBTC1^nV-r-T(pbJe&VBJ%IIsI zy;ZPE5tlV25t0t?t}YUpygXts#BK$=oX|!&I5OE5@U9coZj{O6_7G^`mvQdE1ZY_- zBI3HZK`UiWWDN{*Mdw~(46}evX^f+ZR5Bh;Zc372P6jONH!nML=QFow{?7?tEotuW zwPZ^9`oR6WsVW$(^0j#)ibeV3L*l@0mfJ*an^7pjpy(}$g#ZTIvza3dI-hFfYEA5esUZIVtr>kE-Umik}5lJ zAGy}CMmD`D|8;Sy>vA7HX=B=Xaq795op@=>LO0KH@aR^V0K2icW;c$+b)10v)@3>Q z2`H84CL2vh9Ru1EOoWyibNbcJZL$H6OMV+bJTM>6&h(KWb(Kle{jT?jtvlCZCqXx! 
z=c}|G+l}t$lJpP|iwtE&gk;Pf@YL158HcRT_ba``gq9RzXa1~{$~|~NpG^k|yy~D{ zdhkzHFF2!z?r+cOuhT}ibWZ?1WmRJ&7i$DK9~fc#5#sxI>=k<)LIsrBFP_dbE206h z0Ri!3%ulsZ_~CE;3*;M6Eb~v?BCPP@I-0d)={{#3KV69PCbL04QeA7HV@Ou6m8Wr{k-rtmo?k1EQ~)N;=Oa; z?}AIVCgK5S-JMR>BII{|0G;Ubowl`SOt%S)>_+hX0a-4cz%;hKy$=)TC~x;SV2@4; zB4HAJWY^aFXv6a@1WIsc@8D?J-A^{(#6pEDJ}S=p`0Ysra&Zwq75Y43@F;<<>>Kas zw4j+hysF=Q33MfDGj(Yinl5v?cAVII@69e@yX}lW{;l7N?ZR7F`6ik6VQgRm#>?}w z<>vA__P7>Zo63nbL!)CBi_f_;0uhHH;hA8mizmZ-NnL&9OVEX(jyD&iL`&kQ!U(|j zIsm{49qztfjt@lu3?7(su2(U7hpP+_dUY-_oyp2L|IK9M8=F%)5S8MGg&HlBzP3P* z<4j}~fj?v*Ek}k?UiN_QCxX!@fPRjli2TeyD*-;;e_6i&a5+)Y3%wMvi;n878D@Yc zSvoMmIHi<1Yk8Q&$SEqo!GNGG1RxyNs$?Up zGL`W@4jxcWq?NHnQANAP1)vDsmN3E{#k5suJ-?3;DVw6p{Fh)u$yJorj4c8dP@JAw zh%zNPnIt*bDmDgaQPgwK%h#v!X)7QsFg%cXv}~Pgja`=Ulr{t;d!c#7|LgQ4$a2~f zxtia`WBOv-4I_2_!1m58P-L7I*Q^{o5aGxpknjNz#>gH39fzfSpbQh&ts24R!X>lU5bdeoMEjYcpQP*Nimnvj#q?^t{|7C?QwRApk&DxC>^& z&IBgr-gn~6O?7b60v)zMKiwx#G!2l?GX^^{IAR&7a(AsSG`QC|%eUx&^(9K|A53fI zs^9tMZFy^Jltn$}>*-Vjju3**RaFU)&>0n@W)vlFL8WD$*ftKC+yvTne+*i|H>N0G0&+Q$SuVRIczxDftE{*fuYQ|i?h zdL?kGs<4zkG=t2T1e$`mR#?JG0X3TOo*b<~p_8unUEdj77sKW=7-GD;(JK|U$;1VU z)*vOZ%l+(G=FC>SomfXd(bg5}p>G^{@Gku1L)ZrWwep>=nX27PpB$yNG&ADXVNE*peePzQIS zm>FL%z&1SoFu2*Ntk_7mO~zl^8qFUV_xag}@&}D*hanMetI{9df6!cc#i7$d{1aHA zolXcwwMBgezr~;pm7x6M8M;7hZ#F+Z3#8> zn#&NKPn05BH^&}8T8m?d{zhM$k&2=%zOy(d%N2aPJ~qs zp~kbRXF*5U|=2%S`XbW)k zIn4QdvM>t(6Saj879Dbw9ByLX7sO$KZ%XlI0s z*s5^Kx}S(xbZ?%vTW~Tq!HHc7=ojryRUw8I-dg~&A4SFcW~kADKJPn5V;)pSW`8FG ze-qH4aLQ!>hJzKnP+}|gi((NH9cmVqv6Fc|?0x+{a>h@(*>xY0nRXbx1xbJxf7jzU z@mf%&^d~zrK5Ikj;KL~VwWaaqG8xhnr#K5NDUNCoV`b<7THwY7)o~;u2G)WBP5{r{ zD=+|dfE5^+dlwKgJ~a8>PKtv7%}(g^^u(@I&p%n<;oe^K+UZl;;Fy#kDBU>shX7St zi43+1d}!T_{LH`0O7ubw%J1yKcqtN{7m%Kv-*j%-Q}!-n1Ey<^_VydNkgKYJ?h4Ma zBT9+?GoI*a?8^9or+a}W8vsfL^5_Ykwwhq9f|HE#O#;A{5A&VRqkwhxPJNVd;iYPU z0Ize)>w=#o5}}JT=D5)%gX$CF66A1HhPOJ^5Joi%qt9tvKRs6g_rhD--&EH;=*e`` z>M)wl>*g9ZU3?0^IBO1uE-9HCvpH7INE@yNpnX+7(a-Rk9g(%~yg6Qrf42qZ83P`> z->#fhcBPC;)(r}B?}~nQ$yxb~-CNklF+L?I>!3oXKpXb318u}llrQWAUpW_cPETN~ z``HaA<2fA$zN3;8dz9hUzBsEFr{l5U(dZ^Yb&6?hvO{Yb65K=|@<(VMJ*5lQ7O^c@ zgfk5~(jLsVZih2RKaBG-hVm}@b8wKPBLS0OXfGJFvN5Mj!Y@Cq{ysiAYG0;H`4bfb zI$Nh3S&SS{bE1ux#%d&fT(Des@^YdTj`Z#6uZ04H$ z8bM*N-0mp3u zo9H)(X-;rij`yr09S8m9bt3X8%`Bn_^K%_AZR7v|BNU3T0)&Qg@;xTrIl{DiZ-~gWSj?x2G56jrkm}hk+2X3)pv+CT-h-C=PWWt@@D2`)G zmB(chUdM*COsg-+pB z9e3_CUb-acxVaTI%k(_2U^mfuoREt`he%tSu5URs*O8m8Mpng}Hx%FqQk2pCEi=LQ zbXYwpNauHpb!68*2S}|10apPo-b4_!g?um21Lkz){t+i}F84(*>BC<5HOC-Zu;n@x z4A0Xyy&x!cy_de&Rlv&F)3ybDcvA--XCZ5?UPLbIvY%XIF_O7%OJWa6n zwEXz%!`jfzjZ@d`^}SmzF6O9UL55Q9bLe4d`X^TQh>R7V;mDNbH8U7CiBhl& zz4QTZLdW1RCp3ZFPJ%;v&G|a8hz`(4bV~}*etf*Y9GuuroOa{TZf~#09z&Ns+YwxC zLceu>`3DQx;7rklKWQINT<5_$^j*S1=8}_f*pvB^IU7QUpWUk+JJe2w7=6S)^cyRi zisPr(dCO+$H^vG6EHINO(&tuo(ia-Z(zKBZ2wP_#vuZp^OEPaVmpG{$)2;r@c;J-e zKr%OHq--={<-|`verP~je2!cNKPskVmce(5*qQ||av^7x5{WKe$MmDC>_(M?FR||E zyul4yw_udxmQ?j%p076hfHpYWqiWNK@xxep?5VC_1=GP25Q_llE{2C^oMwS1v}w;GAPnU|Ec6o{0y%8}1W(bcb7{ZnD++KIg{LqD&?tw; z2U7a;06YWPikvBa=MafH*Ld`*z|8iQqhuSad3)0oqUzZzD|Z~-7+@wsCM=*40gKQh z$HF0#rv3aoc=qEslnnIK@Tw&jG!aau3gest2`WA`H`S)y^e zhBHtBF*T)ML9hUvO|Pb61Tu7zO#dss z^7yHAi=Rr@oTtZ(rt6F;cAI7#lC(PcZ652&`9}wXOQkFK)FDT|H#w7xZ~ZmR@^Go! 
z0B^<_Ug5bwm)tIsQ9Y+YV-YYoEJwDd;?z0p@Iqy6-?RF|GnL9#^&&6RopqEoLF<-Y zvbOMmF5)Mewszw@x8p2A@5bwmjE_wQ*CI!H-Qb!nME)!R@5rY5kg;$|yC@u!@eX}YvK&A<>aj2xJ&=!vnweG^sm3{x}rA?(CAPCgNFno zcj3|P1*e_s(yr+YI@<-vWE?$jweevdqul_VJ|;PYxhiJ7kU9N+ANdP(=&sDySl+#R zo1p))bi&D&%@Y8yli7Da_KY`5_o|cH=2tv%Pw-~SHX59J!-MF77rA_R|2{gGBIYlXMF3uy2beI5UK$*XGEP8UVjp&=3sIy}rQ4SH)DmuC9Jx|Gg1|-i; zua@&ms~~050D2-xn#zgEOc*-GqPxyNZOTd=y{P1+ywz2sVXBIPS4(+OFj2iS(*A*y zyxc3JoGo<%1I~yN+KPj5K=||`*-$bgCCV;}3TSh-9HOzJg~5fK@(~JdwfuM|l@iHp>qp)5;&vOAB-N-57xi&C_@#_zNtpH}w zzF$ov2}WcfoQyb$qge$g900tk+yRj-T@z4!Mw9mkum zGPaDrr~ZZh+E+wMSJ^2&D1D(#Fw9mP@Z=s2RA7kjz?faw)&~e-hTv>6$OP!&Kkyd_ z*$IQUC-06r4a3=4Yl40bQ7~-04sb$0oIx8AvKewjA2046?-8#>NeQC!9pFPN44W0< zre|c?WHr+b%}Ql}xsHRAxy^wu0`QFneskKTES2-ILnd~%>p-vs$NAn1ukp=s&1{r@ zbF%c9Gvru`eUbSpaF-aEFIF`mZ~9`K7CE!!ap>^I`l5_AFJtUV{EVYnGv}0Kz|_mE-tTfjk5#BmPTLGIQz+UbjJAREiO_CNL!IV z8lAg9FFCMQb7F+G7V4X8r?0K>@;oD$xp!lg6+hgDzmb^-0)LvQaTNXzF}m*<@>u&|aL7TdyKzD{{vunq%d z>jETC81&>z{@~!~V0>2?%wFjqAINv$6<_#f(*aFRzvE{mLA%wT+uH;%32@gAwq7sC zM=`(*l!u{A%2*6yRX!Cpbe=5$BhnC2w-gls6n#-x&QZirVq=&d7b%hw3^YQj-~*)j z+sSMergSA@7#EI}d5qdXlwq&qM1T3K@1qh*app4^jHpu7^Hm+Mlkq4MCq&_tVecqH zlwvW@6oCI=f0ZqSpf6@E* zA^-xG$yhk!e0-)L>{9j@e<1@8dU!^a;PdkK_+aL{h?71V2oFn7X=wc zNh!!6CB9Uktb6J>TF?0zSlO|S*9R6e>&IowtLtfL4}7PQhCUg2T0*Zgf!qfC4!7! zzfrm$$1gj-jLbL-E9YfneYi}<1kj(7!L%|F`skDAPXyu$7#8jcz&T=$mou|i4u}cx z&Bfb|fwyEyKy}jLA-kr7d!f(EgG3HbVmq5%Z7J_6T;46`XRLH8e?)$qTP_H#^o=f>CKMErvo@-) zk%QpWxO&C}Ipi}ulA&qa=XBBi=#rV!x!e@~b4(UXa=Mf0da)IiGq@pGld0o}z^nS6 zd(qWWL4J22ihBj%cm@YK6-kCfh0gcUpP|{4`i$3$&P5_=e=3vgPFdMQxt#h&-WEm* zK2%%D(6i0ai}xHhJe2TE34d09=YsZckut~FtRMJ97J^~MQ~u`VrvQMXMVG*t4XIY) z9h>Ihfp~vUyQd#NHZz9Nk}*55I&~77;2#?nU3toz_d> zN{`8B~1+cITz3+otrjx@F}yNo>fs@EcDYXF0q7|w zM)iB0&p&)TO?G!x`HFA>EC)&4@dF-H3i<$KR-{tcE|7|{x~9gOF$h@v)Rvz>fMKa1RL8>Ojm=k)m7q}e z9o@>$F#y#XE1aiFBFpeo@PsddKvNc09arOWeHVph9OT6SXCYSf9>1bb^cLq-Pt2DA zYk-e7z=AeGf#Bg@5R3K%t+}im@-ISf+nGzaD%jqh^S|0+$UB>~KrW&4b z?ioYIxt@RlIDh60A}GOVAH$KGZ; z;sm*5>WN|k>%Er)R~)PZ>o|vXhm%DI*(d#!)JfhjmA%X{!ef`5<>=a!=e-(0hYH`5 zeR(KB%xC0K{$xC+5xRaM?QX>;Eilbj9SfTw#~v<|poF7?r?4TH7lUodWCOh}cq_2l z-BxAX4QFSkgqI4W3LCl7C-eBK8vM=2v$NyqIW6s!Z)11I-^qtP&Azsr{PiI}}fW*-Cl0faR zsq5#j{09`HN$E%@otFW+VNMOY01D*Cz^z!cHKwKc2wG&MBgWSK$?42$L!9L_JbtgG zxiG>CPEo5>&lqpt;DGZ0^Yu+4(5pDkC|O#n(JI$Q1L`np?4b{Xx7Gu2k+WxF!@vV8 zk3%zI0&#Pz80;K(krrB28yrM&^7c52{nZ2pQ3e^t?zV#ily*5^bKx=eIRDejywMa8 zJx-?K+#E$b7;VDr9mS%*8mfuH6QQin+FCZphkWKa>4*49QVC{J^S^&uL zv6&DMXP0a8IUv)Hu`^UUA?p_a>4zPfn-c`k+sC`n6}uE#{R*8uIV|I}jZsET529MGI~P9}7~S?%{3AD)Gz&>6eWFYG{O zEz9>PN`W!^54GvvVy4h?KT}K9-|%#IH~Jesx5OOYH$!Ki>CZ`LvD$(wz6zS?Uo)Zf z=N@2(Ci)?36BG+R@ke_VPF?_=ZFf;$1}3O5KG|3kmi&RwOa@;oo~ejA8@uAjH=W z+ka%q#%4NzGo8c-hRwn^ypTDgF=Ni<`Yirr{CkHM-_Z%!J-XMxATqFX$yDZ30;V2@ zq~exq4M)P*R7P zJCc*|?jV8s(EcfW2tLHN_m7UEe-FVOQ$^N)1=G|0#p!vIv2TC#Z86Vy1s_WD$8X=J z&-LP$sYd(J(Fofw&Mua-I9a^IGe8Bfm5N`uAG|uWQ@xElkq<<7Z+Fcp91P0>XU0=e#f7`iv{&D>?Jm4 zeBlB6kw{nlC}mzD) zQ9>CAr@4>dnI(u(14m!>}fJA=d>Sj?#cS@9q5n@VODu^Zln>z5=F=I8PCj_ zS^;{SKqFCd0**QV_@<5;ASr*+O2FGFI zJ7Ae>Dct9ts^AgYCKkFf>fEw_eT+x%oA6vMoc>jr#=_mqepdHbARG&Avi7^`2O z%X$EEr)lz?vC|b989p1Qxmr3#=4f`E*8oO;OTPlJ1a-Ndj?e{D2S*3CD~`UYDsYy9 z7&_5~RcVXy%WlZRTr3_iTw^W2(k5`_EI1r|V37Ex?}9_x0f<_ebI>^lS)BQQ;6QJI zs8Xux)wFAxO0Xb{5ae`Bd25rCEN{?~z6&4$1os#@J^*EXvL$?NAQ7_m4?HDqqtSnz zYiz|mz|?*CFCf0Ux?ir-<_NN2o zI^;kv$;ng#oW&uaj!xz}iH-3zYYyM9(heJ@ubifme$V8{?;b%si!I62)E}D*MtFr! 
zA4)b*=2C$LlT%C(_aQlwwiQnIm~ z(ckL{^d&JIS}`Mhu)nYe4OzYe_{j15@-$W4l>(rP@`;r`r&>0OTs}9LI=#` zD|$?CUSyN6a;=PAfRN8##sNpzr5Am_j?EaqgVA7D2&fNRGuPP2A<9P~Iu95F!qi#( zqg*2TLGUOL!fR=1nqkR)Es8LorSu}gvZX^Au=8h8dX8E<8BZ5vefkvTm5tuYLd-)U z+@UQu-S=+gBjdK$xQhkCB*4U>_+4g0Q7Bs%iG`5>o07wj{Y14Q^)7&;-~mcmnp2^a zs@}dk&Y0F?AQ8x#g?2~FMlvwjgKR0ygwgh2e@W*4uzb8s)E0UU_xD4ODuel!^b{oy zwsxho3>^63k4)?+Gzf^4s|d$S1`VuL6c~{Vh~w@QK^%Y2oX_~2`bSA9^2%A&nTi$z zgjW4C5Jp!;T2Bw*v%UZ(kpBMsDhBYh>~8HWo(xy#`a*}7`FBS6?&Pq2k%#P!LYl*} z7DB`-IP$;|pHp7+$!JzDft!4aWATb8>ddV07BB&iW*5GQVGD+U7m((NID^^GoNLlP zV7719Ll!)S@y%vvh(JQ(WFXNoi&13g#>+7%jS57a?c9Yu(J2lwq((1fpkoLe3RwUh z0SJbe`YH8d{O7-s~akAqf128e8hye!-?6(swSxr@vPL?*k#e`BS;f(EhQIQyCR zY=9BqBDc=>r3LXMp0OeA9vqPWTC)Y&O;=^-UcEIa$$!?Ca7f>NFI)N4H5M(?g$_F+ zwlZ$PqpAtV4U|mvtSUzIso6^C<6tH`4d5l9@S)uW1t-0-ZB8sfs?0@j<1n07KyrW2 zok%|%Jy{75Fw?FA3PHx<p<1(aycYh+Gd+*cJW0N&kXL*Q^fEp%dh(ibYoiYQTRL zD4K7s9&sr78xTq~Ow&|HBD099;CBR8`ZR1lxo2n5xd8-a_Cis=% zw=zN>jSu~JwF;D`GVl@{H4dTbrp0N z7u|RfVR?X8eTYx!QN8toot(@}FU^z`yQj}E$YX8ckwj9^-PQ21VSRSt82^xQa$px3 zE4y|0n4k1Z+X4uVmNUG(xF{WxOVt9~Ae~a@*%18IVAbce8~h00`2)^fdo5{4R+2l_ z!H``2fIk*D;_=v7^fQCDqpVN$dr4H-K$E}tss{Fwfot{j3?s*d7o9Q#OuwgdfcYJ6tleXECaj06ivX-O}9t#A? zf6bEoFX=?5Iu|WCwy!`NVRG~aWI+OE`cTJRUbt9-Slmo9}M7cO)22Vj;FOw*7 zQQ}69QQn8!5FNuPAyE+JjVnxyz_i<_G;J$cAlh{z8NeeOr3jvTeH$a-T-+l&7TGXv z4i^9_qiiRlH9wI-20YW&CZz#zi3B%Z7sN$=qB%b&QcpIR`_Hbz@X+D8zqZC%V1&VS zB>7wvTV9#VVq{Si^WE-we|iz{`P_MX89fk}O^9GA8KD7CZxJpBLJ?(n4K|WxFQ;^udO6BiqC+JrBPp$^6o*lV8O;$%R!ub9aEg%l;8uvQ6O2U)7JJfF3~dO zRVJe_yD#f9$XxKoVYQl)wt*g!Wh%yMng;+(<~bR*X&O;GXNH$u!5qC1u#i6x07&%A z%RL-CkVBhsZEV{6?g0@pB6O*CDD|3t2nbwUhTddP(G3eXtya|@oly;8Br>xGvE?)S z3BdtgAVD8x6!|*s^Dw>&a0Rd;ijI}sWbAG);6*N`kjTWVwqP7-3sCvH@6Z7N@lW4V z<{Ue~uT{oHf1EIA#kvd>JLs8%Y6Nf18h~i7>Gri;qb!ehAktG7<0xck9v#aL(0G)L zSHI{|_28H}d)LX#))Lf4IL!u?{5=*F4+NMk*Js@NZZ*$aU=rSA1V^eYN%jtxGF?G_ z9HZ|!mFi?})f*vNA6$9j;o+_4BY(p+?WoZj$<*HpEc>{CEzeTeY~>Ikzu z?+JyG5fJ4(n_1=lu3V13IkZGzhm4p}bzp)p^Ut9tZFxq6d(qI(9!GAKKi%I~c|F(-t=2!ygMDnP#ut4Fnp91$FU~s+YR?~ zyNlKWQWQ=^FLR!`+JIxY%!yir#4w!gt17Y86YoTf_sik2;|SvnW6b6aRVgSsC8Knd znQ@sf<5Vx>-0f@KNrt=|VJHDHfWE@asVKZ!wg5^T4FOhZ^FBR~k|nw}bzmhZCrc0v zn$j=^PGc4Ia{>edaJ~evDu;}thQzgluDLsZ!DS^HhOE= zPg9Z43m94!ppD!!zHD&)uUNz2QWhe=Sg$Y%NZvzejiT zN2^e|-?iq}WvG_{XIVR^!SKxwM&5kpS%To45}GND**H|&NJf>NMDB+=G&FyZE-Xu^CpduJ3Zpe>X5=82l=#ez;Q(f z=9$=nFu_O00Z1}Na>DD$v}bArG47*>wOQ+1cK7QryS*q`N;B1ri#^I?DrZdmZDkMr|+wz~W23Fq#CF2*pjnkfYOA^^m{m~HJx!s9Tg-2{{Z$;-3L z@Xf-^$+q3lmHBCg9>G@qNTT3}09)I1K!C4DXW<^bQ*AIUz;;wkJmQt! 
zZtpr3^QpA&?b>$dtO;v+Bu|1Lfm4F!V9mk7ab<6zupMBniax})V?U}*Dt?#2(RZ+X%@|?p3)gS5BVK8ETC)f&JKb;Wfe|Ej*>^~ZP*FlLpB@X53JzaEnJ4%yZi8O zDmm=z?c2AlZ1bbAbG8t|{jK=dtL03v5E-&{(?iy#S=Yv<^o1TYupzv~>(yEMj)5k7 z{@bp%s|1$pBv*0BqtsKj2mnMtJvl4P;z$YHcjJ7&N=%%Nah4~ol&;v|3d3)4IT1A4xwR^7Tj~Mz!7j&?eZBKO*Lp6FnNc>-j}cXe?gTDXg$DeUY1d+ zOtdrKM>jKs^16@AO#{Y5R{G5#dTqfkeD@i8(S=svtsO?E!lS}4a@6}|s6f;>n|(3- zbm##v4WP6iKBi!FL=5`2u}o zqOC||8Tmc&5WnK%k?UDoD|xv zo1k|Ss2OkW3-44gIM>SyQy+0)q0xzHcx9}uGSRVC&Q^;-rv!f;EEv2CP2mN7t4;-1 z^hpO+6)(KPZ$PeXK^;fc${^f`OifSr;GYgnN!zxm=wA9?wN3TXP-WsQ0QzX`4aD*} zx%o+_ILyj9*Lu0m)6VwxW+xMj=h)#Adx_SD-sp!Ytva3`;h*takVhthh1+0O&zcYr zg{SyL2IWKgBV*&HuiZwQ?`TYYMx2;wFI72wgwLjj$j*ac1n*8Rq_-bGzRz{0Q@3m5 zu$Zouj{NQ9If2@g$&oxVfV(n#Cq-JgR{aiLpT7uFGFCxy`j&J_CQZ%OW)tWL7SdF8 zr1jF$_`BW1Q>dOLm??g;B3U6p*sQm80@# zzDDD5;w0TnAOh8)$Vy2st}J~8h$v;EfBP5bxYQUL=TW7dY=&~QY-nT{7P<(`nbQ;t zpqYcKN>lhSn-RAGh8Yl40+ms2%Q`-<7Qt;K^4rVbaa>Vg43nv$$Nczt3|gjZK6B6M zP?2IItQVv297;NC`8{+JK4%FOY*TWr)x3<0^kv+X#n=eLw7};tkBh?)9QSG-hcM@0 zizz>1AdnIv%M=GlWIN8M9&l|*6y2eZBHREc%A0GC53PRLT5Bmprd@AAp+h&Yw@%|W3d@1Y z#JZp}yb<+sR`equXKWlEDpfY7bcb$+1IU2K)2D!B#s|RY3i?)62Ye&zdA$H6tqO=M zQ%@kM0y0j`8i8)N$$j?WcExkX&N*y^w$Z}@RJtY8%b%4unk}mHQWao3A*|~a825mX zcK|;9SX|>8-8HhOoT0^N>(FI0X@^aj;wfMYPJObv$@Vg)TvLZ0-qVqu@)<(h?_PH8Y;!a^(a#(*XUka$aAdLM#Lg>&j7tV? zoPC68oPk|K^bxb>Op4GPK0zZqE)AjSGI(d#5nZE4hZzT(ROUwis9p2{9tXob;m(P1 zU5izWi0-rT=mvgQe=@cPL(yA%{15gILQ7;6deBN<)+0F84xf5@W3E+62}z*^UVL6o z&n|n9g@mw-ZlRSPN;Wxs?ZORs15cFB(|nSs&5mEy#2F{_HIm79>&aLz!^_(wi7Hs> z3ucA}i%@G{`V983C%E8fohrvB(CsXXmCx}v-tbxMfX8Kg+6 z;GLhc%C;G&CwnLGKDza#+|v?b6e>2?>-9+~?=n!lF)biUmI>}1?1a`Y%e6%|p+yN8 z6P1ppY&<1*PQX6W-<)0esCzH>6FrJZDJLb6$+;H&<~ap7qO&dv43D~1C?L0CA9MQV z>a^8pT#V+PypLcos>gpXbfSoGc!R>*7?nEQbD z%$bI8JP#kEy)DNZBl$2KC2%~RMJNIB=!Qor0#ra^+!WUJj9Lj(20`KVm;VYL-(;gQ9`qe$CgDf?f?xy$+iOy`C_&4MP}m`pDK6+Us$WY{fE##2W% z_9q+D5!%oQBhX-8kJADo#^`}jJ?tq#C4P9}r{TG`*-8F(y$pWJ%CU1!JVA^0c8?Fj zQ_ds$hPmNFt3h?TbUpz}**r?rG#lL==RR_xXV$umJ}V=0zGNUs z6+B0e1NtqsrcM2ulQ*A>rt0*IybIdF6ak_>RQP0ScuPijH^+CAmpR?YDJ&dkpVe#|q{Cv4MzwGugPRiNs=;fehk>tqCqQCvagO<2u?$+8g$j+Fo zt8vq`KfwrITa*lYeU3lp=NHk-;Lle|>HI?MKL0GB<2=vuJ-oEmz)mu<>DEU29lfJx z8X|8fNd79&Do+kL+CAJ~PTsz&o#0!imD4X7nugmyJ}f`%g@+lom3kaGoNdJ}cJn&4 zsb;`j)r$xa4$Q{qwub_Z$-QC%5a6^{IUmq_U3GUg3fSUX|JxE{D*BXEh zS2_^Jm(Q`RK#Q3$t6&{wQs!9*@hXB4q2>D+xpSOb7ETsyo=_Q{liT079V5#06u=E} zRpdK!@{{p!SXQ~3H^hwaIn0XzAul?rd zEwVJLHupun?#tB-0noH$nctg>(uRNwDA8v+V@rzDp|s^C zdj;1179cPlSy9ZS*V9iw5zwtIh{=ntHd_oo>)0|5@&>SMgFLKL!3*=y-1B6;Mk?E* z&jb9N7P)a^y+7@@ge}|6wRDn#(HA=y=wlZPMrHtCn`ku6APf2M{yf22bkzCd0eI_I zraV9GwFDm8bmWvXXELy3)i+1c$|^GO@*Y3{DbsGIMw+RF*9|_>PP4JJrJwcnIOW5` z!Hm$vSpX>jQ-_(qJGk%!FR;?HEBax0Sli4`L7TDHsR2s;M91I)TB|3y7M7@p*yYiA z<20!GmcR3hOc*XC1_*6di?h#M9~&Zm0ki$+=HSujX+;U0!6yyksSJ~d@w8JUScBoca(vASzx}8?}B0qY-`I0AmaGb3v zOMTH7{NnUDuHpln?bMYZP(l#?hF40~9&g?0)bxcHWZCDM$M#W9cVTU_@ceCu1Rmqb z=-dPpu$2ys&RWd;SX^@Mg#^j8t7pASP@d!XfAx#MT@DV9mXmky+Md2UZKjj_PeRb@ zQa*R|xW$8}bR5)_=3jV~L?q+U-{r-1^<6bX#TJGL6g*X~ynG|W{}P9JUYgmQ1=*!5 z_n^T6N)oFNAD!IE&SbrDz8Q<4hy2j5A|iNcNk0zL;>;#ev28prA2V*j1D$L&GxU-h zy;bE?A$t4G*9j7LYm@poJWBR%|M}@-Cg@E}D~P7xch=+Lxr*c_rCt=|0!8Ka0@20A8{pIsuw}ph$%qAsZyc z)nhujPa`)ifm{{H0i`IE@I}^`b$xjjA$U^RUK!;97#kayXP$Tz|Kh?*{0Oy1{kn|0 zxSmz0kI(n%JB~Pt%_wE;XyAYuhV0!yF&q;@U6?c3u;W3i)QRGZL0^=NA*%{_dfMA} zwjj*oQ8B^^Px4;017Ib$Zb=D4KAhbwA49A1b<5as@-^U$qfb@~{TK~29o6}FrL?WI zf8BkhX*z#9<}<*n6qonTwuNdjU;_-@qRfkn^C~*psz9L%00LM*CR4YUekU{lLP~R$ zK9opT<+0pQFtk|f5(wt={m>%n#7sPz%z3IclPSdUB(wB8qZZJi4S>t&x)>*8rZDJ1 z3qu4#7IhfU*+(mFLtki_Y;j7tE1+&^D}OU)hA{x9Bp*jna96ITf!LvJZoyF;GnyC{ 
zLyIor%U$GTUR!`7&{CST&_x9Rz4+!{Zy8T+!_v~NZ9rSz=WpZEx3;xOt^_8aLu+mu zI|v-%5Y+a>~$?4RH{wQ^srw0%kIB zI&QJiOvB`!+=~YC7Oc`$&wlsa-}@$+ROkj$vx0mw^kz2cSBB=%oUMSRJwdgV#q1Kz z?z_IU2i!{jj0rde%q{#8_~2jHhvawt(-muLfR@0j&rt|{Ui7u=FhYZs%A)ph{(v?a zUV>VT;mGjVpj0np+V(UvK=`8VF2K|`a7@1dO`yjqquKO>Bt#I(+0t2uzL<8wFZv^6 zYpFhcu^+w9ajmLlQ}%{6UUMK%95|Up?$%2PUL*M*LxD>}tKjNKqqA7r2Gz!*lh(QEdTfpmU1ZJ~4Y zEB0MoAA3bF+1chLkcYRe%BGE0;=&ZROKurJ`NAXwR=!hxk=6Gxr?EBq%AWu7XFsb` zJvu%qjrfl?vKR-OKt1@k7l%VX{3C#7!m&G313mC7wvCVOvAckdI26aDu<=#WaM-iw z-YdAeER8ZdJmE6a~#5Jx~8Pxie_-sA~|Ok zFcCp4Oql5>dQU%8?k1t=lNHmK?ZX`V$KQNhZm%9XZT9v@Ck^t*?>V;1K2<3))+oZ7 zIoE)2vT_Os?&eH-*^Yx#92==bzL%dfchOSwt2m@El;h`EclVDv2;Ym?fhABS)sKlt z?r!EG4oY7HaTVvTYM>tq;<2Z)y#ZNs?8^P?T|r4Vo)c*)ckl_GN$mLAT->>t*%I>< zAz##Z8g<1e%?a%vIs2V3&4Jz3cpT85OX}n?unXpz!F&#ICVP&+x87tNj6UO`xb{m{ z`3W>*w-2@qO}~_ea-N=@*H8|U$pck}cVU%nxZ15}lq0t)6!^cDj(*_s+{3R zK8oU#$8)O!z(zUp>LL;0tuq2bVEDRnPL%gDN>AAwVS9AA(-JhrlmTduGf+O{96kS@ z@>h{^4}-;*uFlOpj%@7pQ6`xXFqWm759_H<>5JR}IR__5(VpoDvRpYBU>Sy0-umb< zjOlYKI2<#=X17F7vr79YjUUZx<^o02i+$<&+zNY!){G-^>uTQ6=G&HNIR+CHYZfEE zI1q*&`M3u_)^Ud}Wpxh89NuH-o9Bq<7WC!12_9e82V_hRIw~-PGCC@Q6iL^~r=J2q zezpRV_VGF0=H96!6-e@Zt3=T~tGfjQ#=5Z{hoHn*AfdY!%*}QwI%W~4k}_us^nlBC zvOYoNUV?(FOI45rPoceXdI5b7Tt>e4QaQpy;M2-Mt}m}c)2h_^Cg6gvBd>7vi}Lnl zKLE#A@P&W1qlE&pI5KaMli-4uX`f?+DG*}y5)rjk9RZYIuhjsw! zx-O#4_Z3R`zN*F;INRlf?4h>>a!TU@z?sXYuM!$^*xPZwJDx>PG6sPg*=n~zX_5nD zp=U5t(5nI|A>hPZOD1em@?|?>R4=5bjnL8)57U;XQL5%B?Fx3-fq#ObRzF_!0ME&= zH1zeUHfntI)i&EN;R)KEJm(C0Su49IZ*M%+KR$GtAq@M?FMqXsZ9!M;9zW`kVy|e> zx9XlY1zoajJ735_g+iq^8ZuQwc9{Xj3-#}V@#nJ8X*54}9?4l1e`|?!WtPf59Q%M9F zX*>P1`L59jy?l)4Xb27j-fY&{393P!0BIp00SIpR5$&q_^{8o6kg+ZLk_J(-E@CqS3aD8*{thO?%O2m6Vt5@iDy4(}=v7X~tXj-q>pdN}un`Y55$E3_C05DeLLnD25 zLK;xR&$fXV8QYr#A+p#B>I7mI%?yuyFDuhIzQ~5IoXwD+$GP0H5c8ItI~nN5&pM~r z0>?dv);RIzqfEf=tCjp4;4hG=&Y%h}alhu?u`%tEgxpTv07(Jy&Q-z7G zu!Xe6X2@9f)Sx>wz4{`sqjR2hup9iGGvLWYiCUoL`3#&I9ViaH1nhJe9+e*%ILAB= zJUWyNoV|J$JpCuyfoF53-F=15ohi(%j9Ggzp=ns_HLZ5$kLVQg)Lod zrjE6Aq~GY+n>VTmvs=g^PaocYA6`vcbOhd|`TTS!6P&P9ATkU;1z~jEv(m?=Wx?s6 zE=GqfwB|c}*W_iLc1j-arVQxuglT8I_M#gNIKwyVz?_F+`b_B@UJGF8lD^=I z!_>ADlnbT==j_RLb9i7*UNWVE1OxF`;K(01KuTYdF`U-t_Kt(&65L0YcwlVwYcB!C zyKnwplR8-RB|iD)>b%tlNz~fwD54TG1qCDw4}m&B03q1divpQEBVzzvZ5f3^>}G_u zvwKt^z;y5XL>n9!B5xCks1V3dWFn#xtJ4{reU8c+F%Q!-z|)=;pqFXW z^)hGK`xs`T=={w{l;1ftZE*06`tbOm%H_clB2q^fqTgOgLd6v2jECE&` z+t|A3GT;k@wIQQWm2jUZwUv?Zhf|m`rNugdDEblpD0vCuRE?awA5kyQf|2~vK;fm$ z+>mz1nV=uA3M~BI05UY&PBYHJ<+inj#!yYB8(&Rj#_3w?;U~Ucg})Dvf?tkr7TpMn zjJ4pJwrWuMl4B6q>>uv;^sdqKIK%X%KUQm@qhO2u02^C(Tp+mMKt$@sLyX^V}>nqAB3auDRF19bE$W|2E*NS5a3iD zK~yJ{w%M&GGRkZlw5A_+^_($sdb5ZL1JV8BX!FlDtjWIkRt_scXDbd99KGHUuvl1U z9R=M~Y8PC&|5Npl?OERH#E}`k7 zs*Fd_vbkeH8N6@}p5UYPG<1z!JhyC4cB+Q#w};zU1CW3bcQcUDk9^k??TjP+addVN z(Ur>tlq#&8@z8ja_L5v2pHwGo|8xfXNs=kLLXP@mH}uR}3Q3ObV1DPgEV`_N3$6%* z9n&nxgAuZI3jyILtkR~^et8fc@;4kjIt9`8x=)a@(Nj$~lUULj0fB0VsvVWG=0q@W zM+Ez&cW{?ZxZn4tv*?7XjR(}6CS#V^w1O90A%k~cziS{Xsq)hi-SoFX)pHVqD3J=3 zAOjYtw&2$+J~nk7V#50-Hfw1!$&3UMcCCD)pxStz!!JPvIr69UTcYEBrUmwiu~XPR zdNK;BG#9Nez<7?g>;XW_o`Ef30R}y6BSecV2~31yu8BjIMJW-;)~Zlpq%CSzyn8ax zPNG);ba5qG6IF^>E&iCcF?Y7|DC;{;7Dh`0wRDq}SQD}3Wz87HwY5a`MR0+%>F_;XqV+R&84hr#i<=n|QW`(hG z=j>1I91#Ns9=YZ!yM?IjsO$_660FwhPO8IaSeiVHVzKR{Gb=-Z!Ey28EyDY?yHQ!Dq%p($sK6$GQD{~tAcSBZv~K{zo4h;5LM5+>qA19 z1wgkg<;wmdrvRjkw+AKxIfe*aGDbR&5Be-`@?A5JSd?rJf7qJonPw;95%1$324}4F zuZODSems_8b&4$CsTkoUzNro&nT~qDjt-K~*~R_x-Q}!Vao~zFzVfIs>R%>+|E8Ys zyFm*Z&h-GMSu&xeZNZEqQyLK>q zD|%)+l-Cw5Y9vcuz6grSd4|->6B_l*H+|r)3(MgIiO!;OEYp)_P+oA<8pfHnf8*tshYXoE�Y_cuTKCU!cN 
zC>1s9(~SAMU;nCq^Lre)61#2NAte8?cU5f(73`Zc_F;3-q<;2wla%0Kbk|}*yj6Mn z6g{lZ$e7n0d#YSO$uk0D?C-V?c)bd6Dlwb)m{s-Z#Pf5|;0&KQ#2?WMTY;Mge#`BM7>BIB1J+}6K*$l0_n_n$Q4)TaW z0tJy4gHsA*e4CEz4hG2iPyqr1{ru|V@OXb`N`(Vn9P(tqN>35j^369M zsN->zp>SpcqADe=T;wjN8FCI}3na{s z3%Zn(=#`B2O%z@3q-sG&K1a4PKtR^{uCM4_+K-OmHE^Xr_G$x9`~vdI|5UV@U5F82 zacY1ZU+7w$aR$?^g?Xpbw8?pIJH)D>2B06Gj8H{J`Ox7tZ<_Vup{j?YP@foL3|=J# zNRUCeUs?;+(N8i0>H-}gkEXl(o6@x%O(9D1CDn^wYW5?8er{ z9*3H*RdC1z__9ShWY@>h$wB)kw~mPu%ee7erEebFsj3s+<{I0A1de2MC7_LcnA7)` z%~+4YuEUL%dg)y&4+)g~E~D2re4$hH5lAA^JTcv{D2apeA->DB+$#*uO&oH*XFXIf6byQYNG4?j%NSufSI zN766(NYSj@Q7L50GFB&lvV&PSBH^1g0-U3)7!7aooh@Z-E+Q@l7`JgqVj8HYKh;>W zA6}vxMm%@18B7<9&uV)%H372m+x@chQr_lxVJm+jiDlz^Np@5Y1Ux(OW%NmZWWk@Q zb~O->EL&-QZMw-~?ep*|d<-^h2H*Y8zi4NkOc(x!KZo&GZ^9q`gKvW|k9~ThYB>C! z#7z~Z$#llCv!5h(=kL~asH~bM..inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. +""" +import asyncio + +async def sample_image_embeddings_async(): + import os + import base64 + + try: + endpoint = os.environ["IMAGE_EMBEDDINGS_ENDPOINT"] + key = os.environ["IMAGE_EMBEDDINGS_KEY"] + except KeyError: + print("Missing environment variable 'IMAGE_EMBEDDINGS_ENDPOINT' or 'IMAGE_EMBEDDINGS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference import ImageEmbeddingsClient + from azure.ai.inference.models import EmbeddingInput + from azure.core.credentials import AzureKeyCredential + + with open("sample1.png", "rb") as f: + image1:str = base64.b64encode(f.read()).decode('utf-8') + with open("sample2.png", "rb") as f: + image2:str = base64.b64encode(f.read()).decode('utf-8') + + client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + # Do a single image embeddings operation. Start the operation and get a Future object. 
diff --git a/sdk/ai/azure-ai-inference/samples/sample1.png b/sdk/ai/azure-ai-inference/samples/sample1.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba18b500872f53981c93aede494a07b59239d9fc
GIT binary patch
literal 307178
[binary PNG data omitted]
zZu!}QK3()mhtueAGMS*q@=#|JL4X)$7tK7}A&YGczfMv^=ZCqsp_lTcBib^J6$h*a zSh(kzhccy8tCP`CHW}AqseXfrI)f^ZdZb{{AKlT0c#eGaq2UMv1g^9&TxLaMjP80k z>Ea{pD{}|%3SHXZ)U4JzSrSpE(k8zu4{;1*tTD{|uHx#v#BrlPs2d)&nVUAGjeeP} zy;WX93MIkz>h;n?dvVlgtG~#HG=tQ+Pg&3y135A5G49N_KY-5Y_!v3WC>v~uI7m8dTd+neB?}yjYJ;pwp9Zpx)n5s0H?u>H(P*P{0IgBRIX7E`rx#V190j z4orby5K`e~^hVP<8Ok$iMt>S$K~X~JRnrgcN(56crU8?-(4(XMFjT5KFBCvv?xQez zth5B-G%PBb?b1$lF91O12dF6kn_G;@2ID1=Ko8$Z)iK9Cim#KiLtvy;Jmd7Kkf9*7 zm~+m!a@*;yF@fE7%jle}o)*=n;90&XrR2+IC;S(ft!V?#;& zJIJS}DcDusA!U4~(i-6?ltgwKj;?gCylW_+_tyz3Du)bXwiqOYlBjsAbHfBpzscvT z*K@vGLyH{|^M zcRludP{-h1&#qz^86|VT2%}g!lcwZPbS|N&0&N70)5~8dJG|DwDi4uCjlKe{oboRY zn@$0IGD>q?L$2>a1C?QE+8kQ;08^nPDPyAo0V2OEzZiF%lHs@;89E>nvl)>)*Y$D4 z+8Ah^CJje_064^{F;YK3FCA!=yc%>a4A=LmA?7TE1Vt z;5X;7LT_!O0|@9e3VcQ;EoIEqx@kXHto#c)w%Z021Wi-9F1_fGa|fN8mE$_IIXJwg zuDg8H)d8cA*YN(uwZ^=|2S#3ku+FGXue=d2_KfX-15%d-#Q|tSA8o#CL1*8IP|-1+LLCa}I8v@~ zHq>opYlj@H=G*e}+H^BxM{aaDHJUDq_0fso2cLsJ5I~R=Jk?oculX?`Ubn?h1oEdoX~${%_Y_ojiuN7f9{EsBbM7fZ5m~E#BvP0kkVkDE?tU; z4dx$^GQAs8bTzp^v?~oPLy5uBNFbj&HCTEYWFb=}cjO+R!rdi6e<~8V75_^`kgX z8873jya1=cUo&r!4KumOsxrA=nWcu7rp$2&#mz0jlUXtY0cIH6qq8CJ%u@CN0hiQV zW^|6eH33=nmIm-roi($%GgD!I{O+sLTe}VY7)!%-holqWObi4>k&+U3yK@~x((nOZ zOPenR4{$BU&2)|5ss;)&EkY#!u8P;;osVH z3{;4Wkd4@7zeZR_%3u@`p|wq9I07Go#XSIxsegyA>*!ZpJ4cz zb`)caH+>9nYD7?0g^_XIYC!Mluw0^WrWqlOGN)&{)hPf#Tu-^(`sRCS<$R%x0ws|J z3dD{6^~6sw?#D6)Sb6|jpJ>|kOM(cgc)wf@#TzI9Mp&@X>(4qJZLJx{mPmKEF>26Y zIe#dw!M=~)1jPrTXchz+iB?fBV; zFUvKCd%4=-feSp1EGL|wpG6pTten5=hF7nzh{C5Zg?R)WnG*j~(KN*8mzQNUIgO0X z6?pE`yA)4NVOQDG8PvGLD;O;IRSE=%`{)bVNQxWpxq~q?$~SAH)8QH(oK*@VOU^AG zE^Vre9h7#A>?t_g-5Po>bD!-r?Th z3k_q|)1b2stHR=%o=Xs0jSb@#IQ>jn(^FS2%&d&F=G!&VloWULl16-DV6r`Iu3wr{)Aq)%i$#xWY9_eNAY zOwz-s$)Uq0Bm~;i))BeR;$k8oi~!&1TuD#=DSLt(!woJ-2iHP1X395cQI6>9mMC;~ zH7&2Nw&m4SdRsF3wDuMv;TEUn?ia?=Dm zs>7s=>NJeT7y{-w#2`!8z!p3RP+4<ZD7*cK4o!;$@3=9Vg?r2E!!+Upl~Y!HkZW8r#?s zZ~O2Ud$L~O%q$t(lK$lt!I+r|4gGo!#<@Q3(sjjeWy(M zWH7=P0WeC2aJ*@O{#{>}fO?=R1K%_MwGq8-;Iyo{cIUb!bJs~SWmeaP^tPoGqeS@o zc=5gVu?$c+GZYqSja6P1mq8-Vp@-n5l-@C>z0SqTblEx)+lrqWHW_)?uL2AfsZ>Ek zsS|y{+CEpoah(sOfkjVY?>^$i#3snUEox%;^$-|5*fXq)Rz}N=#`{?YnOfV38=(SuT=o)kl zDv`rdphiercQpmpgR&r%Tb!*ZX*`~mt0$cUl-OkdI04BBPDM($ji<364_ZZPvjOS$(MlB z?+v(vh@J6mH@h<5?8{=KGsw7&+Tf>S+#*ZLTBEtg;55sS{&jSL6r9Iuk?TxmfVK&KX99YpreQxz0W5 zm&s%h4cCD>ZcR~FC(0!VBQOnR%}Qak;Fa01)B-SPzmq4{3mtBkOo(xdQvjQei_2KU z5%NytEZ>(z$)=d6i(`2;+a(%q0@#C&YwrTGnJbzByMkY|>nn20+rY#WaIFcd*k)ac z(YJ+((>XkZw;(`3*G$}aM9^~D1E~9jgqq9k|C4ehWq6hWG3t27%u}Mjc(WV6h4>Z&c)1$ z23MIB;|#Di7^d|cBZM<&3f^(i0Mf@l4J2>Z@6`zF3{vZWVbHmDJ-8ZQ3&oHJ<(oEB zXzl#?=bX&+cyH*#*s?6hJ<{Lwd@{w#2-uO}F1AMc@#gvz&@vx??%=NuwzVVL0l!y{ z_ytU2?9jwMlMRzT>5pif`v+i9A3Rcjo!dUPLe7{Bq zY{T(G&SMj#hm&AF$zU;3^*}o#uB^-w+!t;t$8s^lY*PyL6vN^}l-TWx)AdK(Z_7&l9m0b>JQ zjX;tTgm#j#)pqPXXk`kng2oO{RfRa<Oc<&!M)Y!!AyRae>h7n^B_waL7k1+^agOZSktvO?m`ZfA^Dnm z6`UZeVCMk!4fEU}AE}my&Kg$7?yH3nwy16hpUhM^_N|5#pa>!SzgZ~tz(1g?G!_{9 zeVj$WAW<9tBJWJ0*FzSND^sWjP>C3kYty5q4v#v{kjVAG?3PhaP0!PZsa*>PI|Cd* zbXb1RaqOlG3!QLAAmqhqyF9v_l)>bTd-l*(oivi&;0zj}Fy_!!{IF7+JtuImkM5XT zO@-r>?3(4plsT+7(8db%ahdc0Qg|7;q#G%_zoUQo5>Pb7ZMF~jK~|)-m?U6pfGiMO z;P6;iu@ju3fJ-x{r=v1C8xR2^E67W=v}Tt2$eZ&9%mII+Wu?nC;aW!m&jDriYc(fw zpKfr%bO3jeRhKiAzlUWs|EXqs{%6(r25fL=kHBw+OF8s{$OW>4kit)OU#!DH&X}e_^E+J0ymsE4R)6RXn-SV-9;Bn%(}d~U_?w`%3q8=pWDCo!+GqH)t#wzot7g3 zDfH4Y#OTp(Gq|sA3xM)lUDClzL7X%uXwd;v4#kZGf{vTd%(S>;XGlQfG!Fo_;jS4H z@jCS-X@b->_j(}qT>{o!MUT~sxXcI3OrBCFcKmitwmQiL&ga!`9 z*%$)CdenOH(p)fTW}V!$6CjSFS@4xL93mpCthp+RAeeLjO#$BdNQ2ZGKqrNRq0~$P z@1(I2A`Q9Ui}fuY0(k5ZIsyx8A7HIrqB=f 
z47rh)m>@uEh(1i9Si5j>ahBpoojrMXyKj^$oYrf%u5rC}K4;9!VomFE z0>aEW&ItWT7ez|88rjPai%X?WQlA(TqwD4JNR)J_k&^Nb|Ia4fa&d`n;Jg~~*ZLfs zH{`#AT-@Pctygo#yrzBl8D*8H@K7U#>@U}MWj@C#oGv9`b#QT@?@rJ^E>8+x&=zQ* zt;X23^8FU92quSEd|nM2pE7`pveLpz<26rq8OC|Sl~A(?GU{JNgb zEe_ThsME-o1#^CvqBCU5@tIBB5*WBlsUdTRl))dPuDAwfrYfCD+rLe!)miF5?vlmj zLjzj7Rg)c`#!}g5)HsVZ{#9>eLINblc|f)fcUy+c7|keRk8W`QT*}pIS`?Q8x(>93 zC*G~YB%o8DU`n1F*gzwtpLSSs19I z?Gbsm*(y}bT<*?3* zS!DqZ(uI+@f~_NFA)5w3nf0Gs_U+oD%O!fwNy#y93nGhRAJL1E6q^ zDdskS=I@G~`*+ha1BCnht62a+!j!z}Wr6ZqN5G5%G8depeKSBBEQTGUKC(bj#pHwK zYMD$)QfplA*d>@UuVd%7AER6gw%j{xe*sL|^6Uh`BwM=pu8lsZFB zv!1w&7Tum@2F1fUbI^Ehgn{LbX0}H)8!V|w}%+seotNzDMgm;A9`@TJTg5lAUh5Vx%L=NQ@hsvh&OSf@|!v>Z|#%z z)iuLNx^%y~;CP~*Bfs)SNAP;KE3X-UKpVLj!$-3;>SPanbUr$K=?QsqEG|=;>hHOv zMZlmy$Pa^*;FEHu(LkQ|IBJG;2i15=^)##V~G(HA2OCI%SP%(oAOZ>Sj4Ig&8`vw zNS;V{=&wQr;iUaai=iFUJ z>VUT!{d-sbJ8%y)F)($CMtX|2NXjvxppO&u`9Asq}*EYLHEGK=K^{X7}d)JCbNIw}>z{ad;4 zycjbG2D!1Xw5x5}?acgW|Ck?RmmjgqOfU9~U%NOAI%M)Tb`n`hz~y4R5l5(^1WIWK zBw_hXLQ0IPT@s%LVFKng;80Tz5X0%xXkAa0;(9UwC_*|gB4A*qVvSPtFd!xEIA|{A zTA8b1${HvJ(jH`!3TT0clSzsQgJqRw4~Dm!5LCmc!XP+$G8NWQb`{C70Y*tTIc}vM z0oKdCGg`+e>22Q~G*%E8;<~0MK$xC3Q;~uzlu$fDpUMhv+;o$UOp2HIOwdGDG8aaZ zEQZyXZk_0KG*asn^$6$<#=Lipm$`(j%;PS5RRME^QQGO0xGrzeTnYqv>q&axO;TLW zuxTZCjf`Zlv~eAaQ8S0z6awkh8o?@rb|!FWeD#<;yk{{tH9td;RtOm!UqMDWanUHWQt9?(6#*6 z7<;eIn3;w?ZCa=%P`kt`lhRf#>d%^K`HC^4sK)I!bW&thc35_+Xf|atV z1FnoUn+^2up`O0U+gPJW> zkqhXL6C}oHQ$gU2)2P&a+=A@^Fgww~7GMEX2_P0YUgaA0#qm{F+`>0GWBvg59vpL< z<;~5aJo{i>KKM|_(`A%K8342c=%%q#{yT1I()darbaBsh>TtG^f+ylgEpC<6Vw1z9 ze`sgjO%Ai-bL;3Hot>1&k0u!8F$6c|%g9|KGpK+baWL1_u*$#Snf&49!7oSV=s5-& zM-L_uI=z29);T~P=qG54%o%KW@RaNCI^-IAsUx4WZmW^vXDa8x-fXWz2M;(ewj0^z z`#4TauGz6xz+l7qGalL&TdcN+Lf|ev2a~GI81-6AXD08&gRy8{n=jU#AlGIZWvmXI z7;hHSaYn~!HwC|0_RyK8hd4nR>ne}VMrR{I6LMsqW_!&LxwASRnw&6b46A{n+e8yog7eCx4x0L?X6sPeXmui_mvpNE zZOBJoDW2Vlua1EJqf^zGhY#@Ej6lvYaL&wEhsHozy|fK^0!QKfZCNZWW?j^bD0rg0 zuU4~QvCgDoqkcP9Wyma3UgE8!_aDxla;JW%Pekw%#SzN_CdL0Hlo5l5RUq>PYoaXv zmh=z5x^LIdBx?4*#+zia5GPwh_}sJsY+}%+dyyiDU@LpqBO^F4QYQmKD;zPz7)cdM zLi43V3b1&i#yW~hgy7!GY^OsJ6axQwkHBa2Zh=dx_+hX_k(jRgdB{XkJM=9hjVwH{ zC53cRVAIDK_md7f)5inU<7{S&oLj`qC`76B5{>YsKkX~Dt_Ny#72Ga>(T7kIyxemD z0CA6DulrRM35*4_wlFS-i$HTvCTqDffVL8i(N@;Ud559W0J=6CrcDKTK8SMom1$x= zgWh#r9!49@Ay}r22IEtdgXj!Ev%qaRTSQUZh9=#YsmwN7rUjiK=#_1J_gjA~T+s-n zn1iTeF`boL3{L|u*YzLiUQmK~%Yy1qrF3f!bT_@hOGjXh;ht@v9^zms&%;zsho>lU zRYV;VjjV-f9uSi{39i$Fb}Y40-P5>EvMvv$Xc9 z$ooF^0hg!ka`B{JI)edipk=&OS8EO2&bD$X2br=~#lp=~jJu$wk;H69E2XgxHH2Z# z&Jo=+ae7$3AaV!wRx>HC)0+m}X*DxmaDm^m>uG<C*U?%ByYQYFb#Y+8qKaF4XSfd*lEjDy?o6Q zBOkep>J~F~((kRglEU&%1&QOKPVr%a#PC3;)~L5S7s|49P?m3Ry;fQ9BWSE4XFPWV zFOu>AmdX+NVf|Ht6O6Ai zZc?)$&Su`|ymRCF7P2Isq!5pJIqx`F&`bKel&cZm(}#68V(NS`OR!+uwW!Z~5>bG^ zgE($FS2^>9mvgbECxv@pYCWGoxwd#&EVBVR58)&*gVgilzId>9;1mQoJuSgcrkYN6 z|Equ__?ni}A@Gm|BPI(&1RIrhV*wRWO`SMx@%uLDnd%|-17DzFm8g~RjBVRq5YP3?&b3{=ggxLn?xijJdpiPCi zJxnj%RImjLD~PrvLY;4{%7xj0c8og)U*gu6UYp9H5Z*FIgarfQDPj{8-=kQY7KU&i zU7}bRdT8SspZk?HT&6qq$Wdo7Kmh^{A{0;pAT%N{VSxTICz+m0_^&!-dfLuY?@`$Kxpt590WICeIOyxB)v>$1!((dQ zBDQgRS&+jH;SuzZXYPB>x-v3gRJ&S0SHNFEcHM9~UBt-{V5QmVz-Ss{hv2&|tfA0I znp)p%b;i(EM#&d>oqdzg%loZt}^;WM2yC=ISygyE>O3{v=Mzee*! 
z*~snbXjmRkMlnuiI&M6zk9)EuhWnEeQEV_*^fn<%2u}K^Z^Y1ZC9%@yIGeR`no|zd&u17$YWnI z97Iscm1B_JM#nAzxXa>hj}a4K5ty?9eVhK4Rzzb)QPM0~gvce%2nMJv_&H`8g$0Za zIX0!r#HFpDYZ!#tL~=$`e5SF}!1ml{j$QW=LpluJUQg&-)Jp;vYcEDm<-A$zNac7Z6(1>qe+&h1H+AA05gEAZs()O)J z5czRiu9)SLodvQwCDK{rR+lK4EZt;T=-35!7z0Ms`Dzpp&tO911jiU%P@lwTBh|YD zhKu2;>ogI4&Vgh)lK#WUcyb-=&n_AxW(y!|hrDSE)I0>n!JMw|wJ z8G4OU8Jpx2o8+-PG-{7rA#WalwgVfp`v_-3r`Wb^V6|Ank5wEF+l<4eAT$hwptHdQ z{EeQ_j~Je!wm9_$bK<-d6$`;2Xc*apEi@Lq^HrB0l=Wz3K2u=b)|~^*Fo`g-F<`uX90|IqbcAeiGU2IxIcG+d@r(8KZA4xjv1U%Tlg4sJo-+mmFbJ3F za*d8@mcqS)zAf7Hpf0bm^|}||VSBFwfhOXs%dp63cAa4aEWWc*jk+9AR$U_4xjX{x z#GUF?#W7UbWvBzI>3YF4go612ZVfM;SsY>ElhL^oNx=?GP|>A~UXI{vq+*Hx&W>-n zh5;uvEEsW(1gH@ip00g4kwi-VE;AL^3XL*AuyCzMFRY94Omyy;)0F$y5eCNI;=3+U zhMrCeH+1c!ZWzUcQ4Wc0pCEb!-H5do#&L~v0bJbIZUK$a;~J24o72ieZ^3~si{48WVx&r&et!ahnho(wZS=Mx3u|L#*&P>me6$fH7TG_B)ggf9!G z^C{rGf%bGB89KxO8#Qkcd}XZLyHKj!=W6i$T@QXnpo(lBdJ2W z$8@eEV-^8|#Yw!6AWgLczi|#@1L#8sLFc400a;Q!6&m=_hnJ_!^I~GEdzk*_I6g*W zR{mP7SYO_Y!_1g0Mr3x#Gkk(qnFrq)rMcZR^(Fy86yH0YgY=SL&|c+t8BAr@VY1K2 zKdK%*RbIRDSKwJ^dFEzL6oIi6Om&3tMc^f%=vEBqti*e0hq~il`dcf*jZlbkJln!F zzq~T4G)rgQh=wwSb(x>8dv!t_mu+YqjyQs9z2g(`vcm>Bj3chz2K1E$YekM_ppynz z3z)aJEokWhNlTn@vjFngwa_keK)#r(lWwXL5=uJquL*4wpPWO0>?A!89vb;2OQZ3^ zKxV5T?XK3=4V{$pQ}}q^CgQ^|AWH{$=$5TSW9W}SeZc8B%0Jo@4zXGJ)u3RXkSKwOsL~o;Ip8Sx?QQ!N|V)7pIUpX>iRjwP`22XOunK zRbTEcbmW68xn5bR&oJ2Dg4-BBPZQN>G5gA2*X7|+iiNcY;C-5MLMj^|Z zMT@9&euDuGo#+G_8E2;o*YjU^>HM=)2d_Rt+WX}IIE0zcL1I2O>$9W^NyhelZr&;cO; zkgi!$mMBpe@ut!T1<&U$6?lJ~8pjn}Qf_sGYbHqNOd2*41sfXj^5Po$KNwd&XV7{? z5-Hn-7-$4aqQ=Xr-j~H&eI}x3mx^P15K#E%TD^d7#jPN!0t$b=Eo?|C6b2>zh#n3DoX`)*F*xiI^dnj;q3w zBRd1m8W#kX;ckg2Rzy}yl++4g$E+t6PgmLlmPeLu5mcVwhRVo&29rj4f{TSeqEjAL18>zizNKHY9!3p^ z=z^8%arn?BbxS?)FEcPMM|dDojy*-z1x_6Tmyk?rTLkA^>~Din(vkJNcAe`X#?B~p zIPOCeZ~#6_*A#XUDVVib+<0CB17~bzY&1xwdR@2FKg6hEo-$P081_%ly4K~_S$ z_HL0%J1%#1H}-#yUM-I|U$;IDy@7RpT*8g+5>IxT6X zY*uHcfgU+F`_cAidoANX0mIBJTJV?biy1%UX*OHsp-1xA49w-(B*k?egc3ld?;Npe z1ZGwIX3o48v}OA`;$~|Y)4c%|PdqGY;o(_efJLI}*xBj0OirOU4vb4w(RIg(K)Fnn zQ#C_x4T|3xIf8Q<7iMiXQ>R`T2^o=nmrhO7>u4Ipq>#>u@ShpU^$Hnf+%AjJRCsCM z8f)bSpB`*s4q}ig|I9~2OB3_Ak*W%#lxdfIrNbq z9`02mVbBAtR_{+w-MVc7?1P~;3P$eu{lv)GF(bRy&uBC2vC;%jp$W1;|6+oHgS#aZ z7VW7)N3SztF6yutSDo-IyD)xFDHM}jnq_3@I=qE0aT?5)Y2TQjf6Mn+H2AXzAL7U$ zqs~owxa8n>0!l16G-lia8XqQ|z;Ab^uw}CJfijYmAF3f} zX0pbn4I<769AU2gFs>5;4k9YhiPrGKos@=IL#|Rr7-G~FDl4?zQGl^R5V5;uf$~{2 zl?=m4jaW5kDc*1nh>?fKxGtQ76TrbfLUIal`lKR;&V0BJ&`wbp*O~Ix?i|q^pRAN3 z4{3=5kQAOE%#9vS6M&RijvG4VOlL-+Va-vz2xf0+F&JJobkP$%Y3faC$kWWBhegW>A5P5B$8GG_G4RC3O1 zz>+T)I7b>zohx|fAuo{Ib(^JR2d!)#__0z~G283SX3Mjj@@eEGLcWA2~s27&B2h0Bik@PlelizzFoO zC#xxM7Q&jkwb0Ydmd@G`;i%cP75bkoWniBtp}CY$gC}%e!w9`01oOv%LHDeAFtV0L z6}-Vtb&e@{fR#=#b`ZVwfilpZ@k3qasoYe&c3H^aLY`Y7CN_%wI=$kfj=QpM=GEm3 zk5C@;`xrTQE8vjRkHw+tl358%XvUOrp_9r^17{&MMetU>JKj6!tKQ@&ZuBY3T~Y>m z`8MKCVHmQY)4&~Ik429-$MmD2R!izKl=G)u`h%FzNlfGiUw9~4s_)T>#)*d6DBb|n zLTn!dMH^m7``+YP5|dGVCV>Ue+Cwu5Iv*hk*dX7;jaCcii!rqrQx_WebV1Kw(So`& zC1*RNo(KHc;OHS2VkD;Xo(>MO(!XMfZ-ZfM4194bXT^Sk<-WUFl=EsE4O2uscY7ot zWRB{0cnZIS5cSX{3xC&^+jm1_l_(qqlF&IPo6cpnhby=)k@Yk`V_+(Q5mrs#-=c6v z?hq1M2k;=U-kC~fv=b%dZ+gJcRVTvFqVPtCaWM4mO*be68dImkFd(kPw5>3S$=66v z26GK>BN7iRP+;QaaKCTcw9w!VrOS+s+WAT+V*~N?7(=Fzlo{_WO6PX1Ex_W|trgd} z6{mX0+zW9nd{R|RwV8I(aQI+}0S0rRk?>74?Yd^N7%7}X$hk|y?g2moS+)w&z6$Jo z1jUD?4(Y1_VyHMAa_t@%p;=uLQAv&D1YTP+<>u$K#dSj>f6x8)F|6`pyK;Z~ZJFK7 zGiCu&5L{w_>ruS&R2o|})6EHk9vYI=J}h@UQzCaO;N1@9;VecKjv;H;2v-XA=u>Z7 z&(xH*Pk~$S7UgEXAWAURu8h?qVu!A|)3}HHsBEr7d5WD8@&dm2Ru1ZZQloCh3_a${ znrTpoHJn+35PX=Ik&tZ-b+)oLjc`LpkC2^=^o;M|mojO@;jybk(4nvE+n!RVoY!o^ 
zKSUbx-oN2Fqm7|R76%#GVZ7!WB3X=_5yOM`Y{ZoHLjXqb_56eLA^pce!5Ls~I!qct z9}8IzNkT_-AQ(;s2_IZz)h|vQe1^@8L;gA@bbxs$f&y>^Mk4|Qj`opdokeBZ%#)E0 z&YO;|@)JV>-2g*CyuU{y_omm#NT+SO+?LmKmroY7aVXeL#pnojI&QAnZ{X*4 zMRdwN)x$Mw&%NTv==xC$pj5lLqUdp=qqYUJfLdGZ$wGCzh zonwQ!Ns&Bd4O&Swe&aigI}O&XYIPhvE?WaHE-^gDsLinMMk6f^Doq`OMTnsnx>T2r z>4&cx*rK{s8~tM(ew8#jFC>zCYcy}R+XJw|PxTggkCR9vI_~nxnuMg~k>~yGiuw6{ z#UY(QWn5Z&7?uacnYF2p!L7AM)&ppp5svUlUC;AS)OqF%g`u5#Rk5LFfc+_^K+7nJ zPdAdtbY9bF6+nD8u&EgVNCMW#RZte_0Z+Y74KxGL^YUD8y|)ZZWkyjXn2N7)m*`0k z@nNC~8SL+IC=^1ei?HETc+y)xYtOWWpt)Y`Si)Q`V?gn|dw7*@oiMegUZf&gU=Va+ zc%md67Yi$VX0KaOHY<#fOLY)N0K7%9P0M>8fJbur)GUU}91#v=QGlUo+NgckJ+24{ z-m?3cdk7@*lLI|0SdtN8HVn4dq)(KjLL~aa0q9SB9`|V!{FX7Bp7rpN>uc8nJ7w4(X1-QRTil|Is9f^O z!%Zf`X3~Upo%m zX&OHj;RFnjowi5Fp5SZ{J!AO%T>!B{w~telv!(@i=q%9GsK!|ed}axJ ztT3R)KXqVj)Xrpe`UD{(Q~xQ#OqKazD2WO#hBzriJ+6=GSW0i5qo~0F?m}li2rUIW zBPxN(S`L9YOJR&c!{@fJL{ETh&N6Ux2j)&2f(A2UC>kkc$SQ8exQ4#a*mlf>xnyHj z#0;26TKb%UIZmL4Ov5@tt}jmkKBGXeAiD;jah}?TZs^ymPri(zJteIcbJ4#_gB(kv zD2j3^P=k$elAS8|)}R7FG*E!Hm2wp`Jn_qA0C{RwL^+loE)`79hGjIFz>6_(0k9)) zOfM;PiyfIFyf3BC{4APkfg4TN>}f!UDN!uO*g{*V&D?a`9F#a<GT`j>1 z95{7rf@7-VCuktp+dRf;fgef?+u?T|*WF$A9-af=Q;04Zkza;F9%6&{_vUjP*ZgoTVa} zM!Sma!a_=njx>L8*bQ7#7l96HB>Nbe!Dtwr(@-~>1aun87+h&4CV{PVl@vbov=0xl zGV5UgBJVBM^ht9b=UTUR!3lKPV_jqHkPTHpXHEkidNF>_O>v8Eih~)ya%Vk6oI*Q| zaj!p3S1U7dkoimpAZd7P)EHdwsg+D4x|Yg#NK|XO%t-1;zz_Nr`^07Er)OL@RM#R0^WM!4#Ol#GY=r$ed z4O$dpWTf|#RF(!NZ#}Giq9>oFtj?xrgN97W=*>7dBd2ttp^A{{g{V@{QQe0?dO`w9 zPQ&vO2aHqEL!k81heVQHy>;ZFH)zqe;}9HmGS;+H6_>$eTz7dKAwo<$GXLnhJ76hs z^b}3~x(&fSzE-$`xKS3t*vNf@a#~oo;mv3w_;ugBqQc*fF)2Ow^*3-hPRhXa6|@)) zJPoajpdcK&ds3Gh1WD7&o}48`lJ#J|Ueirl9gxnNHGr*9!E`-G#ZzHY9!<;F-yM@g zRB&`;s-ai7RhU74%orx=H(P(!`?8vCFzz~CcUjMu{idY_7N!euxLt>PF@UBX$2cMa zrxD=mt9g0(;tB&%k0y>VL2hLP(l9XJDxm3l*W#q32H3VO9x}>MX z5yE2@!zXg72p06j>qmKA^EUcPu_g`Q%;y+XkUU0!|8uWv{nBKym}Lu|%BmqZig9=X z-$>)uxlGyMqI5{J+HsUxXjG9{>S_vMmV&UMGp z5sWN~t5Lni&|5JGUg+NvM;SsNRXEe*^^vf*+>b_T@Mu=&z=Mf2?lYXY>9Q7X^>C;! z&Y*`5j}DX$HahKD0_u+^vROzeu$v(=O=*@dvOs^eh)e^b@pi6)rcO#UKl7|njWpm- zJ~De)f8i&9s$7Te7@N(RC?*CU0=^}2-cBHpT|AKP+BY}9dy!+ zYP>H_H)FRuUnvwuN-_{=SHBSBsxe$ao0~<&k0HD?<8cxvu%@Jq77d1~lLEMcAsQRk zsZ&9&d@^4hmsk_-bbH**{0O~0iLE-<*&;e=VNAgUoa~WBiu9vV701jXsDsLxwF3#l z_|6Bk)U1RNCVcASXoEu*zm)KQO+wsPOOY>^##rNd~tSv*tre3(jgK)?#+Sc?cQXuQ;W3mHAKSf^3v zlrKV4h#DtRfQ^h&ohDhD;A zR2?O2i@IQg%Nojpw9K}C3neL}U90|9O`Y;)5FTgx_AYO@+W}<7> z$}y_vW+;4q2+_OxABPOSF+b&-DTPM5G-g2zys|XQIK9t#=fOvGZMIJuDcg?I!d)Fh zd6zS5xG30#j%nL;C>yT#>7{s9xTowGr3iQmrohFR7^DMr`vu^Pm)1jpJZ90f4NWm; z4~f9Q>3Nt^V32}UOu7t%uqv>VN-B(Zeuo7T8*5A4;)hP+}X7utf;!#Fy!vBEYWuB8NYN5Q zJ>sMonW-Eec%yUG#VBM8qT@hl+9-WMgTLVzpF?vqAz-i?^7M^XrXz(99!S0TJ4ghVC@jp&v*vZK@x9LED1u82LCi z&4y!|bKxVG=odQT95k+8%SgD_G%1EcNMfv3qIa1;okX^CXVYbo+k1J^XxgM8Kr^;; zw}mENseB0*e59cyqVag!gQ?E(S?OIoDTB#5MgU1gM~u{_@LC7o`#s67m-F2Fr9XC$ zIpdI;K1q$vHbn3`3>w^y%PiyvMiQzE_w${*JG2$bR%oFN zAd7Ic@&=kE8-yIhVY`@N=z zLMW7VcOxK>$gz&nINu#n2>Xy0(IW&#tkZFr&SxcOd@x@PeyiEfxOKji!!}N?YxFv! 
z+D6BzQ*DM?d^SU1yKqMJNG2R6Yd2_&o(drGDuG2e?42NJeK4_V5tB*`5{OshpH}w(0-;UE$H;MS-luRe;^vG8|t3ko8zh`|{-;Mo%xx_7Rvb zbpiqb0t_}xNP$wk|LTn{i4fOovlK>Xdcp{yMqOb?5LW<1J)KHprr0n-+SR#92mI}X zHH1Tqdk^p!_Ar|LyQH*FCyc2F?*sur%IfwyMieq0KXXTb6egpRaM_mW->P}R@1f{K z1=UzvEZE9hA%z!$J&FV)xEXty2%c8;AY+YurecGDN1AKu_8o@15 z089;-3)J#qh=+W6DLBAWuG14VDth(mI<#!z;R;fzfWE>r1NPbX zEe!=WB3Yd_Pf;`K?7Kvwqe0saR=!OzuAWbx^)Z}}>zifv-b*8+2cz*W#?1A%a@Q@( zMXi8NB*1(iH%+M*E(0NdrrqF|%Tg)Y1lX)qQLcSU&8(j>#xX|E$LJ0`gvFFAFLfm8 zK)%bmod%EVy)*YSR|_$98m%W0lxh|MpyPh`x_eq%t*Ev=qX|JC9XQd+)+tfPob!YJ z-j(YOe8H%H>0kbS`NqHclk(&X-z;~NH*s~wymXW-a#GHd zP0RX@<`-HcFXEENA}5WC(Q)gSP9GOy+?65Z2O4R_2fZP4Xq5?c)wqE97#ss?Ka8Q9PLO$iq%d zY1^m4hDC>FcXaY04K$5BF~8t1V-O2-%r*|y+L|2kPE3IS%2Fa&+LD&Ow-nE(11sV6>?&IRSCoEL(OwsMm3BF{Bzv^#?v1plDEajy&dB znKfv1dDx=vS{o*8A+PdAr^&v!#|&1Yb>^93O*3772VGNm2*dK+50|s}EJe|Kct+ov zcZ=>UoYQtmM;#!uVMf1Z6{>y0p}SM&H1O6LSU;nor*GP0=Fn=$n8ho%7FYda7__KE zW|jcj@_kMx72`4PLm%A>4-E!`?eytZ$=TeehR(sOXJxjyL1r}Gv=rw_>;qE?0^QO` z2aKok!!snZmcKqEqmZ*@;H766G{5!r&;7eZDF}}f<{p%Qn-r9wI|>9SX(&ANS}BCq z`Kh-Wuail6Sc2zQ!vVgLU|b|47?=}<$|1DgNCkxOJ-{zB-R`9_S>fxts0J;})o`HD zIbcTSF-B%v+b%0XQPt z4H(t=2{uN?o=#_qxsP!-JwNKXg=mB$V%9?sP$2?{02#-xXXX7KGNI7ug{_EKRk(mK zbBMygCIroWr|lJU|<^i&zLr+2HceK3?nwZUdPZ!aJe?~ zg?>Elj9V24*DhrCYN$=?l2uOKGG~>;_d}M&(WDAk$+0X}?sT|6w z&W))}1vrid3S8HT1)fq)ll1|O0b>o>e6hx%AabY8E|bWP40Ub)UMP*`vn8mS-L zq!Aa82Ld&QQ#nuOh-;HALznZ&UqCiS30+j@YWmyzt0Nu5C`v(N>HG}i-G2L1<=g+W z|EB!Tzxw0yxzB#BoIQE7%!oX$UcM|hyq%8)WUfVDoDH%?D~^c~GX&kaO=^oVoo&j7 zNZ)=0dyA|BYEq4^m5YchGHWnU&uWlYjOm`P``cx4C&iBWJ7=R~eL9^VL6N4oM&pM* zf+6aF{Bcw)B22NlsL4%UqEyu^w1_+E9#EY$>g)H$i?dAg+Aq$>GE9whCnp!1&* zW%gzxa{+SZs>~}>OCtBHnb{<3iFAM`>q?UJuq63p)vz)(hzG@*i#SR$s(hAIQUu8LtMkuHT2p>KnMHprG+t$*%|sMHp@?) z5;05XwbREb;(H5+lN~k-c4Ck!FR&(zSqyXh7#-U8GcnbEz%#ES3bz2f%9(hCvdTN} zqkplCKO<+ozRRCt3bxkeZ|LQm#SYKz&jB^)Z;CH>7KV%e$`u%wx+;0b43T3U{fHs` zcn*i>Dfj>+mrIDE3Lg2Iux1bi|Y>kk6^4J=JnC{Mzq88~%pnQf@ zF%CU1!HVe4w8m0gyc{D9ii^XwS|K+WB z7=@u9Q%5Uk@44mf5O5Cet}r7bjT|*(Een9T$+hmnh|_g1;E8aNgFFlbIdtt_$3eMJ zz&(J&>i8aGgN>oBWAO;u?5aQ@^vrz>sO!e2^<2XifOT2~c7i($aI*|2im|jlTRWTY3=U03)KJ(H&^qF_0dD(jOu>%;n)pSc0bq zy7&N}I7IT3iKtJK%X^1_y^S%7fQ7Vh8yr(-E^2^|j1Ht*v=|0aXP&(1ujq{*YQ)xn z;nSN{IeGl?@+beh|6}>JfBHM6({!s=DWCl4r^>H>>tB}hPk+ALAhYgyH>)-ok1(R0 z?4Ypr#)3-=J&j_0qM%zWbs%&&Je%JwPvA3ru0|#fNd_e#qN8Rg_$&@depj!MUz`PT zMfye8b1#2Gi_K;kr(8$XVpG$z)-R;k5QDt84Rm}7kUhWJ#&OdDZX=&QjmwOgK&pX? 
zQO}sfGw36~i17HBVVVg6jFC5uazGm@T3=zhe@^u7bJ-bs8I0RFjF>bW#-z@{8QZEs z2lxcW=m7buj=u84b&OS+%kj+e9!Dt$2q8oAsH+iT3}*erPccCpG7FiA7+OjTtE<&* zMvrJfPsSeb<9iLW#g@`O^#uou6E)(l*{@RAmFeZ4fCx?FMf(|Qiq@PpF-=% zKMgVuhvg}F9@p#n5P4K_7g-lb>yXjwEqs}W4$7PQ?l_ep`&Cx*P#YoJHqjq#h-=|4 zP5Q7i$D&N^#dfX`8-zBoPMkFOa|tch%M20pwKy-C6`#=ok7_l(_P~suL8AeRbvTv2 zVpx`jX~S{(e2p5NyIcVsldgx3*b;HwzC4@5;^YqCGzL3-$|qm&a*XcKYM0^IgPNc{ z<5RDa4M?Dc1AtP+@xei2;E@Hh8Om}!YMyaflJCGn8U`--_?GxqOAhcnf>jVw?~ zOwQiyq}>2xj||1w@&UAwxHK$}G=Sy=xtun@*#Co=-6 zQ1x?#UK#QqeQ)oRvN^{lxz>)`-_)!V5!4tNvU z(dT@z#Hho6jPweSvsznawFqg5%$yPB_2|QS>S=ngi~Q0c_%M+u9OLwOT9weZUcMl) zS}r0N3cM#zjZLX{TnFE!fJboj$x`x0;Ia6`YmGcnb;e>ED#(ClCS@4$I;IsT-{50fGpB{Ba)NyLX{-{RS z?p1Z!!M2shSFc_rJxX`HX%{(Hm)yoTM#1H+z~g>cogL@q+B?%nKKLKJg7nbMbx&wY z-zsx8?tZSvUdnWREb~Uc{MsM>N$Gv$=gV3J`u}=;^o@^~pZmhs%j6@UEY0ztoL`K~ zlSjRBe(96s2J}0~80S9S44y4->aoHa7oXgg?T56f6RacE#UTj2R7LbH;Dwy%^h6(A zpJ$#vPS`bNjhN1ZvTu}S1Qf>_+RHa3NFy_#FNl2(BpI5IF+$)!- z=lrahteistHYi{9`QOY8VK{;<9$@3y5JqG+Ig)GW9f!XX@w+F8J z6Vh45Ej}1ID)-j0EYayLvVKBaI`Yo$odS)H!H2a_I@oT7yxrnJiU;)N_(T9t z=5*XJ@+&w1O2;Q?B z&>!SvPsh%|t(?x+Y=J3ljhzQyEx5aY{0dBf6x#b6s?x zuBWT_bz7?xQD(S4cnp2GPUqi8PHSgOubUA`jg3YUU2CUHRi)V;jAWGJm2D)@}aap`I02*!*>qiU4ucw9fODNrLWh4$c< z8@xVbFDXF}Byqi`Mot=b3E+{A0}SkeB;+j@6kufyYD|k zy8rQIa(Kb}i(mdmdHk`TD~~_=c6t29xLjOJV))F+nN_*IdR>;&>#~_oq315L+U>(D zj9m)Gn3rSMsq&nEPY6pfp9Vf}#^>5JG}8In2G#5G?HZ6Z^FP>}OVafI>ejCh=> z1+7MY(o+7oq-Anzk!QB=B2O2mgK}}+O+7-kLJB#wrkumo3Z$A{FtSIGq@0R>)+*#+ zA>RB~W;eI+->sq+C*o`}&@tEh?064xQMZkZb)}(I5gIrJ&hEruf^w-Bz2dB7bQMvUJDTqO18-BU01+W3YNMIR&f56azn<4|t-k z@K4>$864U+es}H`%~|&lzH%L-W1P;v@~391pr>a#z(Z}>Lv}82WLwA1=eB2gTSt)b zq~-=W@g>j1WV1L1O-&D~Vvcd<(WL>h-foW)dCNV6NB=2vIX?qhg%i-uY)GQ@Eq&|E zIaUe5t$XH`?SjlrD)Wb_N(9~s(4dB=%Y=mV9SxZZ&`>2+D8Usp0nqOx3N72Io`jy2 zKp_xo)Eamo-eYRRHBHw+jZmU22!QSCeHaNvf*?$eV%5OtW#ZN81!Zp_?E|0ndIl>A zEP_ZiuH%=$C{-T&%HIZXDj71*-+(y4U^S%Tpf{zV6(|LQ%pKwOfB_AHYfx^pGE$t3 z+huYNkYMQSl}oGkFm_39(TNU;TR&EdH4cUb1TPQ(whz-<(um)f1GGYsU8`sM5hx9d zbHB3yi0f?AwiSHW@!~L{)Cy|d6H6Pscl5IW^i&qpb=A{za9o3Nt=>CIgEEXJgE%Pi z#0c1aUVLyxjpu6aUS$-lxkuJK*v0iW0SC&!Xy*|F95(Vdj%A)0=m-4I zGZY{}1!#>sG%-c&4JvF-pM!*+QZ^w9e)ElfnVjM%G6$bsFUSg99+c#PI`Y^v%Z*YZ z??gCy=PGva6WS&POZygVdaSC(tBpgFT@wxrnb+Afn%B{lkKVUO$hWISYA0qlvod+| zq+C9EyZqIE`t$O}=_pwlU+$22hB13ZTU%u68{hnOc=e<_x_sjwp5LI%&1Tog)go)T zmek05e5qs^vh4|{%xsRSMFBTkJCPd+W{l%J%s^S&;HhrAg_!`NP#me6vQ+kzEw{Hq zS9NmF7y&(<%r3mOsI5O5GA3(rIyhKcjO4Vup0D5o^Jbg^KjMYl8BplNfi3Pp$g)iZ z#~fp=D)Fz618AEAz~8j8@~oUM7iMP;$aIxw4eq1MVR-|`XL3rkFDNrsw2f}7aSA(_ zcN`z^BEZdO!g!W0G|Uag4ld1bN@vj0xp%;VQGbjs_K{f({R9UEr%5MGVxbGQp~X>edV*8mGiV?a1q>L2~9#i=R>N5y2% zERY7WB-=I_hYq}bC|{tNFieCU&)mzL`CCS!Y+N5N)y@IztZk|d7`?i;T2D-YgD8A1^JQdC zN+PTi#NEOtk@cQ@ni#(@{tCpm2#sE^-jgXx4)!imgnA#xYc?@EwstyL|Mm({gb+iId|| zq8^*-+P2vSl{3)-?J*_fT?R5{fXfr^te98kXg-G2{kr1`&gAJSG#t{OP9J&_o#0yL z=y96m#1t%YwstQrJh^o=IHkjJ`TWOy;q)v^~FrzYDuRdn=t1BUq6 zU!$+cv3hmncl5(G!g_jF#T=t3qrzHm!oVP|>!~mdXS!PkIP|VPR)Trrnr9^}%Ju3b z1+ikE+Xy|4&+2+<8olKn4X%{X=}`Q2sD0>?MT#EZYH_M`t@RZ~Z5W*`P8ho6QjEHK zij(>#JUu-hK>!_dQ|q+Dn|KrUIT0`sdTL;$hlNkcJTkr*A{|@g&_@Vc>A!_(_#vK}Zh>Xie;_C(jznJM*;T&mjBo_T6rqC5y1Rq^05kr)iI?rGd{hGb3ktA~u z>M}>JwR+yCLt2|-fM&)J>hnEwV-C(u-O!Otrj`B@RB*r8VbD{Wo{0i(ac%m5hkTf8 zjgZaYxpZVU&Es3G?622x6*3F4xDK)*4~f)qex!$ysq-*v)oi*A)M8%xp|1pZbnU$Y zgf=95=FvyRMqS<#P(*7v3F83s5ZdOA_6aH}3Jq_ravFPllabEJ?jD~uy zW9Uw%Y9Cy-tcF=U%5n&|8GNPg3LL@^rUmcmws9aCt8+@KpK+OGskQ~3$MNv^WW7PY z{zu$_hT0If89EkU2^zuKd=IU3hUB-kHxKLoey7eQC~@o%ClCE{ko?iG$jG#?DYtsw zypiG|#?VL-Rgdv=0*Q3_T%*q)m4Wb2P*DJI@_J8S1++>qg8e=$@8N< zTRzpKMj4V$f#DS+&9$xxI%l&BM(!RYq9HjYsxdMgO^AX39_Q+*LlQI|J#AYI=(s#z 
z{}vP>7cYHi%mQA(6X>5T zf{KmsD_mw+COOfrrrGB6by?0A8K?9z1#1zYO4DJCJ{s3;k~(GB6@J<`l7lhNIRwyp z1D*7keRQKvRH}}lw?%gDhc|*T>*IqUvQCLC?9#_w=@B(rAZJ#^2jS?H$VK;sdyn4h zLzS&UuA}K$>yyzWN|^64x}K`%x~R^8vTT9kV(sz*vL%07uGwi+aGtn!v9LH2zB7hw zyQKXshE<$#&-mMyMDC+rfZY_bZTohIhqQ7m64u9-de(T3m_+70-{B00NFY)NG>k)> zsnNtL;&H$&@{Q05EbiyeK}gYAvmrREI1_h{%P0k>gD%Ubzxbu{z3=`YOA5d7oBtwC z(Z+jt+tb^_VYz<)S$X=?cgpv_^S$zmpZ_X!dK1|kB;pnjO=laCDHBFir>8g^XJh0< zK&#GMn~*-4DFF!QHV4QsE~9q^Jn2szD)=w(gkUlZx;J&&(fweBd;m+17_QX23@527 zcWvMHvv#~#mFe75_mE7+B&{q2bc<(>JJuIKj&uywJ;6Lr{G)&IBnO%F+**cQA#*w; z4#q6Y&1{Y>^soUatp1cR|jV6_aEQRTq(xhjLShG@T=DlK#xCE_;p4B;*g?$vQbp#&d zl*bD*hdg_LHpH{+AJkQh-YTS<*xsx6mbR9pJWi9X%XWS5_9npF7 zrE}+Ux^ziE$!AQLL8UrZVFm2@QqMpE%RvL~^Fsh))VW!Y&msC#kbX!>*jSGB!}oJg z%uTt8Fsld}P@m!Kqd}1m1|VA;7z1H2^-u%6NSpwFm(UeN-0Tli^*HgM1zeTIl+}K3 z3K;JhL%yB6Fuzkymx>pj(H-4c!stY!a05w5)h$p>;VehKsbF0oO@~e{3MbG z#04A`!gP?S6US^ixmJH_9QDvWX0@jNa~?F&tDd9=DA9O$=Kuv!aIz-L z*y9K&t|);{P>1MqFg{K6;h{9{A@=B5ofr+cLgd;^9vH(|B0Lgan$nID^~j?ojr!n$ zC-yymZsfj5`pC%ARFv8j0q2@Ab3dQJBNRXbp42aqU9tlznT2T>V+A|gX!`IBCrOF*5p!x7<1ssD4nu#Oi;Src@-Ffh9qQa>D(1yZj=^VPq_3!nnh3=Asv`1G_7iZ@yVR ze122@_OE|fe&=8RLHWevx3k`UO&c5DzRr6Xu*XlzhDh8)qFN|I+0JSRHjL2Q|fw6!QNpT|UkrU+7f=cC7IcH|@ z4?ah~1#u%1=FPnN8f!k6!A6s=XWPc>#ci4LZou0fayF2DJ(mr*4xKjYbZq$tO=_W+ zX;d=-?g~(*bSThwh?ZwjfEQD$^omrMkV(7wnJaWN60 z5tcH8fz(*4w}LxXna{uo^kTMZl$#|wfXwN3HyDoxFI_yEWck5?3h=cc4qZYn%y`gA z9AYEvoX3tHT^42 z8v~c+RfPzi)MymLgz}}R7CmdU=~Fysj=C?myCuV>clz~-erSKTPG;wW83JU!27+QG zG_i(&>nk5?f61_9+%@xOidjcT`7&##jEL{*vGQd^E5GVF`h3oph&wNh7=4&+J4AIC${L;sw+wIwc9Y99<%vNX-ubn1LN z?j3<;B5TH%^?L&u46tJ}C^MsEAL^z^prbBE$5?}htJS-;f|ZCM^+aaRm(LJ3kM2 zVpt@mHbUWx=SsVJNv?loElOdevAiT2j5Q^ww{1$eZh^p95+Ny_aTXYJz|;FoIpH;# zdduM>qPUDx!5FZNQW#CUm|9^b{B1Qiin6WVxCVEJ!CyfW4er6s{}>|Apm#o|cTBw~ za2X>6REc_>dqW6lxCL(F7P28AZps-0Yro(Z_iI!jA@^mEzLF??nXA7;LX3+=Kd$jH z52BoGgJEt8ph~Mlq@bwaGQsH1<%R5{24Hn)^a4|zY$OYjHI}BC1Op$a;+m0kl_nZ@ ztK(B43U5L)+Hif})N~G`VN9kZjqqQ*nj%yvJOmLCZJS%@N6^vHYXNkO6oHqXKKNq3 z^neGhLIXTY-BjXM{xS`Z1+@^-Epcx7Fgv2LxO%-TudY^Q#@KF%Cbt;aK65ekr{L@K z8Ckr(zRtV^jqK&6f5F~7f*`7!BJH3MvLh`U@B)KD-#NOJ`~22rl0DW}Aa|F*=#;1p zo+3RML5q9%$y^d?LIY? 
zqq;U|=3s>*qYUOC4vic9R5@WqqG&T1p4`_!?(^tZ~`;l+Bf48VCX&&64N)-{rqELN7(V zUWhyOkE-jR-W^CY>?(#jR0Kjr-Mn zU9J~qSa$TyJY$4(oU07@nM+scP@@3NvJDKKvnt&}Fy%Jx?%u*Ri``1y0kVR#VhLmE z;}E|6WK!P7xT^#EJFiqQ#hA3M0!*7}g8|TXqD>7N_q#@6YRK!GM0XC-qj8CibB`XjBbHEZwJ;sOUh7Gr zvvpQdHpL$4=aLALfAkcKf9p25*DE{+FLTfc9SQ4cSBShqTP^eg~(yYswc ziMEdca4ztWzYz}R8d;@Xja%R1ovsHMFee4oe_YH|;sv|qP+*gs^>K($im|GFIY@@F z=*e2)Yidqmw(9g2C3Oqp5y04k94eX7q|dQm@isb5s-MoVec_4GLf^CTHKz2vMwp&I zpQZRr_)s_$eCNV&;Je^3j-UL{NVXf@ zL?sHf*%9=VS3BP9hG;;b(D)8=#Hxls0LI}k%7lgTTyW4?(IGMeAbnCd0{`TLYo;DM zYC2NhWv!EreTEzf@NV~lq-@aL0BdF#nzB4Wmfk$Lgn)AHpn zeVH-!Qaj+`AFf&Vd+-^)$Uo;LSQyc0 z4voh&n*r0|S|MMao3I89bsThlH70dg4(FUcUcPco(i)En(CU?aR)96y(vckvRWIhJ z4y9wT_)h*bnqZyIc6WeH28$BVFuqie>nv%QPrw6o1zd4AirSjf1LI7a$&&0EDGefW z;HVgu%`mL$D4_=~=bCBJusJ+yUv!$(TcduTGjB1RwAhQ4ywn-T<2G%{rXA4+4WNs1 zWWAMROs#?Kfn6zzqXUm(Po|Q$jc5o>+gsOSQJ%|VgYl#cCKuSFx^qYg3l|wfw(v=J zomPV~_+$Vz=$|F)Q?u1|%w{Mw&TiB8Tn{m=+60|l90BO2{n99#h4FijDvz@Zjq14# zU0pdAD^Ea?HNgq zFg89jdIaMx2HkQAc&p?}TTWY}aZ@JliFF?}l0ZAOOka9)h=0;LGFKz*;}PvQYv{Z* zD6V~}pvhVw*dD3qx}$fiaMmbzJgs}{jgq>INrRf=th9w90ZeP=uIFzRXST@g>QSE- zEaf2s^lfD0b}jer%1_sFmw1#PJi94dj8}Xt!2=~SRc;YZ-SuGckM##cgn+{Jd|LKI zxfWi+x&U4tr;D8E4PRg+PA8*k9O=gtmXYPv^{h;1*BE!40eGO2>YeN0xX<0gTm-C} z*%ZLMh5mhvW>rk(pi<2UM|rgJ6xq;ug1)Yi8oiAM(pw{7DiGyB_*LMM+m2knWjxs$ z!{>Evl4&|mc&rjE`OFlfkM0$e1TE?95{!|t^SgtmrYsZvA(YaVFR}&jjmM*MdU1;5 z;NEt(Rq1pXQQ+X@d}^E`{#it*L*ROtK&ulYkj9JGVS|=Ki|S4-V4_cf$>_zgb4`tw zY7`+4Tmo7W4P@Oi-aPYx#^)I8moMh!Zrv`w{Lg-;eBn!9DlcALl@I>vyJdL2EO+m` zER7GRJ#|a@=tthhksX&Nj^@)Je^AEFUis+TA1$51Wwxf71(}R52~Zf9fCEjP zlZL@s5sRCQ>I6$cP5E{0vBgj6r86Pj)n`FXjJboqJ|xCNPTWlq4S@DNqdc%h)H1E>a*g_wbM|%a0lnyS0MnH3Dmqp9Wvzw>_TZ4f^ZDYoynMMS zFJ3Ql{=VD#+@g6lXqC(JQ}|%L3GLFRCtNZFKpdIiGYwWU5RBP1S#>nahfd8;ogb5R z8ECOtM?WpF^5uSP>ouBf4XCmx4@@s>Uy)y&G>xuH1^zUeu0?0)0P>%jrIr!3wFl4{ z8oS3_baUQ5zuoBur;+cve8AYzJ(nsxURgf56e-U{C>^9cl@6VHD1gqN7{wHgfjOya z_q0aGdADT@;)YHrCXadQr1{8gqj#MmYXoGvhS;@!4UAD`9^))6=s&wAz-P~LS6>X8 z)a^Xb23a#xT(i46OiCPVWPI5k%4AdgibLYkj+kyFkA|q4p^?sv@?*Nc+Az4oPq#+u zBv|;Jr<2l`c%nViHnkdV5$)sD*3$)*>AEzhodO@Ap^k+Gj@rExfD>?pS5TYgz$$15 z9h3=m+u&0CHE?Z$*>S-5F3QYZrqbOG$7n-q`IUY0w5PLbJJuxV^lPtZJo;!h^H!Op z73Ea6D+pO`g?7rTJuoW%F*?&d13=cb9)*`t^R+_|MD+0NNt?&S@IbY=A*e*+TCWp` z8g8EfcynNh55^Z5oD8=M^jV*9k{TJl^C2pE+M0Vv`(u>XwG#%&#E*L%0hpsVm!bv$ zGW(EGcm>Nnmp<6vJ<9HFcTG*O_wau!(d|?3O!DdFB*mnbGIl*+!PWx03C_p>;6EKV z%XsX*W79kK;R#h1Mi7x;G+d)fy5He5P7QVr>|uTy3jkn1pT8sGfC_z8KMe(Stqa=L zwGo-9aOf?%-_`F_Drv-L2!_Q=0}N-cH$>oIG^BIMWNC^U@X~p*NKwOiun@U0EF;T-mKAsLYggjrIVFkYOtc(s7$ za_&zndjSs=84;}Kt|NX%PiAbAaz;4>rO=Y^1VeXwc%2180;US!8S{@kZK^X&y8Zd{ z>#|+JXPf|)(Fb^#o|En#_31ePDz1mK2rQo8E^s9!4KV41xgMK+**I>VhFGJOq}3uj zwq=o(>3AbgPjVAr9g%4}E7`~AZ9NACj2H6wO*eqP>s`%UB(r)|D2AO79b^0lA;S~;*C}Sd+CiKZcAVv?+ z+x6EN2mx5=&@jhwQ;rxbv=odM(<#nR#SJ4JEr)>0e`!NU(2S8(Xq&!f{zIIz6xX4T z4QM{SS(evVU;zDUz+3_xArl@ME#?UJI=DI#I{Y5#>N>RaQcuhb>Ihiebacl7a=ls} zh}=CZV9FSLE`iGq(BjhLl+{E1Z)++vj@8L>J~fpM7QrJQOk&nY-ndT8NED6SoBi|) zYh=uNS!i1`FQXC05B{6twxFyQViG){0~=&G&(~+{Ix7}-iF>Y9XUh{)upqT5rs60x z@9U)O&!s;F)*6}wFJc|ho+o+gM0wtXOOh^|sH4f4GFS~l?&l7Tm(NQ;)xsY$1Tk*V z&iT6&;Gwoe2gW50#39(YMXr_0hRcBi&)}zTxq~)#cbk zz3|l~!erKv7jaTd3vO!zY?E;`(H*3N&lwNbW=ymPA*#+8s3?=#7W#KQOq015CNRHdf{$2MtC24^}2{;u3Q7kT9Ge(qL4eSoCu9fKJIWF67VxZ*f4 zN1yU(k)+J>bKe!Sb+VW*{nkQ1-x~I9WG*LsnhDM%u`%GiGVg9M^(WaYbjeI?vPaU3 zYvQ;t2yLHlf-61{OjJ;yLqXIV^j%LY5_BUGT!R245C zjltS+)+wA65rbVvX^|=odv7pdP;DkvQ%#P*V4L`;&Q~%${p-b%Sw#hA}k_4lsCN zhg;IDL_g6XV?3S`<|7%iZ3tTXFdjjDh+!3IUQZWUgID0pSm@#UNJtHzZ>@ON*r9DVuvH3n~$tx)bqw)m$TVWbH96w+0Arao)HmSsUDDpP8P|miAt2Oi}9qKk9Amaj%IVd 
zw$6il0Xe7Ulc~DM9X=1k7t9!o^3Ir&t_5_P9uk61=wP{Avc_>pWi|4Rc8EzR`>u>0 zJt^;g|HE=QxF}!zC;y~;^5;KQrU>!uz2~Joe_a}{Zb~2h83X*g_n(#Nt5=KwCkEbs z{8OLAVYy`9?J|8aFRe|peChLFB|1DqCeO>pHEZt8eqaIjKEogww{YM!cz5#7F(XeF z!!B?RH|VJmj}yL!4dx~i0`vfsOKQ?qrk!3g1;-RbOjmVpm5*{^aiOdhgU>_D56bx& z4z*4rG!iIH$LGug?skbGOC|hQmnB^vb=L&+fCp;3a^yI3d@Z^opTsk@)CbAbBXlvp zIQG&r#vS8(i(yw!jz{Q-&<9(aI@Mk;jWJS8R%gOFw*`BM$(6wfF(rj&%%`sB>pU0; zxg~Xz3Tv@g`P4~3aqpR-fvU!@KKnz%sFTLr=*tJ-SSg?EpbuE#rw?gyUQYTB+_hF? zwO-JIg~>Px;3utxFTpsqNqT}CF;M&$XjFE2>lns{AQvvbq)sVL0Q@t0w=a09qYY!= z#{pb{cLs3q+yXjpP;cS@X|SPrPPPMgqS$=rc-%6U!?lR|T@JL6*I>e6E6YcWPvhz} zW{fn<8S)*rLOG(X6pcgt)JM=XQwyF07Qy6dOt~p<#-IE7#Gr+KrA@tY9|uRiwb37g zE_tc3wq_*bNZo|Qs%<(RvjLvfv4pQ4$|OFz{$Cv;*-D={G4~wj1-AJMOvZ^ba?o%f zB&Nqr56Bq5PyM?W8f#ODm48|x7DV||K8uAp?FcC;g4-V9hxgKV!Ku+8T zQMjMowHMFq-R?|jRbYKceci5UW8?Z+U2m2Frh`;e_pWXMM}CxL7{i$PTjWz%vTY?w zr~=u)k%Hdru}rL%hjCVXkBIvEqe12nW%gRepuig`7;U)Lj2nuVIHyDlrUNXw{Mvb}$8@RS?`q8XOeP{u1 z*I?jH9yf|WdkWGZ596@7ZvwRe>-n~P_+nPxfBG5_+2;Iq0Z-vcC9Qygb7c{pDRb){ z1S}m2D?;6lXT<#KYF5@nGD#OQy*}6FTzSYWtt@!rw(|OBjWGpi1RDX4x%ptC6zR~f zmGjMEvl@YlkEFXN5j8qcR+zlQkpm$T&O0 zF~UiL`Z57|X+k>&#>=#^^0r>pHC>&brK!bZx6J8l3eA`N+%Utr-W|7Ps}33*MYj!T zF~o5aaNPYy`w0 z3iCryQe~`ewW@*ItX& zwdwoM%l2=-SDLS8r9I!5pMB>?amaj4-gs2reCx8bG?LKs>L(wT&wk`H<>JXlkS}<_ z=Q`a+OwnnHBeLCts`^v|1C0>$MF-7bxQE_I+G|sgW#)pvt&zFf?8R*SAMg+^$Uz0Cs%!W|Jkg}k^fE;2NHnP`E;j-dXUT)Jk(6+dTGpcl6<*L zl7K^}1KLVEGhZ%kt}dw zo^}2=n`@wk{sa7CnzFC`lSUQCJSa>$2@w`7%NzcRp-d`Tn!+OQi)=u{hohl&c;oac zH)0Aj7Q=$g+>kA*sWaNeIn@cw$(=YwE*bf`@?>Og1~R)1gsOWwF{1FKPxM|htj=`fH$!oRd}IGdp)_KtpB5!8?NU z7Nzpj``u@6W<*(vV?e)o&NYg@|Er?wZ)4Sj( zm}URWjuvtJbmYWoY>EdG-DfZ5iB<9*WnK51uejRm8gGdRP1MfluZ{({Y6Pz{EH8CT z8YkK|`GjMl&LJ0(FYn}s4<;K9$FvEpp`YV+x7RKDqjMw%cp|A!(lm%PpqMSpJZbQi zo0^eOZk&&Dkw6stAhFCW*mm>Vzy6E=E`?eccT*?P7ljlB5~wS)ZqW$=StdwRE;@k; z_YtbzXEk<`i^g@==s*R~C?=w&GsQ+l>9{Ag*=G<|92(`D+SJ=IC8~fTu%@lHR#D>B zn36}3Q&Fygr+EVQD6B_7Ru5QZszgu3fgspcPSuM+zbmIitfq@y!|nDD5ZEG|ar>NFXT0%nTOs!J<;U87XWdKh5$u}fGFWAPE0QzB~HH(hCJ@!||)+INi_M+ign z^3|$bT`d@rTWlCBbTNv~_C)5Y=W3*Blu)aHp*J46d`~e7pSitliQ#gdUZ(@%$#Dvd z;-L5GcQ>F>0K(bjO&AoF(V1AZ}nw-|bO zjO76evcQu{edg)9n%k~Cti?#kYF`hS=?^WOQNi4jau);a@`+168kOa8ALU3P5i~eC zyDUHVg)f&+^ZxQTf31A^mw&bNFQ1etGrY|M*v>@$_jqrCkpLYS6|B zhIRDhjqGS>U7VJ)^9f^amHqmvY(98e`u(%A$DuQAdj9FR%e(LW1ZV$-G2X%p^v6gF zxlqQcQP)T~E9D=pHcEmPKHbgFs}oznHcr)gUS9J3kU;B-3mv8^!e_ej-cmKG2 z^P9g>KKk*WOI^fvyDrn4oAL~&q^o1ceaeG%LDn+BzQ}~_D;E_kksE;%+UWqe)i*^{ z^evq%rnBPMWs+o<_{_tC-h4DEAA1`Icck-C=aLk;I!|-AW(A;mW{6r>BPiw}V??gv zWzC#bUTN1Yl(O*m;1UejXw|hDuhHqSc=(G$uhAv{86-fv1BQ}2Xp3=+Uj|a#m!ezy zME+c25?}ND4{%PSm19b2!y?;y)UtF?m#Xo%&`nx-h>mqD;*-4cI(1Zm^K6AK^kU?+ zO+V22?37>wS>NOAm>snZv!LpiI3P~Bm1|BAVI)~IB`&Q(0R9ut4BT{D)j^$Kb=-hL z8rI{U#Xt2z$D?AVG6b|RLetO~X4(sjHEz6(l)m2! 
z#!drmkff90L0|GAkLsmyGXiD;bK)v*@lQuhY_w|rmGh>y0)+LxlGy5EyLd(uW2f?Lmv(D*ExhK*MvCUH!=s0@x{AH(hzuJOP%>SNB;#=tAox9s5^Qjf6o^EDnXpy{)vr_&j9Ao>uP7`I#9oVsJRz{zK~%OlT^cmJ{qse|C-Lrp763->{f zEFy~Wb3EN-6Y`i!L{Ep$!+G?GU9-J>bWz4ceae^0DTuqLG|?@xYWi)?XUm1>^h39P zzkKE!zgm9pkN)T78{hhuKZcsICRE&lj(rAYn_9fNl@t=eGn`} zMqd&PX-Krl@2NLg&SL~z9!hGG#-wR?;937?rTO8ey!GT`0x>+vD6}tOdfBfH;|ME}% z+wx0a`?d1;^3BMKm@yolmdnR)m8UPBm!JLg$7OZ(tn{G01(4Fr~P09>B3VAa$1PA`-TLE2O-+~!&_ zRA4tik*3Oxe77jex#*1MfhZ4u)4l0uMajx+anK5L*J&le9&oYkn*KG4!&&ZveaNmf zR-aPE4bOb5qh`gji~+rT`9#sdWE7kbZxvCEV=}c2!28^mdo#lk!-@7NT5EW`TUn@l zaQW7Npm%Q|)d81q%!)DRok2t8lgkhP;QVsGk*YeYUb&2=i82GEG1H+^v|L(=d@*iy z#rs|VH|rHyQO4n&1*zAw>$K@3c6C~O1Z`|c+CrO6(a?T*<;f8eQ0 zXMOF2j+=5o_xoG74r>>ki&$5Uur_JE5u=ooO|Z>C%$g+w8+Gs@yJA+t2XKk|*(Cu@ z>XMV^EL2^qhFqu0V}7+o+F`L=o=O)SJ?N6<3*=6@6sMIhpAlhz-u$g^{`|jlf-*qn zW*;&Gu>%z~Yl09uR-KoH2ss;_#&X`V^!8E($+at5&lU)Kl7hVSTwDXL>-@f)piIRkDB$fnD1;{IAI1;cGD_#1Q^{bI#MG0Nc+N{aplS3K0I3sd^6LOl7!6%dHzof7Dnvg-?OVM6tR?E0Fed>{2D@(P!8C3?QxPn} z^ZfJ6QzOe>IYLvP!+dpx6L5{O25|NM_1yIuUAN3y9%FJm3J59-9hnnz*ZZ~~!P&Jt zmqGMoEkvA*#)(M7<2ZB7Rc8l*y7qLt%`wrryhs`{$}C9$O=xC{*TaXjU3#%bVuFjy z7b_eDBUDM0NG*Wgr7h-v(kW+W?gqdiVf?0IeI~i<_eON*kIu?ya+c^&Wt>e{<@zPg)Khbb+I(vw=K6Q;itdxPs@M#Z~s^2v!D7r(e?-vB5llsCTm+47lh|3>OZ zp8e_Hl^zDQqZ+=xB2pxR5u|j8aSl`9h5TK7>XY!7{*cGj>s6V)|FkrTuv_%gLN^|N z_LJo&@BTEk5=MVM8dYo+n5(QfMvW%)MfP;!G+Yio%L~j;kl%9eRq2*L{@?tc%5VPq zzsBhGL+}6JmsuZog1qz2kIHubs&qpO=&HlV7$hT*75$7c#R0YGOZc-$%X2K0JtIJ~ zR0b-m4g7bh%jZG}gz#|G@03rxH7SpujF?OX)UX42vo(VP3&%VI!tA3sr~_7OCCsvI zK`e~PU{ceo0xfhf+MmtLCNh5<=Mnlj8Cy6A769oL%#-uR#KEtVVqAZyqG^^nm0{W22LNZ<* zd+TnjZ2;YggfaHRNjW_mGOs>u5JZ7T%G-LgM6WT%=!a495?Wrr)`>RzY2>PWU=VpH z8kd&zt-iSA7vslUtT#w;jCI!zSmFHqPM(P^;?Mo=4DC?1vN1w#Wh~BByoBQt%8~PZ zPUysbv^+zFdvw4yICGxAp`$j!u=a3h-2$h4q(YxL%QI=nXu%_QcF+g+8g$sMQlZ17 z?djnt>EzZ3O{cOt8!5gO7h+Vk1@gdvMFZ^!33!mrT03IK6k5kd$){$bc)StDB29F* zn&^@EyJkN3Szu()ZdP1l-I-Znr_Z;1UYB{2eWybmU!C@lCw*5;N=RObYD!YU@_RiM zF9wvDbSA_?ZKd#YO+m@H-!2}S_qC2azO~=A3MQDP}y@AR)mwbm|@^OVix@AWp6dWcKkE)eF z8aQPDMWij)MLjmuJ>yFvmh35~ed(601d}o)ZnZz3=H7Zj9nJ>#I~tWxAk}Lf09-vj zmn{N;FnxYz0U%e>mq6|@p0nA4{_)ru*HEvWdvucAWpH!tF~2YgdU!yH83?_2*DOs5 ze&q9CD!=!~|8@D;laGsa|C298r_Wfw^^M;s|Mq|Tzb~KpC%;;Doj!U^r0M!2@}cvn z2mVlCr^A$!3jsmT3@}@uCFF8dCURv_=fL`b&{^5KufA4hB2#>ZptXFENEF)4l%o{X zYJ}|He(qfwWevt)h5qzM<@_tZP=?6+iy!>79A7@mn&@PJ^QXQpW~Bumvj3X4x>vJu z_~AR{)jQ8Ccwk5r?CRfeoOcmu}6Zy}UJe zrlnWb-Bg2Q+R;lk#UerFEx6G_3%K@AQ4Lyvv4jhco0r2UCs1rQY+3OaM zd2JIhYT$g5-pZp$P1nj>Ks-;pB!Dr}a||H~yw|XM;FlRf38kYgo_0IqU}d&+ioH5F z&@x+fc^e=lf(#}JFY2mIs6WUQ_wXri+JjGK1CETpqfl##ly%2Sa3O{Y=w@

Qe&`wO( zeksq1z;!aDGc*zl4GzpanUxFfNkrOAF!^8vpm&YjUD^c`pjGB`AUNY^j?d;jK0PaA z=N6Gr3 z^J1)ZD`VBedSa*so=fu(7~XY44*nj@6>=Ir998UXofLn2qMXlsj`8O|1w|vKAb5=) znd`!C575Kr7Jyc-Gi593t3J|Efpvl^k_C0Hf z@;;4~C#*d@f>gsdgyCaAb;M66=^j^$$~3To(RPWah=Az`Buyk5-y8R;ZhA+0aCli< z?-=!x^56tMk8xHsWDGCGAwq(Z>L{7Yu3Hr%{Q(8Xb=N>hVVJ_;LsUErW3%)Cj@qk+ zBuwJoEsC^QSWWM^Fg^pKTB3rBNv$TGk`$iq5M zNfQLT&aIdp{-ASKBlnv5fokE^64{v(9qAk;%{dvCU-P^gu^LSNqMo5sTEAY(Gh5crX&gL_?}gQ|w!CZgC)x21E0&reRv zN51?8j0KU^_kL1_W}{wQmBsgdT#mD8Il+nT!JF;$x}2<5r4Qfl{{Bbh)nEO2+3~rr z!JFsh{1cxl&GE1d-#9Oi-Z(3>A3ZCd|Ant2-+gFp@ehnsiMgd3cytl4U(PJd)QBJ> z%DsT8itz6heE!zI{;$ff;4nSBjJ~Ry^8f!*ca^1XZwxq(vW#Q8H)5nA)sfclsSJG9 z=z%c!C6&<X z>-9WF2ocMC1N8o%@7rx34pSp{Gfg@;)_BaJg?Qq)JWs+gn)(+*%ycLpI?iSuJ)X2? zAOUTtp{@$p?gn{?PQ_p;qGMvSb*Gcf!V4W1J`}43cb!A&X24-aMI)C(THuGliJ8qf z15C#3LW<}J0>TL}+vB+1!QfWBTjXuAo)Jlk_e=VPAMnAsixWoWF_>MMqHW+uXj9kR zGkm^JDqdNL(E7kBGYimxu~f0srhpw7%^3O^e(|-gMO%Ahuo=fi&_KSbNxOg1PoDL0 zyfeyNSO8fcug=_&37KYrp`4Iz z)qc`H^s2!CI`6!&QLU!g?*|#gPAekN{~!OhAA}&7!#uPP+8}5HRUU-pe&7tqzvERj zs|SI2WxdX8-KryaDKIfh2+awwXqq7h3~-$XUJTs7%-}pn{tgGhb&C`{z#q5QB;x07 z1ml^)3V{k^n$v?KOzA8Eq%}$kpJS9=cPWkgP5189OWb62y)0xtMt|M+U9eCP4$!w4>jB@oZjD%8W8k^=@fSW>9>4KMdHK%! z<@oHw(s;$#pS~zzGRPcu%7-ji;v~9{4T}i#m{hQJL}rqxa?C z`2F&q{pRoGGqv_L?+>Bxg6Ykbhf4N;cqOty@7>B|^m2=SDHGDf=t`i~;nBF6?sVA$ z-!WK~o&iV&#%DEn#=KFya_3=8k1sFFTTe`bTSVzI0W_Ru)YO?;plbGIg&t_EbSwk{ zTsZ_oXP&myb@*pcp)oRJT6xxh(RY<+(SYU{kLXuT_dfQcjqMO?tvi8SwBh=_Hp+XY zpK@XFX(mGa0G|wscuQ^_9kVaaOXo?OpflhSjhhFI*Y22W3^};v4lyyz$k`PKw;I<| zFqslO$CZP>ZW%>pn2R+SW;2pefqtG{U@@ICnBb0f`Zx&Lvdh?|v-U|@(W#4UD96lL z$p}9nM(Cip(XYXYynM)Td)S)GNcAXpKT%SkJ2ZW8_VS#1>CJfS!-$wCo$#3*av@(j zI3Xy4gHU7;{4a$y3Od>wA$Jcr1djKiM}xp6Ed;|%|L9d1<;Q?~1dq%+>OeL5LqQ}nm+5}2wSoSw3_V;eCOvXf@wV;!8qy@86qS; zZR%{&DyOGJt_X&dJ1Kah5W@ZUdUpc8#4&0%t)4T20hz79h;&J=5KwQ4c}umdxj0vt zrzgeDI5HYjK%IFT-Fr%r$I@Dy<}>7_NA`5W#2hgyvr$MND{IlA&#Bh9`-oOSVY%`d z)q;?bxJS=+%Ww>HG2+hmX0|U+Kb)4A&xz6i3xMo07u4UD+6 zvjAW))h@dt-~t*mH@a|cdY3Mj@kx4L>{%s zuSHv1qDRkp_8IYYKFW!E+N;v=Mme%a6+H@+-B?MZppXq=x;LPMX8FX;G`yqsSR2)4U- zj2GINX5^os&LHa>S_-W7g_`n_y60Can{wyn(oM(M`{T*ees?rL^xI_uU9DyK)HlB# z9b7zrRhrM9mEDiuDUI2z^q`5F?9t0sq-zLIa7vqiUlWijPhXdx{lEV#Clf|1o)F!B z`Yjwq4B+CneEJJtDeL!Mm(PCW)5vZgo|w5~Oz=1vv-tw&6lcdNVTfG=4NV!h=OKLT z7ynuLga6`xnRM!c5&rn~Rr#~O{k!s4-}`>~V7)BQ?l8i9?u%RUvbb@zmPDTz2p%v(AY7U^Dn&zZyB0suuiYUNo8v0zZ9{pw2*! 
zf2G|N<6%HdZ!?_I2Yb32TV)`)1)e!J#%Tah&&1HFHuJHJVKu{~Qx^ls#TuMM?_BFi zhXyM$yE>&b=2#h{ztrUogA|u7)ME?I-1!4eI5Xx41!%vn?MpMWb7Gw@jj=U8I@>AM z)edk-A6|{I1 zFw%($&MH*aN%^^&AIf5iq1SLgNY|o~3k*C8&Sy!r>Cw=yo*ko(7w%RmqZ*Bi6|_T) z*_h~Wf^k1Z2q$B_Ukv9FqppIPLXsI8TlZ(HATrT0>Ba5$dg6Niuus8-EJnRXXoe-G zO#QIY-~zo#`c{Kf60-rW14=iyvuN;#lTjI+jm!D@DFzBB1L4{qOeb3f>rw;2fg@lF zTaPk8#b?gf>Pi9Lr>LD0wfh*$I3&ztIy;mPpD)TR=gNC%ij1s9aTaoEIO}66n`K0V zsj%&=CbwE1`lj%Lal3ZCTr)4`I0R&Tp1Y^2X&57MygW+P)aSKN-NFdRF;kckCQmR6 zFhNgg6)!R#sKyK7r3pPswTHWz<87@r$63ktJuchR=G}3Vf~3K4oNhgdOIld)XUf!_ z1pU!z0MLH-x1J8?US!&|Pu><6nf);mV|o}wc+&6cqtX`2$ec`wuArYX<(dA=WqlyV zvlYfh(BPFrFOZQReE07$@;`D~<-9*Azx%KLpnU5;|HCr4{0MY7$rJiaiMfQLQd=4C zHbT92AF9*Gm<*B{)KOA@%wXsQGd9|U42#-0$g>7xbOs^gP(n9 zH|Rr$C7^cfmHsC_TAuvE&&3hhPXYdy(=t5b%s|)^4GC?U`OsNMEJ_=|JT6`lMy|@} zu3Hv=@i*ng&z?Gv(nVGWZ+)bE>@%M%>v!KR@BY93hjR0spOkTHTpqviDKrlwkDgm8 zzFxYG6TssWci3ngG$<%UmI}(*N6R1l$^SZrMJRmt*~{{m|KZQe```IdnLoQKAHM%# zIl7$@TIg{3k{s{6_mgr<^e8K=bI<{HDa|MF?P*`Jl5RTKCxE!RfP1Wv@&887&6vW= z`5OA+3~2C->K>i7kguvc7D{=SCnzdc3Y?iiqj$3riK@{f4U`=wRj*?qkGMTcDK1$c z13tylHEEB?)j`$)O|6l|i#U491+kStOa0RjibiF z=#w#S7tlfjW8h(QXN2pvJYxpk}4>L0cKY)96&?VR`YoOOrZ}h-SXmxJP0^6=>Xvep%Cu8o=NA8mr zF}BRt6Wqdi`FPMFsSZk(C*i+K8jf8@wj}??Jozj%MAnRUJv%>Z&pH?AvisyA0)gHz zPr%*DcQ)Vsu(V&u3h~7`ckZJaXA_Ys}Rfe@}KVnFIp^^qVm$>-MKN zQ-f5TI%G*5sY?X%8hfd8$7edf^2A`%-#z(Klu0L955WiuE*|6e5N+pyFc=p-J-44} zRBNGxznfNKvKY8lkBElJRxqmrQ zFAC%nz@l7$i^8ebvIC&pCn>=sa`r&Nuo1)69+?*HVBCD7n!w}nwQfl;KVwB|tHbYV z>%9`Aet;ni0Qlhx3gk8zzGA*E#CZY|rVaTqiqLac)G&-T!Ts)Kw%EtD9=FZ(FqEf{ zEU@UM($sFOS1_F)F(Dcf#9hnuGx?VX{WH$&pC&Rjin9uNI2@MA_`|c&V1$tB zEY--@l-B?Qd5bZ9@A;~{xSEv}blG_}HRCm9Z)IvdM_z{kx}vleQu?=wATao0UfeBh;q3bH==$wq7w2A>&>UrS73$Dpz5e z1$Cy+1WRijOrL9Pv$G=G4mCBjt^Cr%4C8)v^BNjlmGA%H`{f#E$8XdeQ;8TDCuXrwm+oLk^Ba3O-sDtAcSr#+duj;=ko0g?25z>oxiQy%*)fzxcbX#b?>! z9B1^CZj;CLd-0m_{?X^_TUnlI|pzQlWG(^%_0c+KnMmw0KM8A%CG*` z@0GV7Jw{_nSunS&=da85)pa?;Sv^NT2HfKzMmo<|0H1pOdUjo&zWYw9{&gbN@4y85 z=eAe*cSJu!M=&In&VJATvegh;&X$LA?c<4;9xh@OdMuC6`{nWDD%+tae9fBk*y6pK zh0M+Z=2RnIvoIKb=5)JX!IOC$TGZD6;#gSsVl@8h^|ZXay3X@2l>gLY^l`+HKQjP2 zADM%WF~-=jS|cFbdU|CKr(Xk6BRFJTU7=qMpn92gVZPtv$j+}ZzR_2x%6CSLKKePk zhM=KOc+|KF_-GbCX*2TU-~0-1g448*(7``L11Hr;VBqD-vs<*}qd`M5VDQ;P#yk^4 zi<;s$^gFgjS%;@tBL;Vr0hh_dTxHviEk3ju*WS4U-DKXpa}ET&wC2ZPO*^L{p5SOY zM=Q-Wdajq()I7IKKiYMkW=4P;haX-Mmhm~lN}t+#EkGPEeKS9vc{D7jc1ZxD-ruLg1a%pQ-RaPRt(leVuYrtMW{@am_|ma*my1s! zb&NV&F0~*75J7e12B^V}5ZCkx7rGXu0A{d6!U{)HL=h~bRlOGnso|y#-g+@dqG4ni zqA^e>E>rNrjHZtoIHC(vTb_XCsdN@?<+%ljo< zA}}sz4oSVk=*nPJ*tr85ru951!nDt5G|XDEQDn2}KGdqjsl?t}y}yhz(K_)Fp8zMc zOaTU-;`Oyh%Mx`#&ml&|f-?>ENqbP{i%t39*|cnNih>Dpo434jue#2JPeVGpILmey zmE`(`r^l_hmiP-^7)4yYzQ$3|=`j-Z0WhAVhjE~tJbZ<|HF7HJPoB=oGaoU#TH!gu z3j|&RZhA3kv6Ehmkc#LN2(9$@xdxLSMmseJrkH%-Mz=f~V*pWX(@RDtW_f(jk4}Y- zrN&-|(5-N03#>-IxEPkx)6>Y81wpgygTMRxe^|cqt^cen?>c2p)U5Gt8(j}v+k`%CymMs_kxh{xJP>%yU>))3SMbnN z3LQ(%gLVcwnY2wGblhDX0^mk!YoVpQJx?5Ai4qxu(rU5?*< zr!*JbhvTxHPGjhG`nEViw+kZsK`+}g+sLBY-h}o;cyROAe~xjTXB_Q|QQ4o5%H&fY zEB(`P>0O+b+nY^!a{j1XzWH%(8qsFOgs!y`I9(b}qYR5?UGwfd`9%5VKmF&KpY;6T z!`Efa`+y8x{qUz{hBNZ=$3H6{{_JUa{`^@0P@H@R2W4~hB6S7goZucz=l?oz*^Vbr zs)*_@GUfL!CmAK@3!lh4OhOD;9`mfThJ1?! 
zrl)m4H7aHRb&L%blKtX3+wzbc_nLcPir8dS8=62LpPs1$k)<%kxx8&Iyol2xW@yB-Ga&|XQ>Mt1yJhUlFaK68?_t!#h3ce2b>XpCdG;Dbxy!8(LoCrx>Ap0=k= zLuX}o zHHZjo&R5oe14cF$11S?^EKIIzK<-anFljRYbxp*{Y>7^-n)Uz&K}>-2K_H8{p0Jh4 zOrFVT^t1IW-n)cR85BS#dJmZNb9aIps8zWLJOr{>!jrRJU^Hh_Q=_h%-G<Ag=+Ho;oM34lbC6Vma)Z#@W5)9p7Sk8RJCv;J{ z-0qe1P@Iz!4Eq|;RIzp1DlOU>6Z41XSqFm7sqody;sOCxcrnwB-_tQJFF?821J^UqdrvJ?@3hD=yzP%GqasuDt!VFP5#2+S3>16d=aq zC{5;lhcP)Y-&;O2IzyeAIPwBrn~VXBEe8zY$pS|c*`L1i6gWbDiK<&~zEO4ztcx=+ z{mzfe-Sx8EUYGKPFMb2_F)EGT8N3ygkP!^0nJJB!AoP4XD_{8LKP^wj;~4ij&eaDP z_5F+M(naPkxPJd~QNI7De_7u9(GLSmedy)2@4fev((>`D$f{{ed6fsAIh_9qOL3NalGil z9-%wX44sU)0f@-2bw}=5Uo5#EC&EI`EBbR=7Y!wAChpudHL^OZViktXS%l|cXb__U zzQ-ziqMCB;fl1Ynnc`)RT<oHbZ0;?l6hna0m|D)Y$i5L zd`uhOZ!26ULOEA;UnfWGbvf5UaIH?9PW$I_=aWn@<5K3g$cI0*OF45Nde`hXoNFNb z76&CQt#Jv!Xte29IzSBC;SV}>{~niVWOWRj@a@6$q6Ai8U6UU29iT)5rXa0=wc_3N zs^xN*r<~;g8@&FF13ehosV|ja@p3g*j0K)SER8aQWvto~J=;#tbxsTwzq>}=>H(w( zZq6V^__KcCJkKhR0MrWSZ_WXlq<@Jk~FqM1oN;UfmF?VvQbp|>{FdyO7 z$&!XDy8?A}wJJY*{{{5*Fb5TtKKRb@9RMq7)kc97*!|tM%rR6}lItwx@DC>K{!*1k z&rYxSY&59HQuc^CdKyL@AB-`^%?RY3_pi$bfUy}FmrI)TW%Z_K!CSy0+$!)Yc7&Wc zBsJf6JBP*!2kU@=fu`d)R?T}4Tc_l%^!eN8Hz!>s;i11r66@TZo%YM4Cl}@X;!)|1 z&LV4Wdz;-TODF_$wNJfag~k1@^0$E;2sU;7i)-P72kFe`H_*PWH7}9J_g_3OLkwS+ zdA|JKJLTqkKP@}>GkyKa=E@tFXN-*~p9pPy+bEB3j^(=;`5%7w@6*P`*+u#G@BdNx z*0=w#j30f1K8Xs6`1}14qJlwHR`mJ=7UU>P7;+lrcU$HF1L!UjRoS^oOU2UrlyxOd zBQQB<9HVi@)X)$TxgTE6;ptuZ*e`ztP&_Tum)B*-xSNZc(pqEvHy*u-_z_*)!S54v zu7TW~@cH`nRXMBxh$Vc*Sot)P+aFz(SO4+v@~|df!?!NV$>UBLy>VV{{{APWIfkD& z8E;*_RZcHIQEu;ye0l&3*nkdYUo-EG#%}z$eCe0}S;o6%B0v1j_sT11HsY&a{M;AN z+jhBmbyYs|iBDmC-wqHV0p*IZKYjP_%kAn_obGCX(PNC0I;>-p%690Jr9Nny!?*xu z&u;J#sMm`{0td(BSr>0y^viI7Yy~8tu^=X;8QiUzLTIv_VXW5#9RRxETBB#}ow9?m z3-9XMz4GKT*L2k^FRnMpBhC?*Ptc>Wh6Y3JKLZL6aB}V0$UQ~{{);_=zY(KOXb$~? 
zk77#hL|jtn7P?v|qXSqAYSl9x4F_kWn$$d>`MG=fH8w5@h#gKreb+dM%?4OPl(Ln{ z@iriEIYA?-lcZ6w4Fs(8(Q&gPj>SUPr8N*6j4^F#EGr$V({EPF^tY~TT`JJ{i8UH` zw+`ZDf;(ckm@PE=Bu48Gti>_QXq!HCN{3h7HalV+fKk7gV)jJy8X15F%9i+}&TA-> zj@IzOUmXRX4Pkc1r92A+eUjzFwlf`UmpwevmdnEL`p_T@B(i3pX(mHF_FeTD8afte z=4o;Avw}Wjp^xmKDmn!3{i??2bW_ zeKRlcF~;1CIrkVaGQf;GriN>BPKN#K$)pK;TIw{<|o_R?9cTlp$NhA zr&4WYp32JQdFi>ya1Vm$_YoYJXw+D%UV%1b$LQF{D7w{Ry>FBig6hFF9&@^5a0-)( zrG#oEU5g4+XeU0-oyz7T`f6%$*uNtxZ1TQ;R3YO}^)-l@lsAM_&@&#lE+L4fNjwYL zb$w5gGTVR#KmwnNrnl!>j1#I}sZI|P5`JoQUBCBntt~uFobu8%y<#-MEjm4VfmTks z-fv3SEhlq4{pUnjtHm6lRLL0!isb(KL@4}jbeU}+Fw_8CK%>8^PSv?(i z{g)_}Pbz~p@JH`dheIILu)C&Z<)_ZT#_D?_GZnGN_$|!RY1=~I9SVe9EsrlxF`hm# zOu^U4BBZoyge{+c^4{z6-Ul=2STim08}_1QJeIxeM3ysyKdPVVo**oY-eZZI162zf@QhBnwAAoggXpW zZqJUEUJN1g&H)|F(RF^6*5W(&KR+z@c(T`rA3n`ituFWaxX^<|TsNEY{5#(-o$G12 z1dy+O_=7~U)+D_7=9}fB2k;;(=gWOL``NU7|Nr*y%lkihhbsW~)3fp~zWr~?*T41K zI3aIBOVjACEg};tI5cv7FKevojZP3hb)bwSm3R4t06-Jo$f~+!7R?C9qQap&8qhO& z&>vlt)-5`>0&LGm7WsZ}eiEm=CS{I|z^G&F~d-1B=BEu)Ulk!Wy`fJGlpxibv@E8v3 z2KLZafVrM-${Qd1VtMoJkEgKi!`W4N{zHuW!c4)oeDKq!<-33Jw`F;KlclHWD}Hvm zWyG~Vd*=sb`Rc>Sbe1xZrBo)=IMG(B)Je3Rc^+NEUq1Y08!`4L<*}vxNC2 z?XHc{=n%pBof%3UU(Jg~S_dr;^5SnlGvIRzV2Y$Q@ZhXVVP?t_;YYvGA8=IJw_@I7 zoV8`+Jfp&lwBv+O@)fcrsDTf6p0|%oxb@oeNUpD6C(8*r7^~wl^Hv=LSiuXdG{D2L zFkq3VaSHh^^#s<2X?vpE1ef3=u*o%YlWX9u?I^=}@RfXLJlZ>*lVp8ujH{d0Utnhq zCINHW1n<%jbEdD< zOwlr+r1L{`z=SVN57l+KAyK5@s+v0)y-1AOty|FaI14|GvKjXSF;G5*Ii8k6EaCVU zuaM7mAdE1(rg0F62?ln8;qs?O)3bs#t|@|3^I(c8({2~gH7!Uaxkj0dq}`sTfmfn+ zY?>`V7h_`t*dpAtpJro7@AMtEa82e7@l|*fsoh6tFv9Ko;z0#8>+DJob9XQ8dNC`@ z>9kxCwd+`PJ0r+!(S#M!w5$OF9IdWY08BBTdl-+W$1yjT6{gc=dH?+n0#a^u%3&uc zsA<<)f8d$!7PRT@YuM=96gEocX<`a&d7U0e77$&sGM&)B)G3 zfBM0qJbkf<)8%0yMmj2}>2$Zz=>!F9xITibVDR<`UQ>G;ynUkq0rJuo}?d$l#6&mlLX?u>Qd8iiWE4 zPSsUL*VVaW-li3OXvn>W=6a9T^uZV^su7!8)XpbwmN!q%%A_+W?T^1z&OY_=M9Hgn ze^#2$Uza1Yec)}nQhx1;FsC0pEyvZo?0Fw>5FvTluZeVVHhorqitrX!W&NlBsl5KP zzf8eXo=bo6W;wuv(~q2%aD61}Cl7PzW#<`s~VVeOo^E z^Ir-M=twPIT$hVOt6UOc-7t<7k^9%b^u_YIPky3&@SPu&@Bhtr%VappwBLQ_CuIO} zdM(qdfYjI(^`IO0@fhdl(IfPd=-4A%eE^H; z;$#MCa|g{gyyxhrS-w1|#i*0-BS-d1KcHIXx5i^dNMPhO=g!BVB1eHE z#|RC$Hx2-EGg8mCSTnHTgL8M+g%N0$veZ@RV8+jJd;Y!#euyFD8w~M2k+a#3x)yCz zj8WB5q#rR(@Q}tH$!pPa#=t-K#p{f1&`%?8gBrJynI&=8jL<1vx)3Rt#aQL9GGm5O zp6CV$wbEPK`OJF{mZ5~ zM%g}G$7i??+zLlqVzCi>f->!pBBzcx4({=v_RXw-*se{W(~Lh^8E9zB+DYZom$D`f z$SUV%5SDwOlRQK!p{v>MCF8@6wEBDo-{q1sk@g7*h^s42rVCf4d8DaRofL#Z`t ze9qe-!d(h3yExW1Z6|dPwh)~1AzoMpi(pwnZxox#Ra&paV-kR?0M0u}DzvUsy4GHw$?O|L4?$69u5oDbmqT0$E2*-_-D?0hpFQ30BalSy zX8`iqxKl28yU$&LcZ$w9elG;5hFj$^^-cdM^KLWC0WTWAx__0q;lUX(4`6zZhgD%A z67=B(^AKp~!Vpmo9grplcvO7QiB;-(gk@L900HLrRH%lic$KS@GWGaW7?DB>lvbcO zl5ng^*DzL{87t)7f9^Iv(_@FbE=J0A#cAlCl$ln7q7?XU4aNwi z$wOa6GO5jNrju$a5MahAyQb|K>?@3>ig*s6M$pMrQMSY}_L_mQdKl2wF`^CoGQHbu z5~bp-jhK@sA8v6JX4msFzX6Z|*80zfGcY#BYRXkF9r=fjI55jyxw>93*A;U!l2<7e zs6%N(i}}lEfb}#^j_JT#BY5U`IzA;zot7SWeEaUQT>a>05~j2;`m<|Ke*@6D4l_~C zkcakPez&|!B>((_4|1L7;s1-@`#0qmzVa&=YMhNb9YnJdIxZgEqA~-N@LVNj*pM9X zR2F&rx4zI1ri1ZX_T~B`g;--u-xUB_xgA-yk@T0Gq?(US$9Jzk?$EAJ!vfy(< zW1^>*FUp#~djojH+%39uO&%oL|KgFddx8KT>~u9F!rMcsT?-?oQ{z%zvkW`%N_Ak+Yh=kgt0u``x3|SS&_B4O z5kyyp$dt=s8k#JF@|lspb58bArwN1Nyfq?bJ6Iyv#(7}g24#D$6F-d5b&AC>os2AHGB20zyg&6rEsds2eHM}FEI3zjw*HHL z@y}?|vk&T$7>)?WSiCb(h>^RaucqtyM$4kJ4wpfm_+oZYqpE=vi_s5mHX_a$^?ZKB zPMC?)5vXTo=p(QxLt>@E(B4i)sz zvA*=95l+CGYg}doI~)V!*3k-{8j0j(c9-9=Oa62TK)e#?BClMl?Wq;>Iv!>dq07A! 
z?^13~!<4Q%PC+C=z-Zenvdi9TAs1_d&6Ghmm(Q(()1mP@Gp@2g{Kf@iEM^R$x7izR zRsJko3~DA~W^$&bC7X=X8%jz511x7}K9iny&;Ru5>!BBX# zB1XlC$4Jx$?wq_!1zI2{NIMoyFuBab!xk*~Gp(oM$aH}tQ2_*eDBNqbt3#u*S70$p zCW@ILtQa~EMDT>CKE|~>v|=8GGA)8K%Q)w|#NY^Kz3v2>5vc$khvN!?GJAm{GJgy0 zbgo>tZa_n;I~g+G^g6{YMsQZSyX7nUBe_<=QF*M0b{)@a6mpk8EN-;mtAQ71+J6Z^ zZPrAs%*m+c^c>H;0WVCeW_~CgM#U#!z5C&^JioR+Vp?X4X^fJB?X%4Fz`a`Gs@rV% zFM^>n<(e19oruw*B_poMz|7q=3_PpG@|ExUw$6kuE~)7pc#5PVvYfAQ z_UK1|2S7Cl0Z#DPi*WX$IaqX7V8thzz4?d1F3G2+r!p0 zErG>agRBE@jfnf+xo?7uL)$y_+`uNw4al<7^i$A;d0={biX^T13 z42zdS3@~Y%Wj1L5hC>%=Z1BY|aj ziHSjD><`ylYlhWeFbqVm$Uz(r9w|%1F~W|;&MgSz20aQs=Y~YQUA+2-J)dHfsaOTm zl-A)^gHLBF2opFQG*J2!+;3n01`#!KE)RI*pM_RAX-EY?Lals9KgtmAJ15lj%@zz8 zn$dufDPzN6SnQz@>GHiqblaIl8+u%fgt2&lj48JblitSHb~z&SqHo(u1*=3?P!VDY zt(WE6w?$DWFir(j2nLg31j-b7t#sFe?q%Lqfu7)W{Lm{GjobF94e#er(_*%&7bh5i4K^&dd8 zX4!ooc*>?X<(_ZvefM~S84Sh%GjIb(!U+UHSdK`LA+EHfD21Aopj}cMBq2pm5hQm> zDI_jQks<*a3_ucYfB~k*boX@kboaD*{roGRKC`m2s`mFg-+O=}wq93PRpyu9cki+P zbMCq49`uRGdA*GM9Jtmt9x(&k>rUgwmFv;Qp&wko%6$*BW88A18B_bq>l86KbDd6{ zboZmIBmA}BjJN*Y{}wl1_-3v-R~xPPz-K=jkA3K4Q9W+PtcSseRuBPNz$gq$Dmt>Q zfk|_$G1CK}$Vn@W3N-YLf*Ux<>zjQXX*(9d7v%{cXk|I-51o(I^XF1-!&AL3--v^q zYXoWV48FEj+lkh- zykVH*(b(xYJf%@R-tCPse#kLSsZm>qGi#p3-$-Yl+mUow+(+=6$VYj>5D5f5xJemS zuLQBF|BOM0l|@|`R09*UXxgF!0Qq4?))s&+QV+U z(8@x-8a6;csomCLm^qLJW*8goG9aTC&6U-3+J}ek1Q>m6HVRZK3*?vvbtXT&Gos9 zg!g2)_f((F2Ko&B&9M<6c6kx{NIx^6Ivi;wK*;3<$LdwccLv8NQ|zHZkgbTBnGOnP zZRV+111}+8TyMXJ8qX3T!Km7a$oJ9;K@MDIXZ{i znE`>1z*?E)>5$SToeK1T)&zLD?UVm3f_5E#W&u0b2`VktG_&XDX2;B1zi3nrvs|hz zD)aRnMZ>Wy>_dLd25CbK(h}&2Vs+RKW;XREJW$WftwkszADx|R2cnOR++kP==bKTt z0(NT+-1o;5%-2jb&eYF|pfZ!dGp&x- zSSaaP)u>|^9SKBWp!3W|1an5#t`!;;__|1ms2u^Y#ph&bD@njlI(`N`2NK0NSaE50 zy76$IxeAynjGHMi2bFwxzZci9-Hg56U5w9Rf(pACsWi7wPf`$FLt0uU5`lR`oZx=1 zi=r22lG{GDH*mYqIJ(1ws3356oLi4o3^mXvqurUtPlY`$n&w>IAH?1eCC7+m3r6Pe znxI{j+&-rXQ3g#BSx6{&%fd+jg*vC%wWb0L8IAIMtTh)hEvsQx`2=*_AyZ}u6Z0W@ z!5E6rU81OBc$!Vd$NTct<(s{IjQ2nQWOhP5E@Z+37fL7xB81`fDA0rG4?MyW1Ic*O z3GnUmfrUV(_uT(|_Ux%xZPj^ZQ7JC6?R9%mK_M`gnK5uDg$EQ(_x7T@x1CYZ5>Cq{ zqpE9ylK~WDyf}pla?sfA#NogH?YRCA{`a{4yUz!>F1MLI^O^6C$G-KGajtoP)Q(q@ zerb%9cb=67*)<>k=0ATJsd^k}sbb05Ty*yPvAsKvK6G7atj6JuZq(rYXmKviy!Yv- zpbQ6HqR_9s7IW`hV;;yR2KE@oH3S$O1Nx=|TggrUD$B8Qb`68(4h?`2dTl)5`ma&G zwJMs4>ZB8c-~P?mB~r)562&(`73)Otr(%w%b6Eq9Y+n4;UyF0CQ$+GlVSsSZ!@0Qg z+&$c54yWIWQJ?4T?nKr5Gr5f1Y8te1u310z?x*6^T^C|?WhKVQ$NtUBfG6|Vkh zrRtXY53k&-4fj(n?VM(6m^XPF`-ePhh_f~J?Rjfwq2zQbV6}^*W58x6XfW-ssaN)K zGKv(V73dp^vIWig9>}o6P5FvptoujQ;7TGDDVX#W(;1puC;Oo--e1GJC~`y+CT|AEZ%c6yU|t(S&ONeSB!DpPd0Var$|@v|o&0 zB54oR;cT1iqg~$MGkGW+W650>$kPWBkoYiL8xG5N;1C&^aq4$#AYS#glsjxz-e) z-^Bp~pfA2XzSO(sHW~p*jt>K8szg+waRIq6E}$qO%GUWN#hC&3hI#JK?%^m0y-qqW zx2Czj+lo#^A!ef!B9%c#pxNKr(WwxN2nsrbn`SYm83w2 z?w?$#gYt1!xrD$hE!vV#I$6_nDbNTQCKUd9xXe+s@ZYCe?lZn6w8&7ll|-@?cKL0= z&}g*JxJUd2VunuDN|9RfYy(f<+TZC#pGY~2FJ+1I=W2~l?i5I+A<9P)jbd~PE(NHh zxsJ%cqG8fWSgF6(ZpG=1wP+BHTL_e#(?CM&Nf%?!=iBS;xbVP*sGnNF+3;LIlj<@L z57mbfN>s+kE%N&jK+#$b0cD-&{@_3Uhq(Mpe=RQm(|-!s7$G=|asS7^EuQ?2Z;yxW zes7#<+?%>J%M`HxAz#qh^*NnarrO}M`_3({v_@rbAHZ^mK?5MJFytDtvy2fqX`hYN zhaS#3^uF|yMXkjbz-vbN}x!y|u zF~4j4HTv0bMEOCN3$)xukUm20J+}B%y8?JaS1`C4xx=HPT`S;J;16iB z4|}(}kK^j4UahV*V|}fG+#xqG+TA3|m!Lx6 zQo#d8i0743{!&&Ci#iy8w^)uJT!J7YQQANogyj+)8i567?v^C-7cwlU)K*QW*u+WU z4AY5u6ULCsaa?O=!3uTfZ0`z#pqg7z`CHqSX>M$*PSl+%rE)#$b-*LHv3sBPA|3M# zyMZIqdDU&viu{d6eZa4cj|y5b{82rtxe$Y%k&dPY4Z7y%%d<8rPFWNtC1>}ZS+>6gTq1?#< z&zCACc+dQx5u(Lw$N*!VArl^+W1DVIr8G;EfC9U&ZNFVhB&{c7ZFm>jTm0&lOPoBm zCTnH5uIN5!q^?|G=lJJXZH!~Fp1g-ajL~-%c{aS9^|536x?6Ud$GW9l-Dg%AkO;PN 
zSs7X+cp(@}X2HiSj^v(cOMQuN$0~n{ei2$K{j;_N*@r-8d))ny_rT81o4%eMVH}(| zNW{o1(-$c$rb0zC6JA465tK5{RL!t?jf0hdJquTcFeaTlh9AQBZ9~g*kR^x!Ghnb2 zlIfxsrO~M0q5~N#V@xZV@-LxCwX(;(qEHyCx(B8Z)#8$HfhkaA zrX@1{AyIV9Du=1y7Dkk)O$UlnNK=WPDI%#SF`;1DZ4^XQ8iQ(s&tdF}#u`MaO}Ca9 z5nwz=?g`bwo2C-s+yC98E*tz_I3u^W4f{icSagPha80rAT*I&wBLLCO?G7{;7w>Uz z)9Nle2-zL;HKstQ|6edJwX;|)J%h^ z$KB2(b~v#5JX|@>SoemT zo5uR?k9;^D_|(VZ?72tdeCvLUy-|~LCS)?oL?I1*lue@nq6F!+y9Y=H5W0il?$&xd zT3wG7_&mmGu0C*Q)YmuQBQo&jt5F;7MhU>V7&u?7z{wbZ;r1L1h6PKLem4%e&jNhc zNzJAsmoSSWGJ%1H6wrB$0iTB^%YdeXFMTl%zxg)f0#Kd?@K|SZ?n2Br89#hi9tL0e zYBX-{$HV9EjfXbQMjeCHK6^gb-*>Oyj52byXp}8sNnb%v`y!Dk5 z&$EsN-9B=^eufd4S3y4@4*u!%Y=x@c7SWr*1&$2;DYj#oc9sdeXNDk^>Xscc&|;!9 z{MIU@eH z7R;KZOm}mcG{v=IcRV}cAo*d>gS_8-6`eq~=B>NZrJkJOf)Qzi zKd2dfTSH;6S~%KdA@j8`++ssB6v~e_?RHRO|9f0b$Q%2s%D9-A04WG?tvTG4e zatY&Yuk#v+*wowMF@kKg%R{CJgp<*+=RqktH5|SvRE0%>l#!;utq{bX<^2`@ETT|3 z8K4DLu8(G`Qyh8=$J(rcPEjfg>gI^@1YQb<>Hp$T7b|T&P+z+Xs}b}QK>++$sf@~1 zP}kc;f@UR7M7l8kHu_OuOs`mI2d^;-W)E<9^^<6|i)JG=3|y(Yt zaGl5r+1fV?^>~)qh<Bh=yK7(+J&IACBLI^Q0KU}r7&pIWSBqmOEt&oNKe2F=WfER7^P!^7Bf zsQ_N-=oDlN*Oo!yt!6E^@hB7)=hUIQX~h%n;T9XyK(6;Nm=u0fKr0QRWFjc{@LQ1N zYR@skZRF+FAr(TtC>I(yWyG_dOKvqJLKeB2iP#3@*i|qE7`iA=uEUxAQGt{poxNy( zh!M?`rg)}~AIauhWnxSyhz)esSDJAK2eo#3BV~GT=Vt6}Z)N(~{q?T-yB}KvKH>TM z9Z@^-+V4eYb1UVkvf7H;8nT7L9(gbgw1O4+tl0stp{Oo5l`s_b-hS-=!@n1A{ttgU z_TPRdjoF#UpNI!P@ojO}osY#mtM|j-X3A~Wha9;q1tGak#n&m^yxEJJTRoioeymp; z@xHV7L}SD=k)MEft9PBl&@M#p#*LVK9yN#M9a|ej{WQ$2@-HVqE*(-^%mNm)gyk$BDXSskFYHY1+AV zJ=z!6WBYvaF4pf*x%fZCj7YP z;rqA}FtB|!W<*RAXlqpF!ALqv>0$uja*QXV#WA2_coLodVT_rd`KmVK+8`3Q-Kb7O z;HPs(s`y=fHamna^1de$YTw+Q;1(>??wLLDIkJE+ovYFAaKiP_DxC)OH1M#g)%t2J zRsoEu+q^!9p6k54LCP%mD(VEZ_BvRf0d297X;ZTSN}c-j5^0P8twx$ zZ0?1WLNm8EBP2wxd)aAIqhdoVmm73YIxhnxx4AhsqhQzVHGBFD|EFGKHn=~}gf6*0 z&ArtXx6C=eOsnhA6SV8l&6Y?X$6C~$c$%S3&TNKFhH}d(&-P3X9ji1?U|c*nb>6-h zY0;rqcAw*Zj2v)S<2mpJ*^p=251X$Qbz>G?y38t&P)CgX4J6c0Xq67U;Mp=krp4rD zFSO(G%j`xrLy^`@)3m!|ljiy~cT#wra}oAFBS)l^Z*fFf;E&)+8^J6-EIeVX@LNNlf z5~lTasyO{F5%u0)AEySx5LOEWn?wOt;Jf8)Zm|;E`$P)}zXe5}ywzyddEa!kYc)m} z3($keI5}!@kxgt&Wg4mA+?bE3bTSs8%}2A+AVRUizo^=Ej-ouHZ3`CNFYI9)b`?a= z5`dY?R&i<=0YQd5aYCjemkgOvMp1pL{ZE^W0luKS9^ZD zEl4FvRE*K|pbfVrHjTQ_4Gr=3RzI#^A7mvwlF3|`xfW2!I7}VN^WxkBYdc(XH!3Xl z#Jl{FAPv4a9}T^NztSvc5AuzTwH(i&H^dmCSQsY1@>pG!0YNmiY>k5_kj<~JVko)K zmW5_IK#i{%6 zi$}iqQ*qZLPsdYtJ{@7AG#5rxAemw5!I8gIv{JUV_2s!mP%2$(bgnGpiec?7T7 z4!2M(+PjvqB2Gl{)9C;EFU8iEzKX5@D$$M1P7w{yqX!cs$<;OZHG=o;Xu_`v4_v)) zHbK~Eb0<^(M(zE}*Q5918!>wOS~LK)J2!5|1Rbre6-W1OZ(aj*bQtd}ZHrqi5j1!U zDCfB@-5CA2%cIx#KowJxfQD*0POZ0)G1poJ!_0x`#|Y1*#d(~#sZ{mDdAi+8Cuwo) ziS46}VBvQbc5#0VsZo?Y9~2?m!7f2Vzf?T|?CNSg4Xxl_yP|GoN*t*mPWdB)s?^Rg z4~zGV`j$P5!ujx8@qBEK@@GVc4Cl5gmj?v(yysECdHP_=HoDJCx#YmL+C6nw+vECn z?n;5qnKEW9Iz_khnod@Zby{`w+Q7nf^m*uHbZ4J+d1a<_b!9be?%`yJ>_O|P(Yenk zoN<4>$hbVxA7vI&VKB4?V5(F(C*=*=2qC!rExF?P_^zU0 zliM*bWy5uL?XZpCa%=)Px5M$Cw#Vg-qNhOmY5s?wR5p+;eXIZ~Wr4x!L%q3l#7t&Z z-AVXff*5S2&n_~b^3AcqGv&t%R0gZ8jgm!Fhay23T|t>_#b|?sVoeBxhM@6r26vR( zoQ$fVDGJDIpmi=|06rg~6fE#$5%n=dp73TKjKGXk7?%RvR$$6J*Sh$W``e8`vr=5A zAv*PLf&qS0DLfxtwC0+Xk-TGuu6#2&iKFnL6rNS?`Wiz?$8!RbPWk12S-lRhSisp@ zv{1Hkx-63rNB~4}*iKPvpu%Vzoy$C9)#Njj9Fu6({i6#Drh$gBYgPJg4`%8-0Y|xS zzLx8e7V{`r*S9>0s*@47(XadJEs_%jx^}AIxuqrdY{Fy}VGqz?wCyz-2)XF_=b}6d z)mBz;?if8AGborxB1LGY!Sq^L`n727j1g|tx$D{mZWslejs2}_eDfC|Vw?zGewQ)4jh1OsiDw)5gXrv; zKGV4?Q;Qj)xyv9ca#NW!1b_hq%5`_sU@FpazRxF6yczuVL@(Ff8W^+n)n=T+xeMyX z03yMNTluErA!9tq?OD0s6hT;IZWW^W)AwD7yT0{faqlNS4G>U}kXC@wOq7mPN^hgE zEQCSsb?z#fGH1QX;fP3@J2x2Trht5A5 z?|by!@xYxA$NI|M@ck51FK6U*;mn=!-S7W+eCCe(V&%cR 
zV>}wf=*_pIHX9*Zb1~}oW7->~@i6Q2N2gdxV02?N>gIa-5YCE(d75HTp#b zKeCDwD&cHL&;3?(f9qG^H;%p4j7|r-L6b57=%{lLd(XX*OIQ`6`JF2_(VJ$p0H?$K zy;y{Q~KxQn;p0qa- z9uLRS0ibzSyZmmTCu=J@O&y+5`&`spt641P835{L#X?)2sm|LIjMpQ7j-6+L{3+JM_vXc6IyZx*^GVIl0#kHRm=&LX8i*7&< zmloWfXST@QDC&rgQm4y=^DAqJ7&D^DOX{g>_9$F_IYzCXI_%#XcANs=0{$vsyjpeF zkj_cl&3&|SUdsnua?5`04FeBeXYML@76YP}HbPnEvu0*(uj)J?6Jx?2a+9>r30iPw zI9SK6`BTSjU2A4g+n`BS?kvuhex|z3C^&9G$AWwQaXtr=Ua>r(((pN>+l)A&sdDBr zVQvG2<{8Bz!Nqbbx(ZL7U&^+3L;vhaaG;Md!pDX$LGK!RoRKx3HrwE`q)R3icDr>@ z$DX!?=cFH#zW65tlsdst#SAP#Ue;zXUjc3w@o}w7s1Do+z5vSFC;+iKgH0ENSqSLb zd`9?u)-e}b?#x~wd(xhJSwp297LmRd4w?5e(97Ld0t9zR9N@#W_hYY3V28{@Yl|Z# zqx3PDmF8J9hCJmKL~Nlx&haPMQYsW^(dRJ=>3Wabv6^nFz#uOnsHUbNXQG;1Hz#r& zyVZy&@YuBrl?CK$dsW{Pvl)G58i?QdXg*j3xPZeH3CX=VgrSU6D!U$AK8|*CA({;a zTwz$EKM0Ucn_+pq%y7?iHg-_>__iT}g%GZv6^{ z^o)LNvO;8S^`lPA=+UZI(>Dt5)O0L!_E}10gQ+`&D z)@w!gC7oifbMfRW*Y~n7z3ZNs1dIYm3v!rlk}jr%vMB?=!<66=lGJstGt4&9FAz4- z1b^E^s17|=TT2)}5IsoJtsY8p?gwRDgVAAJA+qWbHCmlJov4r^cnxL=hh9dw9pgL> zM=3_Mn%J(g)Pdl24K0QPcO3My8B1-Yl`>;mR^A!uU48z=7FAjS+j0N~M z56=$l-OgA=&&s2zTem?OZ5%As;|POp5!%TC27~VgW1IfR-;T?F<9~|H&woB9BY+&y z!CjyDXqFGf+E=R4M-H1h=aqY(CXy1EhG|=7Moy{0@_W&D* zX}}BPtGue?E|(eAS%7+ob7y=QNFDSFHp!D3GuUOUrZaur$VVe5XwL|ZG3@W|$L?N_ z-vRH)TV^K%JAk}tq?J8+?Ygz63u?5rjLI$|8?8nq?l^ZQnpWEz31cIYcJQ~@ayusK z?Pa}$X=k$*ZNNq?i(%b!&9yvpnIIw8ZIQvksd;~8-mE6Jkjc1%C2Lx+T^jMC742{+dx_VJDBS>(v0+<^R5wqA27)9bf3wZXu3M(fQl*pR*$LkA zO9$$i00ob=X*xprlxG%b8wEwkOKv$6|3F8VMdXprS(>WvX1p>3lFrm;^O|4=K1gSS zpJEr5pc8w>er>EWtPWUrU@@YZ+RU=@dCzGXj?9`n zFEiuVZe&$|B;4^c2Eqc~+=?eC=dtozzfvq)@lcsI!TbBr5)P)(0wLBJkm(^No`mIH zox@;&(sf6mdd5G)0OHUU&PD3Z?`<(hUA5!)ofy{x(S6vRC}@RT*H7-1$4xShWC411ReR2X4wrmDG* zg6KN?$pT0R<`~80+WalFmZzLS@I0>`)ad{!yv9xQ00qxiSLsX>1G*7r3(ICW6czq3 z880d?gWAg_70gGPc0P`4*LR3o_d$CaCtgoW<=%CbMzoJpasJ#xw99akFV4$UTPg-% z%Cxuiu7Msmh@=__xv3(v2@0Ufpa6J`=JlN(&ixQU5sA7+WP=wRQKrkwS&*lY+StYF zaM!`qlbj>8)G&?sj0UR#@LI=kI2IjkqUi~)1m%@Kwt+Mq@2&xjf#WWfD^YLTXs4)U zXyB+s$fiZJz(s}TfmVhO_3?NZ1xsR}nAKVlE@pzM|J!lx@BM@5TzV6Q zosTn5JrQ?&;^VQ{+K4I&d8XTs3tPRouX7ZuJ6lofccQHBhfRYlYO6%+kq6`aCqBY_ zr_q1?jX3!FD{*jTixJ=;RBjC>d=s2%;J~3VPjokOos>B`rs){K=D#%58g+mT)-<~x zYAhGjPh02;vg;FC@;yemO`|=Be5{{83l}50y}g+9`?>8*KxnGd2vsAikb9z6uLp0S zrS`%I%xqBw+r%LG#{8+u$gMRz%B%Lo41kVDXVuNR3e&Yk{fqB$^VIXy4x=q@l3+A5 zO&Um}RNe!)t^sV;+)1$D5+ioe?S9o_D`R#T!#kPTa94^3zc|rQqitvjZ={N2%`c-f z#s?i-3h>Y^`C&AkJn}W%PZ2Cs@@VRRSUYRgs)aqcTONWwnKbM-!_g|n0{ z76yAlTW&?;d&xBZEr;fIQyqgkC;-lyq5&WUdtv~U>3*Fa_cVJ~*uyf9(t+vN7@)Mo z9yr*E#ntGII}Tj_l7EW`v34xivylS}k)@Gf$IK|Ip7eHCfi#B{`M+EOWIozn11Oz) zh5KY`JaLY;O(UQqJr28UrelRZMehMKFwhD)G{fR2a!VVu&uwt{itb>K_mB;AUb%NF zc~8DF^9z{tVSJK%@ssxiABBEtpNg$`*)0!wGB|M#f*lN423T&1<7z*ibJCVOcI{oQ zZlN|~)`bfYu;CfF@N9QSrEO>Yrst22iq{n*ft0Cv106`p@5qqD=CkTZu>?>&Umghb zG6Q@B<2Cp;C}}p!(Q0F;Ev`{vG`9TH03M)h8R5d1OqpUh21E)6M(_;8_OppDkTeu2LZ5v!IX_#W)wgR2i*~1 zL7^4a5_x~O6J5S#D$`1%s?zAl_ICP!9>D>D_eZg-rD>Dr9r z$!=1q)J0mUIO?#Vk@X8hdplzcZ1-B?y#p08GZjZf>!sLOtz-0xH8|0mky=(>^1DjA z)gj6vf*)YWtvXNi$P?-Sf+pzR%22@%k7hoFaAkoBW7bicW9Vg%^W$g&0M=U;LKzKN z6JX(-X(mCTMNL(ne-u3&sTmWyZ#lnudpX)Go>%V~@h+>(a!oY>US>DoH@|BHjmEP3 zl!^tN~Ku)@xC^rya}puf_V47ovK;745t4j>h>rqWkijvGdRVc^rS^r97KJgEr{_ zZ25VjEY<*}$~^Ka-Dk*(g-h0>Qqrju(P|x2M(eR-7T=w@ELiV>eNt)>G}gRO&T%JC|`)GsbA#I%pg~x zLq;zME_2G1D?>M1*VnldfLI%3+Pl$gWpSM{q)e!{%1>q(p@rQwwQYw-(3F5c{jAlH zhh`Okqdi$d9*W%|>Pp@N`uR+Oxv!hyR6hmm%AWe6!AF$QPorv$Kcj{xBbQkKokruf zYsH-Mt@A9F57I_ylljgaLwuIUB=9i8b%&8BJ7y4oE@eaybIpF3t~NMvm7;703Pa^~ zUFoC)_Mg{f9SXW8uo&3cZ|t$p3%suV!5%UP9hs4@*YOc(CeVOzd|R>Zw*VneiG&_` zj4$JmKF*bwQXh<71$(OaJW;m5K#=Tvg-&T@@G|)^eiOe*E{wKy8QM~7GzgKZM$=Z 
zcDI%+@sK(H1UI~zT?uQ*6ysBuJP_+(>M3;6Ouk72vlT^(-p>~4bp3)&kBS{%^4Clc zl;M43lW}vyl*BD4X~psgIoI({aeYaRT~uPI1UM&VR}H2R`! z*US}O(^X|0d%b28k)lb1h{!guS|J-$xL!UF+Os!zI(`JtD9kks`^jUO(vaS6HK|Y~ z!b2!iFn#{K>v)Brs(>0J3pFfiGd*Q=Y6@BabHeYgd+RK8Eee>7_POYPZ0~e(yPFY) zhEAhn#Id_OiYu2p*&(ZlKnjI6qLiT;m=T|U^U`K)Zf|pEvj&0+k+c9qlzb~|q2Wn~ z#`B?BhbVM792AIUdOoJZWgT|d1eAD#^l!=$}}7v#m&wLwCW){MlzyX z5v@xh7(M&M^UqYS%P|_PgE4$!-lqCWo;SeNogNB-;JQ8PRI|^8W_ADo|MW>jK~&hT zh=)t1PAASiP3_y+OQGxT?I*HKK*#)7aP0P_x0+r-wO7LS=6*WJGJGu>Bke!D!F8$Ik#pJg>ALRgW zfl|`hLG&YwANeh}m>@@Ei_-SGg%d|U1TqI>oDog{Ye1C0Y;0krTkD{6_FX`3T<$Q+ zmzUlzi(>h0_RVb zOeYpB970zG_`RFExt(xtFpjy^R-A6uv%qe1YYVyODezLmsKM4YXhh8tb^yYIteHS& znTI_MT$eX%X6B)a9BLP=jYwz6=LP&D=I8!=qaCT@wqDN-H)W~pL$c(h2R+$XtAkGW z`p^Y_+VIF$!Ugcusk9l3I&y0(ETBJ~5waz)bqOJ(JB>4R*C-XX4ts-;8Zda`WC>tv zeUVEk2OjoA#LF#GHj5*(#X8td47g5Fa4SLNE}G9jL92EY{&IoMPt<_~8t8*djdSqF z+LmR2T(w@xnjCG4eLFme#`=-Wlqn}^TMfLdaWErmV1_fSVv}(4{%%C>(uU74^_yw! z1h8fZELaop5%`%#XTE9Iv1Po7Erl*zXXedpos6KtK!SHo^EkJk2tKG{5n8BZGE0gz}(ah zpaxKau4Cw4*s3BpDUN@IPs+QJ3C$Mg2$Z00kx_DqN?M@5+7{?n@SAs_skYtvi()IJ zFH0ZjDJbLq%B8ZH5j*y?j4!LYjK*~ff8slzd3I%GJso?l)j$vpw1!t^4+n=3VU%l4 zZgny!0-*E0Xq8A#r=w7CI|WYAnGTotOfflbQ;D9nj$EK{M0KDsBR-oem>M)Lpn+UgcIPIMw^+gcgTv;nESTcnX&@n- zh)d@#VnjvqULsuYEfU<_2I;w<*n8cIYE6^(JBP8mg9AlS<`;EzL>T}T*U(M*^T4ku3BjGW zIRd&?kr53s*c%zWVLrpG87jb`yF(WB9#5x>Wzsu!MzAefZ~+6;GWv{&l6sBj#kJbey3$G2&Jn=A>AT|>2i4mHj%ZHkedx^1yw ztKDF{M!GsvWX#|6!r4Fw#l5Yo z+6d{Cg<X(t z@eDzBvA;dL+o8YO=f%u&@;c-3b>+`gzWoGl-C8e}nVH-^$K1^dWicAGk=6zcW?pq< z`Uo!kmq+P>8^81tk0omhj*|ih?*ayL_ zmX5*he4cixUh1p`h;tsqgAFM9z8L8v|L928`FK}p0IhYxj!m$nEz9LWz=hGj6E2*$ zPd13+{$>EUXEqmeM9Cw8nIP7PTi5H~7U~r`a(pt$_sopjk`IDDU%>m4Tr>b82? zElN5+_pi%7Q>Lc+C!+yG(32HTSz%2^DHKVvBEs`NqUl6k7`Y-nr&5UUe5N!-NMM>M z%`brhx&W!6C7c84RyckQ5(J#@pYYpdb@&p)o;i&^8OyY|3NZg3Rs?Mse9* z;TQy&QAfZaPn7Vscs7=%n8 zLl>1$p|@CQmuN`?q%h?9_ktttU4z)`9u>zB!+{+Z(5v(QJEQNjX#G{q7J$RWzFLSpvi1 z01(4CHQe$lmrGE_MALa4yyxEhSNx6)x<;pi6?mLI?Fn5KXjAapdz1W$c4IUhM&p^sqj~mx^sepTaQ`6Y z_jl8Q8$rqw2c+U3?(D=8PO-@BB#ca1l4m)m62?JcleW;Ijw2m^6VUMbH)4F{W-PhW zp|=;kFaBEW{Ih=?hhKd@7Pqg&@ZxJQooIL-2afSjzTBc}s?WJdGmX85C_yLBGbOHb zHj|b^L@uP6hBl)tj8lSkBP!_W@|2mtETlpob2k7^&d>Szz+;pQ&-fWeFr4yKON$3X24v9>4CEic4{ z&wgvlU$4I(uYC230G^w%K(u5OqE2d*1@G34*zVdYV$XFyd&)yR3LQ0icUM3b5n&h& z0Id13i{PN!PeX3z&Nb*Jj?QhTypR!-(N#v^I2$9m(U9jM&pNb1-iX}M;U@BKA#d(D z;QHB2Lyk_SQ};Z0Km^geJTL=S!A>a^X;M;`)&E8ARgRT4m(K+HfKuc!i_y5y2-HF= zmqyL{IEPGcr}e=OFy72oGA`=_hCKU_-wi$lP1+D0i#+nDQKZGlZh332wxil;MyZM| z!mbGLbSU|c>kW=<4J?S-CV0`Ao4Iv+tvZ;e)bX9sFJrX$a6+^yxKWOCDTVu(*8k%! 
zp@J?QB(_9nW~K$ep)5_Ao3_pfzX0E64GVrIh~cNsD6_8UiZ<3QyY7_IKAM>mRNK;5 z2kc=$3TT3llWBqymld^Vm1;fZ*sgW_$mN+_x>zhT-r{IjkI7wPtOk0yEiUOIEw!PH z2cFye!`hV0E~#tyl`6rgbulyOrJQB(${cMeZk+;uAu}#@lmP;6i7f0<(w}d2I2|>g z&rE>+hB*k%RDgM&B@i=9ZavZ%9=KHO_;R-x?`t|n>##b3YPkuu$Zfib0$DTxyih^Pi5j&PdF9rZeyUKC77L>;1A zVZ1J@M-fDx5omv#AHlo%KtLvj`Ui2_HrER?elvNHtEJs2vAyB;+(r7n<@M5+bYMPz5B?~8V((vVu=ToeDaixh!R?|3XDE=sYs)#YBEX3p91;7$KYIzI8 zmCG0!0kaj{&hal0?X}lxQQg~$gMasb##?{&KaX$xg})U0|Hr?Mg}vP<1EglqX9=1g z&6Q%z2Q}^j7LUY5Y~W`h@7~*})cI@t5d*%FbsUaGViMcGHBV-YOk+waYh)b1G(rf& z{@r{$4ehaGg$~}&UKLB_dK|v}Ry_Z2{z+W;**_6a{a1fAj@oC{xacAa|Ki{NoA_&g z^KZmI{1^W$Y9Dwq?*6IokCh+#_BeWAov{-ca#L$pl!3L4jf6kLPB)JFMLS>{swl@6 zTiGYvLx61kZ1JQK;hFV%)EhV+#_x8mWo3Vkh=&R2Kr~`@WgGQ9FaVKTj zEoYl+=!uJPj5v39;Micst+5_0DiF_QgVZ5+E=;g(0(otyUGa<{d5-kwM(UYg>YQ0F zE6US`=`5I#PDdIzro*EV&o&*6eO+IVT5~0VP0>;n2ckZ~U2T(kHOBt9^>6^q>@KOE zd1ix-TU)Q5@_=-dMSl}H#3XNLM;X7Z#SQxA+!BeMc59h-91{2=>E$+RBlLv5>By{% zXYBkl{q0x;LKzft`^?DlIYG4R$95F8M96^7qkS{m3XOcenP30TQYzDcl?OG)UUcG|Yd39O_kR@&te9kVpK9n{&kMGaJj4_x9zcynEz zvAf-`7@PDd_@0HF+M66FB;jR&yT7Eo&S6^@@S%_k5{QH#gpM7;mDwY00BA zB`(6xYq@U`C+`VAj-!Vm>M`#>iBJ`J3_c6~DY?(?RRxw7YaQ&F7A2jH*1N)TxjmSBu~v@o7403OgO_vHd; z;1beExy2a{hB|UldNt)lV{D_1?M)mw@8tepK;XzTuN~uFFD5viC0?s8*Wzg6Jh{%2 zc2Rx;4O5(^^xz2!k@G>ZT|-nMC)@`ONR7&@2va=-ROr;xAoK2cG-RHxfkX4fgQ!k7 zqk6EDhGDu;jh*fw_K9GJe7?tXx}zeUXH~KCojVg4g*!it7Ig@R08PP#W6;TDCPSUW zQMrxZ^tHg|Fr5H~OxiNGBgX07GGfd7Y7I;kI#To!ns43Qjq9^&od1h|CC>lYPsCf> z+i^{uWHz=)e(=#p;`{&PpNP-=zz@Xvryq~i2Oo`lzvWYL?t4BP<%b_;?qwp-e$*eh zE9%IgU)x)oU~P;4tTU(&Yw|2C6q@;T-arG zq}M6F4*_^?b=A51_#*_?t+=L%&5maF6r1ZfowJThTV_VeVpWT(k9dC(dnzBDgX7Uy zm@dy$Kekg4WxAsB;TBu<-`CA-YM&f!W})$YI`GW;-Qt<|lvV~^+DvVfzRLbAlNq2% zFm+Oi1N6UG`c{VJG2?K6(91C@kD2m^*4joR`m~|?7vA@Xa7SV`iOW;~nc}(>sB%yg z(1}z{x8`w)2$_zq0KsEqYtiC_+|F(J+Ufh zGTZ&$3co3IoPzgUs=#c77O6{sk$Z-_qv&kkbf*Q3L3{bE(XeUxjt9!%5SVU`MS-tW zD?}I#B3~}$K70H9*a93#Y|*_1gvHvH%B$cQITuF89snAcAwXiU+rzoyScy!J5daap zhfW}kv1VF+qaEdj+sle|BG&<}E;nKv;#fx*MSj3TTi1G&WD)7w=GWs=jfS#Gj-tT* zRXhbjqhxzvjfcQxAS&nmRZy;be-$Fftz#SF+;v>09_6iqT{Mj-3Y=8VCS$GCn4{@I zgumbKGZs_JT+Vnk!rEfDu(-yn3<*#KIsz62RD-E=7r9FxE06^+#XeOWBWzh~7a%3k zEb``=0Mg8qe8;L_=pii*kuT>kg%}#Ny`9ZeZIQimux`R;A#*(c2tIhO^Atcb+T7(T zu5*nOUYnxK!b+a6tU|7&uSP9Xyo`00M%1Vpjd8~{Y~IY8 z6(Sm={3$n>GDfdGMEe0q(fIZGl#v!KdO zW`hREIQFi*Y!^0*(i|7p^585C(sLU^f8!yjeVp3$yu-=M;!=b+|(m#lr^r z7|7w!w4jCv<{ot&tgTsV82V;?{?-}FWt@$N zuv8I_LeV5@3bQ}?)+syTE|HWDBS8X!TXxL^VOuWSMz+SYmlI8=kZ_HJF4ELG3Y~&B z8DWGm5>e;gT3N>tuk!hNIy6sL@{DGUtfy2RPThBlGa~ZK>O`yeM}hqSzw6wMHdQxfQNT1j?jG@W-_;1x2MxK4F9pK;LW`$biv9lvy=cTZ%Xwxctt``BCYH~@&W-8g03vi`#QFv97 zME%g-q9hB7+5djkXzV}L$+Z`TvMfEg*9&J#Ym^kNigR3QK5yL*3gHvv~# z=%?Z_%tYmDZTV*w1Bb-*(!(fmk9+TNk6mPEAE#!VIb!Vl0G{jHJ4C~y*d+pGCeRje zSe}piYAah3n~wBUHl7@-=Qa{ZAYD;uuEZjI&WK$kjv=$a$SWO_GFPBxX~FxpXJ$;f z=IcusBGrvnVC`sXsq6eBR@xdT9V z52G;SnSSS1H=LTLW6t%(lF=Ws?N}I=9l{t|_Lg^S*|S1@VSGn8up@vDB9~dX8P|FJ z+wkPEzy6ow{vZDS__e?PkK&!*eIYJA|9X7=i(if_H*WGI3_RbkC4cIQ{dfDTe6}5v zI&)lY#_FT@_xj6t7n%H1^7l znVn+k$8l%3!93fquPgrGsD$(ftMV$Pp=2zraU8D10g@{V6iOGep%4(9B#n3>vg)up;6=H>R+ zB1#q{n}u`O)>XOWYYl@-I~JkV_>B67;Yxe(F zHnB}^&8!gUm_<+q?19jzuL2?oF!2L|rebR&`pkUP>)g6JwIUyz2<-&0yqxl@jNUFJ z75pS7(q9t2=MFJwV+FmDdmfbwQ3|kSaL9Z_u*^taX_J*lDJ~c&itGxx)H>;-O30*3 zjq`^SgbAZE9_h@A_0b8@j3@PsiKYiclG#BA0+^N))tDMErQ_*5uGwQ%6!mocZo~3? 
zVH?=2H0NTiC7~**%+pD~APS7N#Us?os5TqVK#4)O0~2vjD0#tP7{|1-(%0uF5pn)9 zdYQGE{P1lmuI0J$2&Q!gxn40}BC6=-_8Qm9tsb3U zf_OaGJ^mKexD8Ob*KvuKUBi|6u33&pJpcekxjTsxLT}-Q_pyh+Yd={$f{=^AO-Jk8 z-3}%rt^7_zu7bc&d)Qlu=4Yf`8)M{3@o`BYe3 z0{0XAS(#d-^y`2L*Z%4#f(0?2Yi^N^3Sft(+`iSpupqe}rz_ZTty&PNfMyDkd(Rk5 zo|Xgy_H}x^XVtKccs5q6v2nT$A5czY(u~UP?soJT<0BvVVEow6|M~dyfBD(?3;*R` zk6--1|2Odq|Ji>X-}S>k756;)L`GHv_vk|_kHwXb_DvXe_MsJg!l+CEcndg!rD2z= z53`fqVim^$&{0;4ZJ+#P4%P_?2p~o}hRg&Yx27;%(sABK!-#Df`P}BhA7&RAdIGuO z7-vm5O+1{i7fbO~8oG;mlC>{DvkT5@hAK1+J)OM(?ga>Ikx%pku?q&jZlR z1T7&4CuTa31#Xeio1GDj6t&$t|2gKJfCPOxg2pp=aEbAr`^g`U_kHdc<2U}N|4$5F zdL>H8=5&7;{hhsZzB+4e`TzIjb8IIxU#`YvIE*2@YTt7q-!lU^B7krQj?tLWj#;yv ztqyi)3KfoWmyM3Ys`cV*aP&OW%o_H>`IzldMj$`}a*J05mlj;@J07^FzR&{HsRbC zp#A#kGkG7Ey6oBSP9vYMjmkAy-b3S4uX1@&aKeEFj52XnvLR)X(w~Z1-Z*KE+3F?(voS%7&;`N)oJ*?JpHhBtYcYd{sSg%w z8;tbLoD{n@1Qi0aLdSBM0y$w4#qZ9E`Cc0ASM=ptY^4k!mPscpeZf79C2?LvAl-M8rE|yFHBeNygo(;L2i>h8V^f`sO2i^^^28_$6_!L_6QXArX= zT$|&ok5RYnZnflCP2U&K9AQX%DkzGf;JOtgw{kE~E5`*JBR-oay!EZ_$1!k?5jv2T z@i8dI?TKBE)6|s3hEP8`x3VQFkIfH^n7X}Q^kK4KpjBT=I(QVDkX9w9Z+T+IZOdaYA`UO+)sLys)ei^^d>ko z(`$T|iRk=%U)@;%5Dg9l)kf63$NamP7?dyN_`hW60+*hBJ)t zF}h+(H?IdcH77f9218zFma{=8u3g{G{pixS$!FR%>nb$9+}LFd#%ALt^;uxYTZLhR zj&uB7$!sC|4M`?jtO9#+8&)M_duR` zQp$#6YjK>t0@UN8lH9)jg{`oSwb>_~+zend9`(}+Ya0xtcng4tLz+i_t*!E8QFKQ~ zl?{6YolI1xEVos$qWMh$4qzrf(J$myTb3a#2oXCUR$p^FTt@k-WnM*h{}WW zbDLc5yg&%_Ls`d>Dg0T=he6%~!HTJTo5~nXTd$yP6m-qa3ob^C*DiYG^VxQnND?&Zh%Snq?82GIl`ydsoKqn>(afZy=y}lamwrOh|d%LBB z$5~_q&i$tDqa0&URaQs{Q|dJ%LYxMZFNzy5jG#5o=XT9lrt8=bXI%zs)X#u1=rY)O z$W))NVy4pIB>XaEBNDVq-aVjh-_Y+XWm(vQA#)6KI1~4m4v8SLAA1^Qi011_jTyTdp}fy!nG00@P0K_{4OA);-3b-r@Qs@c8$ZUQP9 zfXe54Zpkx}^+1*toV)veE%ea&S=5+OfY(Dew*P)uheJN-ONXKE1h>MnSqNuktC2Y!)=w$PorYt~y?oRTFMD zqcC1XThOoCDr8)sGDQAuy=oVSDf1m;aA)!p{tW>E?pkqcOrPI1?}7nqU9tx`#$^tA zx)d`Q>4=H^2hf4*Jtc4mE&7aY#8{_XD*!O49X2fS>uA)Ad$!+<$F5wACwF$@aUA@U(BO`8D^4v# z_X;3U{W3F$)5{b4q%E@L7BK^YEILF63SL-DrQT{URB>o1=mv0GC~S&+hRx|itKO~U zEsZ&9nupe`wrW{b?Jf@G()CfJR?iSO`{3u@>ukgZ<*)-quZt{{;-AmUA}5TkTYHVL z>`&iwnGvvMByGKkp<2m(Se7Yt_Dmu@;}YwNGc1wgS2po$iV`xP9+{Yl`(@ z>;u>4vK(V`4B5RBfmFUNP!iAztp2#j*63L~qdqwo94&q!qfg{bAfLzBDwpYuv1QOf zi)|)X;Ov28)<=0PZYo{g_c)h~&HlNJC-||?fV$<$7a8h`+N&VB8zT@L(Dv9x|NH1;7&n!bA^-+}sEX zNzx%g6(1_mjm#8Qk!)E6jYC?+(6?(isD>$G*Ji3X5{xxObKPx%vX~xLVBB71>RHBf z!%TtHn=4UmSlA$<-RgE>d<-TRy`cG$4l;`phYFNN(6dyZ{scJZsCWik8WoY z`#Xb74KW0S$u)EttHDnk#W+!JjVA`y{`og|`?0gvXZRLbf!YulhJr2AfVuCthLf)6 z)+*CI$1~^16i{|^!Y6$1?2U7M&hp4?wt$nCTvZ%WaU7>w)=MDx%tIq0iai{wsQiY5DA1-(-87WE@ph||M)G%mua}X! zdR-J50f6AW$|D*!t(d~fn6mN|MNnD%X}2>A!zMh}6rS}CtjF4FJyUfy73ve2 z2~fO|&hP#QpNiUYJ9+hg{Te{?H#axqxtCv#w=TVtGe^b2pv3@# zwetR_Wf`WNujG@GI0GJ*FD6enE`jwry_v%)B`_W$fcr)U= zk-=x`i*XK{QWaEch1GTDsS{Clrr4uA{0Z6N4GcPZ={%v2J9*shJ0Vb+B8%>+w{7jE zE4y*=GP({((Ggf1b9$o*pa+0vSvcX0=om>(D2=jTJTSmZ)T|1%`0Gnwx(g77tTb^t8Z)X;>sFM4bnemK! 
zmj`kkoM+niz{9VsBQmS&y1Mp7Tb9crJh1R9x1P)EW)QToW&)~}m9%MQ6s%wHnj+1A zOP}D8!LSjt?Q*sE`YXS(SrLw2{xS4?mVhX?d@_D1;ZmH&xTtMY_Mtc9HefQK(O(IQ zlmqE*aAW}K7|krC3>M5v&}houAan`-&Wl*x-#+Er%31eeXWKvKE76Qz{6IRSTZC|+ znGS&|p47BzYOT4PjUB2)@TM+ArMU(-;Qk5`3a>M^?chR2K^BV@RrXeyo32vG@{}(L zjgZVQqImp0>UCjqktQ{IaKc=g0;+(S@|5_dc`cAi%gdOZxY1re3vxu_rqjpK*~Qtz z2>6_lrVi&2L)ZoFcCGw2RSv>@MB+yH5*i$q*XK4uXroii zidY;F=06o^7i7Q3_Yut2L0Rl!3eUnbnX=kwf?|sT3MiP0J!eOxT$?J;?#@oXi2fmv z5upgq`PM79iUvJ(EmKmGQsF2feT6`UHbrldhZz``HtIO9wT6+qwFO2MC>+<@PhO{P z)Fj_ES~|DFk(nxWovaco7O9o1`JBBydu*E>f&9>31+@EB?ioheU1usP$1ya_U%+E^ z=m2`|4lt@&&%vA=KVx-0b%k+ltXH!_xQK>uhJ?Pk)(nl@#yM}~l~i}2fBa>GBX=0Q z`qFFh`G5Ya@oWF|U&hYnR-9?Ch<)PR1NX++cRw9RcitP*Gj~Px-S3Ik2j3T^2OmUs zOEGx$o5&OZ5_!$kyip~bF81;!d}E>-GJfYh!IpYjTdBt)PHnke#t9$B7+LQF9uJuR zcw#ogEpR8{A*MrQ1b-L26hnS?k>fE&W5)bPytlPCjeX?TGuL|>JLo@#{yK9b9aH2+ z@VYxw_tw+cP$dhr)Kk;W7!lK~h+2YyDZ(LmaE*6_Bfy^WbrLuF2hslcC*zTie=J`9 zd;cKX(A)ZrZe|AnUH}(UACErr2$44Yfadc@2hqK8C3bP@yBFV%(T%M*HhP{6i7Z#K zkIS+7m2buYI;U|uL2u@9B3SwC4^cfr*0=YgdF`!uxc5%HXMZc6+3LlkoujxD4Ameq zDrsk#_XI%HN9PUzI&ML2RI1Uc5xJgd0E@aD9|Gw_+sLbJdj*(9ZHjdq%h;1@$pFI! zO}08M?3uEyLpF-F5H0(g!;g8KI~qyE@9q$IN-$tH%Ox0>vaHv!0L;cFrmU^!QFpDH zhkVGZjuHotyzBf#T>jLlNOKpllqH6SuIngt>gcOJGc}$u2qO6DRB)ET?m!_-aJ zgLQVU*;?_g6IC}7#4`sApl7C@C!?%EaEFaHGj|a!0ld7Pu?Z}6E>_QbPPyare%1`A zyN=6Z<6L4u-z?q|m14 z;F&$a-8OatZWo!ErrqD$1$m)F7`QwDA7?O!;gykD6_~<7l;k>nPO?B$3Wg3|v^N0AT`biVV}rW z$7Nptt2RyF$UI+jU$@1HI!=Q&i7qlVouea>Cfl~fC4z%ZI0=4toP4%MWNRIRlU0cP zfcMQtFA&3dI9I`$e3s^3>){>i7^-^1f(@R-GcDrlc7_=txkl&bGjn6QkAKQYW*Ug7 ztz@^}z*PA4tJ^4Aky6h(2&j|u<373f8n9=2OM~JVq=ka4vRNdUqRF_kkqyuDzH7+S z*;6ag21H=8m>a?kfXKs)=8++OUb}WPR_?qb9(eL;o=8N+eQtmCxVJ?s$Gu@J6PX_G z_2bR2zk&g&#@!FxjjTg^i0$&u=H;!h*Y(D|7ovXmg=jtbNIJ9mRy{_qz8c4Gzkx!d z1AvwzjDurT9S;F7%B;UnVYpqb8bpWl&}hiqO<@m^uRacSbTAiPjAW0Af~8Tz`RI+1 zW#}-1B}4hc&jaqQvj)4Qlb^uSnR4%Y@YgD4&-Yg0AvU~*n;2E)EgKTS51o4&0Qi^N z5TU(*AS>r35zlnghx}ndpP7@bW;?$3Z~byy`NOY7_39@4SYQqS641o;!B)gwEAgE_ z{b%Cg2OkMsMjT_f1~+cR&f9Orbayu<+k3J5_B*k4=~AqmITh#bdo+4)ZO5hG_;R!! 
zyb$+1c3_VuXXd@63?f&Qfi(GiB|SVK*Ozk~s6U^~{BaEm$AdA=S0Y)`C{ zaYsQJo8d98>YdKb2-7+W^~T*II)dDF!1eOQvk=xjA3vP!4meXZ_xi)tQ%!)wNa?hvvjCz}8vd)l%DpxoD`B6UhT zIv&8^tu8jhqp__!unQoziLuy*JLjTByCoZ0@S1vSs$IF_zRV@Jm138SwhOt!-bn-O zgW{;1F`c}326^eI)O+orOMcFs3#A30)wWpQBrx%Fj;mqkIzBV#nhmrz1DgYoa*MNB z6X}@e>q8ZbdCjtztq^cEnoY)L&6fehEC$#~ZL*-r^8##2V+Pl)r&i?a_~i%0gg3XB zBQ1o~#hJPnL=~A?^~(x*eMuRcoAid)`Z?_aZaIO4a4fe3GH1cz?bB7AlLVH=?gYzh z2IFwYkj1qV6yCA;Q6qZwxY(@(33IlEgLkmmhX7d;Xeg^mL*5ct>0jKTWs$c9d-F#y z#kETJ-O6x{u*gk%?KnJ(T%+uDv!kW(xW-+ayA4+q zX30(E6js8zDl)2|N7Ut>JiMm@lFjOuv zh!`zOkzu)3&HXeAeT(uyA&%FSugfR$CVN18PBmmxshzju^4X>o8?G312)mo;o^2R=iIhl1pEO5B4xi4qcyl-Ep^&;2deU2&PJ`twoBpoDzZS*4c*U|gFY1dWc=@gOs>P8)Tlb9l5*AJl) zk}N{C_Q3NDUii{i;_{8Fh0}4kx4se-ZSTOTrD(0L#R?AW)M6uE`M19rfB37Pk5gw) z$HHnm%IDYOfe$L&Oxm*~DhTeaG-DtM3_; z75uhH=CRTD^SpP&{pI&XjOe?5>L+8iwHu@7-@sY6v%0@8C>eHR!u{U+neU3npL`~> z1xLu>HGt1MU;TPauIkl<9Q*{4BoIAMJ!`ahx1u7Z8|%(+iok(~X!N%jPoLZ(h%CYsimv zP1gPFcl)_SVRIHixEVd8&+ISHKW!>xOP%o-f487lJ|o)pqB0TIQJbT-?AQ&>%v)fE057BB0P-_PU*1UNJzBS2}2m z0hgJ-TaE>v9+uSW?Q>n9Ioeg!wRHn6cn5j1oiO9~+ZhVxd~_B~q6cYfvF$!R3x}b( zL7?Mhay(bQNdx(Al<&bZj>D!p_&TPa1#$2;h{FByJs(m4H|bQYR$ zlR@?gP}r^1wGCos9vF{8tbj1H*d9EEyg3LN##Hc1S%SmDpB>?+G_GsfO7erYq7UI%t9rjkW9lN zSn~h&x_zpbAc?QRaF9ZoMpOmD7@5w`gK8Le1-aU2R^t4bW^9~lap6)NFy}$f-V7=e z9OX00SQgo>0{4;WgzDLm4)Vr-}O|MF8g5l)ZftdoXaSy!LZzA9A z_k1Wm^`HEyxcIMsE6&y$S;+LFK+x=HyDXG-J=zSQ##~2bQJqm&kt&6l{HKiC$;Vt z*w_3p_snc5pI7cn=tk}^fHo8O*j%Y%D{LdpXZSLLbg98>Gu4kgX#Df=o-MPWW(9GQ zJXc+I+0Z&8gCBJzcXxO%=;T)hdW_rkXbXjzB{p_BmatDg(WN{;r-RNt{Jdy3EDyrL z&X~$JdiKkn?Zt9op~FVB`WPF5F&*ft?GSXBQNY$^Fe4B2A6&pZwDoz;2lvZ$Y3MEA z9k8`BW$fL_u_^C4oc@^k4H+MD4_~#J0s|d*))v46`Rm=Jg9md-_nMgv#-x5Zm!fd; zmVVan!NQ>Hv zz0Y`1o%OM)+1o;IYS#Ba!#IdWZ7$X~T10Xy3|fcgKu|K6Xww{ql#?(GW|4xaXCv&y zAt;b39hs*9=GPpA0Bx#B8MSgh86ZKjO5RC8_!0&oJCKzc>EQ6iI2{|vXcSCG7U|OB zzJdj(g%O_M+!02Bh(x!DaL_x9t=&Do_jk%7q4Oytd|n3v8D;j%b{`B)I*+?MA6hCq40XZ*c@ zpj!ZfQ$B=|j7!vQk&N5;CZInoS9$KK8;mj`OO zciDCK*{8hGP|dKBjY*}b(0Sy-;_rkn^CIW(7aCI zoLi20Huj$Yq;RzEbnu@BJnLtWu|_(v5@QGGpeLuGsmH3i6kwJy+a@C|GxXKo?JAzB z;5`q&#}l1h;v}x6LhyY^vh$B%>>x0)?#1-K!X*5uxr=So}dZ25~x)H zOyv?zn0rbG{g7P{J&Di2#n0J5NqMrkS{ZUlNA=Uurc>rwStv)ak>~JZQ*$R8*EH%j zZA0ear+hs;UWgu_cZmTq!uNtE%4D&XTmRy{vDNTS-PKp?=L}#IV8I*fm1@x5BVx^P z7TIw40?MCvk_xngE*gNqSP1H%j9y*4^))L*mzGW-CIf9OB^qRm87XLBJg~!oxn;!4 zOFN!boAs!Ipxyp(dvK=8v+yDrpc9s~%ubnv_->zi8iEJfq}ijlPKzWS(hs3z%fO#W#&gjk&>OoVLp$RyS%m zDpgM?a;byJfO$zzjce|kB~meJ&2#rBL<_E6IX6b)JleL$#i0r=G#COVqX~2%Xb=u%vY5Y@Wdp{U{&@oUsH3}%Tc2r+a#nC3qB6(UE$ z$Le~U>m1iCc6a;G!9pcX5OM|$Ct1j+yHhlvIlql|Zp6R(fB(<%_BURPeI_ubCVvJFe zrT!lCD~b(Kn~a9D6B^Iaxih46_yPn$pljxR=GA51`{>bC=ywf)+d}U5cz>JE-{hKW zOy~xL-$B2+yw*qFyWFG4eO-X@2}7bDS2o1H)cLu$NW<$R9m*7HYDXaH?(GGirv z&m|WeM-M}xV|KlJWOfDJu+YvZ@Etz$~b z@bX)+INZVk<2ICa#sit+2$`E(1fbWO@D)dazLeHal;x%eJYmJ@kIiK#c~|% zFvDv%qV(Du(f-EOcx2}!9zLu^ZRWc00#WG!GTKAmJj@8)V4iOGa>*$pPznJOln4zn60#X8JVJQZk5WyQ)p@cqSH*7hOVyPD|1E7in5@@;?$X> zhTq~*^+6kFbfJ48VF((y>QGR=cC=mX%XS(Mts+JFr2PK zxn_m5@`ys{R@PRDP-_VOD4lGcFqa6+w1S9JA{cn+pqRih!o=is#+hFCJyRN@p#%d- z^Q66}!Flg;xxouEK}I@Jl?$dxSC}BuWV#6kiW9}=Gv&R#-A~7mxD2&T_D4Y}KX;kg$WmyRett7A&;kl&-g1bjp=iUNk zTe*MZ=1zhi#YwKvt0%cRXmp_II|UyR#K%?!7CjE9*$1(Fmp* znw&bb7B??ljU_HEBkMJQ+ychD26GR#m=83*@1q}#*T4AtvH#kuao zc3&q_F9&SNZE436k1>xh9u}yHzHKbCi1H7)sEY#a!Sey6+&LV_7OrsupZ8}YqITpT zvUt7GwWT@yNauMt2l(Z0{fm*0MoM1!LrNx~;CcRTW0mwnICj64^5U}?T=dI?s_XD> zzsvP^UK_!K5wtVEW=emb`ELRifAZ%(7lW_87F7(NMp}n-j6U7yY{!Kk`Eu2wXtyf--g{zmN zyni*~@|&@U9B5G0y?LUl(&^JN4`9s@Rz1PE<MpPrC4341AdO<^5$lA`X1Y9+PAPD7F}7(lZ|DNS&ghUCn-z^M+(9mb$hFP0(;V5{RUw)3Kk*b8}R 
zb+x`k9o9K1;0JuC!-4+{k+IuOnz1y>=(ZUmcG_zhU}cT6eyVf|VzCv%ZDn*njD0Uy5ucjv$4H>yc3K>{IWB=o)v@rPAXS#MTuu?W$^3d+Ay$A(W z@thnNXR1U(DmNM^g~TE+I}0ha3b9VkwTu2>5Sv@w7!n#8=_}+$?#*W5z<4zs|L)EJ zWN$3x=|wi|@xTJ^k1)6$F3~$QF+H5A6W;5AiU<9E3Wx`(xE`YNxii779j=YE0TH#j zTWX}Kg=x?=-=62^w>;-g{QxOy$xcb<#N${KMPbXTD< zKG$#Fi0Nn$%jt|zdgd_P+>6x~pSyW8jw_4tAkq8F|L4Do?jJlC&Bq^^}3zjcx2n+(^|z8e6{1IF|h|L!lxkNg*ZDK0$ocx-n% zarw&CEX1?9k5QY}B8#t}neBPykux?&k;ftPELok1o_GA)-}o^6^F6iB&myfDIUT)g z^Ol0T);{ANMd2TJRG0XiYyFo_X7S1{WWn}7J;ho*>7~HWQ-}1Si zP5IQ^O#tdI|K9J%o4@w&qq#SVCj71-KLlvup?mztI2aqo=%~IF8-S#>JMW6^SHBU} zE8mRD-gN}Ji?b|6YvWXGz5Z5IJyU*TozF2Z_1z30_CY$>>ejmoke901A>?#njcX@F z*Em}p{vuI6LKcUkew2s9*g*bD&|p^wQ!6^HL57q$Yg#nAMqzG|vSqGISoyy6gC=Ic z@+^6KJMesgq{g^rCK3nWHq`xn`&gjk+|x+S)82~p;9FnNBYCCw=1T{k>2YnW;DXl; zNZdLnC$%ZsucIS_9KZ&@XAg2@)MW*?^a*t$7M7{M*-?#o>bP}&M&mjY zml66sYhkPb;PZS|UGh{kEo}{*(=lhg40d0A)!7yiW&zO01Q{;*>45bOT+XX@%k)%B zQ|bgjylCLmz^Hs|qrqRUdCg8$R<9wAKg9@Fifu4ZE zekltJ23Fd2w=beox3UAZrr8we#CXk6TJIp8T{bq=e!Hurpe~nH8IQiix+BM#nMLFP zqF~2LTvq@&70k4~z^wpeo;1pJJX4!!R#sWh2%d48vChCav)A|pdBrcejG;m0b2@nM z>2pp0`~GM=$nzxZWa&7SHT_RvgUz0+b$*$BNDWE8I0ggYte9tfw(xH9?Hc_p4a{o= z3;FM(?|I@`xQe0BSdNJv9SFjBD>@L3G;51QSj%}Fp=e0?%?AGbqL8>wWid+89uUNh zaOOG;#!%z#sdGB;c{@JooMi|@5%Ji^HxUw#vtrSS8ja?<5)NIUAdxhpI$8zF_xUgH z=^BygkW49I(LDGjqX6#ex~@nK*~GaMv0%{qAlhu!0Dz?D?G=eER7Db zP!FN7sg0?xJ$rn23xP3u7eEdX_Q9wCK8~S5l;b+f5TUTR!^%yKlFx81uDc6pIv7!p z>1}0u*^@Z6LDYkSx+ZJta16NVbO#8L>tJSPq>tQSwmi#-*n3uL%TXnYvAbPyAV@EU zJ=fP=L$u+K85k6LQXHe0v{4qIyZ3S%n@&gUYmu1gVR@#Zunnv8G74zc%DL^%v1sh< zA~+bh4Fvgur;g1=5gIR}Fe_NCC(z-$gy6DH9t`378MM5)c|C66MAz@SkX^ozCXDey z)YewAJ*(}IyVtjQZY@qQ7V@IRv$|KWMESzmxa&h7h`0Xj=VSQX7ot3cmKcZq(R|oV z)y$2~@(3AL8C|M55tN`K7SG+li4dp8E(UHF8ToU6=WoW3{LFtEXD{3t&%F18@u?sB zzBqsH9r4DiZ^U(+sLoNcrCI_Yq@1dZ)Te$xq9XOrH9pTmR4PXE2m*Xq{STI_xE4OJ;>cimxXCQf+12GD<5z=a;^DCc;N#fx*+*`PZX05VbT zQDu(k)xGy82$;im&0|kik(rGO&c=)sM*9RHodz8mXcXQQDNkzF)D@8g%wCxadyV(m_*s}o3jjrI_} zGJXZX-(6exvuVJYyOCm*KtOrdiOElw7*^H{j^HCYWcJf_d4YirMetx&Z9FOtEOKAJ z8AgMVtTW)-@fhK{ZtIphmnEzRaIV@lGmqMSn>l&rK?R-C5j$@8f;%pssqHp87eFjp zYefLz+ON&1G6M@4?1pEr1??g)F;@Z=Y+)S$=#p!>WUUZ(9mnb}A%VYkPktIvX91b^ zAE_SMI@k3u-fIKEJnhXIAMIC>uC|-1jf;x$+A7wlUfW=el|IcK5o(g-u@RF^h-6}> z=&f7gdDWKXDJ>{+NM{~?k2T$;d3o0*}-Bs2Y zJM_t5**PHZ3m<;h{m*8kq#=db?y=QDIXNTk_3To&1d>NUQ8XFq;O(uRGmv>q$~AWn zbKzYWKVl9l5bra&sb!x{% z_y`7f!4VLzz(|#~*Wc%H02IuQm5wTzLTz99>o;~#&I!-qT?F0(Ph79g#u1F$eehSV z+`zdEl3_M;xLxt6kyV*=Na6C76-Db4;g?u}iaPhdClto?#Vio@Gx2 zt-~n9i88BwQUVYZ^n6i}l`?ypnVNE)@<|5^j#w@xv#*a$^O`8L*@2_^(>rc6EP zD6>7x%QZfoyKC>fjWU^DTj07;79EOm1vd@AlYq&LOASLa==b97H!sGS`yPlo(VjBp z@x4n#1eY&giQ!)FkD`Y%_g^5&Sb&fFH@D*ar#>Fd2kwjOU;1J!5OE*Pm7_CSh@Nyq zZjJzj`0=cKJz+dkWW^>lH&M)s%y|!5?;scN{q(oSU-=*Y4*byt7GD!&zPYv<-~HL| zW$Z`s{MWx86XaV*kI<=2d};nu((?(bI$MG|r`>kuO z$j@qhU(ajMO>Vf!9e?mo|LM4Mt_d3H5V1C*zthP~-1VKED1GSZ_{g9B@tE*z9h-d@ zUigh)kKPylAlltgw9x&EMh63LUZ@7`t#>d|D|zNNj;L|rOk8;U{^(rWjGh1R`8eAm zve9t^`RqTjw7~DpHlN1U0A4N9%EBsV%S4o?lQM=GhhR+)g2e-@(nnUDx6zaE5l0}f z%~r)ce>NFL0~&0=TO-AJYf#X29hV&_tys^cyCkeBT84#9Q+M0Y!6II(#??c4Vt`Ta!2xvM% zZjgQLA!eC*XJego_SimbPuq@;H8UH^6h{75ac}Aj6XCA>UC9Gwc6Pgko(nX1LtWBd zxnz-zyWpSey&2^?KE|m-a2h(Z+v$0?*_r}(ym>$LMi8MbQa2MY0McFN8{<@chS^uI z9cMJ3Ld0;3bz-xO)-TlmD932h0uS*Or z0koh|kX3egs!2&U!o}yc;HL=qz?78!p^Xe^2roljAaUGM^roczs#OU9wD6o;7!fDB;FjNn_9ASRl!#GDO?7vA^O z{m)ue>CNRtk@B#xV=2zA)#CJ80|F?&I$Onxhk276_$@?Ou~?#xz%sefK#Nq|4`gff z>qxDVPXa*%CoI}680esRoeyPdi!UdWXN_}@JZ%mFWCQ&~MW(w15{C#F$_0~Wpw02H z3sZYcw#NGkoPEp3L|((#+8V{y_Fg))+XuE7@fuN^VmF<2peIHKBK|x47|U^Ga3k`xR{^BHser-hHKDI6lo2E%urb?*=lk*_Il&kyeTqe 
zz9_p)^(=rPG*sv_J%;dEh3{NFc`N+|jCYHRYc4u>_xbu+_wiSlqgxkGt=FR2swdBq zKA_{>eYcOGxQxw+Hc=Vc|n_QNcmPOn@A zuy_qJxpZ(GZDeGH2w44fo1$I61jVL6Q{J&%rph&{I!-fcM#>YMgLcd1KwXgNQefcn zg)*m+6(lIH8dJT8+kzcO?mFS0(YlcrA`%P8u4~AGjI6HbxlU0)<2*FV>aujxCTc&j zzD3)~>n_4n8r3*+_Dnih?IX8Oxfdwf1V~OhMr(lf2^h?{IR@{e5zry4JVh~E5%|Ci zo(|YT|D=&)RaT75onIEH3RL)~E(>}f1)l|o6M+^>t^`xqGi|rayY3)xQtED2mOk%-sxjHhM#rZl6MGQyiB#1PKOj z+IllKxgO3Oe0I^Jq{tL0TaHQlug#Mfy53rIHT{lsFw?QmTrIk`z)tX}47$%g>(#WA z>9p~Y@JoCv2vhFlnfFjO^e-g>MjOL5_UXb3Q~;EAX+`$S6&Q;r~&jIJSSk#0#HPeDgy zQdksnsFDoGe#gF}!-KHKD@!oVFP)KwKC=Lf!L=iu%&}`vhjyR?;lLPE7&>8(P~F?_ z#O|&?M+ixgs`4Njm8}W@5aAeIUcPcYi^TH0cD_Y)iT)vy83zw=Nhb4bqdu!2Wssi{ z47oMOx&i6px>*{V3eoABTk1rS(3ZJpnukcfT5}C8VteNx)6stJ_!y+hJtn5UK#h8+^LXlj?sMc`C5K-e} z7eB?ksZqwbC{qhGqqN$J&CSgiZ*E6* zO5}#oF>A2EK)RRhL67Wz^dm8U_IzCWo!^N?qC%HA4&XyJkHMK4J?~+ZI?(PCpS#K! z#t?Ul$v^Y4kHmlR-~2VkTg?0Rm*cjaCgiYeBIqHerA(9 z{Max2T--ND#Byy5U8oS5_K=fVT*ENl^%LJ4@BQ>AqR7`o42PX~?Tf!3SO4{IM7z_) zQ3JMku1-ODS;lErd4>npETmr9yIX^Zep?_0_^e_V<_7yQZ+49_9Rbj~dwWqmjl%-y zrk`O>o@JlHpa7W&b7IKFvC$a%IuBpx0QYm~_`F69Szp58ms)M)%;mlra?(b|i0+Wl zIdq~5F;+0tKEg&pxUFs6V&pw-7koL< zMj?wvbjdHq9j%~ z(Tf&yY_GJKTM;bfAv?TxyZFjJ_4>S>-BIlB?5Dg~Kcq7-!qh1{?sNpat`l(`n(3zl zH7i+U$BM>3>Z;McH1UHvb9ti-yJH{^(SlWW_{(!Av|GF;QWNahLb(70+J8rbgJRt3 zgnQBr6pDZgfCk_8d;J87r`KAsN?@5;V%}5g zm-#}gZok7gj-uIE$$Q*hAJ_h7Rs~DovTiQ3Bu&f?qesZ6;-_=B$(eq}?2e8)+rDxy zqi4ZIb`10%?`QKGn98jf5BHa-JYRnLntn@==F+hMMj*V`!-nE_^^a~z*QQFt406>4 z2IlgLfn^3q+(xjaSY>q1FBSOjc`#;rmJ=8%Z_?XdB$EV;3r{|B=d%i#ZE1CYrjt*t z)MBN@hf38Pn8rfKJ9p$(I2mV}88VpyKzT%i2M3^V1;7?x>DZkRjMZ?8J~GY4;G8f6 zNvDReE8Sj;^zc5DM^Po9>v=l!%qS$9HC^P1KCbPIP%sJ1=wOOR+%mmn%JMRi`@tZ( zIK(}id%uT}K_KVmPuEd0&4EIwaMx~ZXCa%NvFa$MCndQ&k!b9=NPipc`sH{mklE|n zS?-W)1VxM+K~S>ft0}lD&T(Va0tk_ppY%Gb+yM^UpP9w(HYmFzE%Eeo7$gNH(?P~( z0t?50axzc*)fa+jiu(XXATp^DArS#9t~P;DiK?#k1I|1VO^Qed3rRZtquAf`jB}kh zAO-_0{Y@?EkaJB3LGnm7_@jF#->-)b{{JNRiNF%n<@@(ZBZrMT;{$8t_`vQDJt$!=fzm;WmI zZ(NE~XNWYBd6jhK%vyA=UXAYNR^0Vn-xWQ8?3FM477^r8Mzmv`j!_9VF7}!DHDvj1 z=s0HVu5k~z|6h3aug1Hdd5>%?CeGLGUq$z)?mQox*Ei#9zw-xC0<3-d+ddJGKk-Do z@X9N(GwP>~K*IctG`mgT+us(^xvyQ2VML#%2F9qTM)Nv*3#|kQM*iMI6FWRUjEy_* zjz9PF|7i@KdpVYLbWCK_>%?|7NuF(uGgG@3X4X?s;@V0I6sKz1;O0OyS6nA6Pk z3iHR6=~kkKQ(T2lOUS|2q#u18k#!}?jLx_Mz*FDlkn5|aoUM6LSB&;110p*D2xVQO zWcn17m1lGCivQEG8_2jcXaJE*blkF>HpPh3lq|N;7j>=)46uW#9E=iMl1r0(d5O-$ z=-3?q736vS^cn$3VZ(ggG`LRJej8qkWg=oWBiPIHHguw<@Da__SLv#)&N?QZAs})b zb9Si9pa^FQZ@S%X7RqTOttyv3S?j~KW&`}JAVBBpXlsCMb#yoch%p``c>6GDr!uuJ zZM+soCJ514Hvsn9pbS{Jp$ABrvbGSejZI^iNf*I@fJR5>vELRN3mWYI;C%enE;Z^c z=%l~U2BC|iL0Tp4R<>A2bG4I>#SBc!8t>o!k|x?+w}tyGeTaqUnenlnzZf-e4BJ5ulavXfs`wD3(wPUKR$^ z%Km~*+Hf9`^K+j1SGN}1-nI7V9iJa}Vv$CMXJe zB3hNkc}aYp$Hz?3NmKU@YU~o}^KZ(1L~m9e+ZWtUK#oyi7WFF3gPu|Q6lY+>>Uc~; zq0BH+rwlA+9>+MaT|R#pH9l*VtQC&g7n}PDS{8Ab+4KN$nO`&&b`U$AA@dj`c+iG% zir6yTNS>$^7VT9L)Orevkv%pDgv@=Gm=Js_Ub zg=CqwLx2h-jse0Mx0nQUMI7K*zZo`CXU+sDBPlSc_ZyD zXvub!2#o3YX1huB+F;I&*xc>|kh+ZDb*3^-(Nven&}~9Iz!9o35W%Q10Sd-d{*4*q z(5MR@Dsbtb;FrQ})#rN4<&k#I!+B;ZN+m{Fb@-;Rq^-qH>wpz2n-7l}D~88Rg-aIg zMp1l}*(YQwi#$<=YJ)id^hC=QqR6|D=im5W{z1I(JHHu7X?RAUX8O20jP4t6=eqjB zz4yi39e2ePf*y?yVi{w#aOqlfUVbT_`k6l))lYpkc6qLRcAKADW87c9i6XttbNbN1 zOo$Qq#>Pgx_ru?MYtH=d*4OP{T|#gK=N^7ICIG%~Ub-AFe(5Xm*x9?|e}4Aac+Vpb z#WtUw@C*s2eE2W#$G4-9`oh2i-t(ExsmHsfp@+zbz3r_;Ib?jgCCk4YA8}cH-w*$A z%)N6NU@?p%BDKM_n{fgdy9q5H{GrdpskPO-r?JN&*Ib4MSHAK>EMU|Z;b9pgvy2T` zM3>9(bJ^uH2GqbH)?nJw@tF69QAd`SuDlb+-+UF6a3khML{$gFSip`LaW9=YA5$FX z(y8;JzBs|DA0J_sm&(~?59P^u=NfyTk?(nOBeRx%f?(|iJka4E0g5aDYh5@W^D7N( zz!30QM-QN(bv_3G^-08ofXAoiaY#dh2)F?6-6pHCRL1AANjm3KXHFOOCpZ_EFVrXX 
z-L-3j6&*_+Ma=yDQpPh&i45y3&5IdK+OONd%MWRba_=|J)d0X9G3hvE zJ+$wATK7~y$>;GjRu8Y1WIx~*f)XcKyw#VG;YypOMCy%pdg zgD;+4^wMzY%QbX^6zhJ9qOsdWk7hOtAel2vo1@*$%6I(&w$pd4v+;yX5vO(`mobn+ zYq&gGJeOLz&Ixu1#Cq6lqffUF+i%}uxy;0Jf7jL3mpSODeU&EawzRc*jqBvuhXZ-D zQI5;BdRQUPFzc8I27Q!v#Q-%6A{~?S#*gJ%zRNAvRuB<0AD^4;(hmg&g{*mv^mZ<) zreM!GtCP~u+K0R^&rv}=-{Ay5X~s6|ua>P7viR>PI)fp)q|aDL=U!xfbnqH}qj!K1 zzSnPm_d9oOaE+WV3qSx{wop#cp}ow1$PT_hpjI@PBLIKbllMJag77QNQk+?<#roPx z_J#IAlF10~NJj*-4re0-r$7w3HG@gJeMTdjYdb_)$BfbK5k;Fq2FKqS$uoG-tIkF7 z%e4>J}Z+=lVq187~(ZD#FV#A%~PaO;ZRah zV+F=};DBR7R*6i9;aS7(zpUq0G51v$pd;7O5bWGLno252Aqwt}QrS)AE3l$N(OWVc z=V`&-ne|H2(sWTWj9FTR>w1k0Iy{_@tsRe~bqfpgVE&$%lxZG`<(Eu``qD;)HXV~a zp`oc6fVH*}?Q#}Bi9Fqs*Q_i@d$j=%T+cSv)PWzz&CPDO*VlRi8$%?Y=1MVg#{@zs zUEGSZw!Q)=X=8krVsmppu3x{w+=#_RY55z0K|6Ckz?&MnWn0Z};AFp62cQkDL z?V8&FOY+E5PsWe@xj!3wfY?0a0i`qPt6@~$C8RqXR@XOTjB;Ll`(m`=$IkO_5FsDL zXFl>lboMyDa`9~?AeOmaO)BWEPHGxcI`W=sM1CVH`ICFP1`kEOB4x?Xj4^Iap84m0 z^2eg{J70*U5rzvRJO#|$fUdI-JQd&a(?5=k6zOaw@f%a_8(rj{b_=e+gZCEE&G!oIdGdg6>?4ub1ZH5oC zfKJ^^`XLi$-i+$}+fyf%;R1{ndQnigaPp4V!cm<}ZdHR81~od1#qvU|dS(Lpk||p5 zne*eB+Nr{s9z=J)8#izEqic60zH&X5e*$vpT4WC`rh+bj77NUbgj)h)Xrt~K5V^x8 zmnyZrI0*I96Eg(?KHud*i)(Ee{nFX- z@YEjs%OQ?biklg*Rk6TJn=e??hw0l4$h@bvBo7_}An3=mU+S&@jt&WM;C%*o5}Lct z0Whq{7Yr&yf;8oK6pAd7P*Lkb)u&2nVhK0NYVX}enums(;7}Dxo>0GDXNkr{Ru~~sG7ix+ziA`| z(hgR3pYxjAsUM#q^CYswHmi|{ocBCwPJmI;7@?F_h{8uMW$gUjAaXx{x*4l0Dky?} z3($yiO_xbG_kQ;}01&=&9S|C-P+8caCfpKCg}W%QSOBG3MT%1L>;)Y33X!Ylb=zlK zV4$EiVEDT0{UG$BcxEmvGTGVbA&|Dqb(;>DUK-2jejo06fs{ofr-u153qyEhsaW9ekQl%O@T&rw5uPU;+%=js~FQ@>>^UdihFJx!>a9aeVygcg0=nXXAxS7h?w=7f!w~ zFv?p>T^hTLgYw%w^GD3V_w!tcyb4WZT&{__D*J1-{ z&%o;L0CI`a`<83y5HueV348W{!YQST2wUf#raI-?pvRw%!$?+JAzjQ?dX-B-^5_63 zDOPDt?|Kj!7;(T?R$-?t;FxytO_FedTxc|S{om!XUdfT z#6mic;#D_ofMp+wZl{MFF)bd7>S6;7UU&h!!Egwy2y(Q=%8Q_iDBZaa2K~(RDKkxI z?rrUoQFZzt=#vglo6kJVTFfDf z8f^i9+uZ~>*;AgN#q6)AK`KAmE>rUdW+91K1t;1-9-91C795Ab&xr^I4sr0Fc$&rN zbBE9yn`&!X?&o}b=7{SoIyP`{oCb*A%iCm)gYwJ|L8-0N{mQ-iY8TJ;ZH3QFS^{X~ zTv^OuQ~<@dJP@d4K{7X9_}F{zdv>M11hHHz0wwV;(}KL2>+PnEliyo@E1?>@Cmdvrq;it2kC`+C*i<^%ju0J|-a? 
zk0~l!T{=cyNMj(f_0JB`{s_mVvlejZT&)(D-Y##n5ZLpl8?mt5uWLuz*Z-Y?W#*J#fv& zD$cF$@`I>dn|P92@4QAqPQE~PkFQOXo9i5eh}p74~%B{DDRYUQXXT0RW9|32VKMtI7c83ogTDy;XZAh$I;0g7miUb*ik5#JHY zqybohex>bk+*w_TmtTA__E6l%pMEOVFWebNWuiwwlV?y*i6q>nGwq=4qhYi$I9qSM z6BQhVL_PbycgN<=R=oMeFT^Gw>Eh%#SN4j|QcMM5&^dnKV;_kReEQ?LmT8fWd}LZ_ z82e*r$h?k`A+KM8&UfE+S8UvOKBh#w!+j##?g$1R#|IvIDBcACdH(IU;(D$rs((2Y zf6G7o=a+J*GcSG?-~P|X`}+vlZmzAxU;g=Di0*HHIZm&hiSG6N80`*YtJjZ(4?P(l z`Ex&&>2KYSYo}LUeI;)G{ug5Y)wiR994!-aHyLjey;Q#u=H*?WSC7G-|JTA5t_Vat&4I1xQI-sDXBg4US_cHLRv}AshBb2YNshT1AIy z*c$iTW7HGu0>1cs?m|L;G~5{>9~5i49(k+&%V*R0rOG0@S&k}lkH|n*>zkkr@9DTb zTGmdY<56*(wEZ~DGGzBYIG=S;JX8I5=}@_Gdziq)0?8G0-)pT!=ydy;E;bw3AgUKY z89h2q3wxEcUVnfr8=dNG3n!(`@EF=6-94XxQM=% zqetp|)ow}35WLNd2RcyLMP7%l%9%Sf1S{G{*UfeM+PZXp$cG*7e4c@%J7*Sfw1?=s zy*(V8{PAYC$=0~G4WQZ)m|ToUF>x^=A|%Af%L@_BjhfHQN9Z_-v|T?kQsF59Gzb1k105Gq5C z*JpX)o8Ve1Y4yFQ&3e3LX1xtW^+RiQ9Gwv+4%w|*xemq+HP(|!aV)70T*ZOwk_X0u zIx*;W@mhc|7G>#-91PD#|3q2*!BZ3A;>czanFQA{sL z$V6jK2&Sg;;0T69-WnGx1dmZZ6==}E6%nIwMhiv%=7nfBM5v~l%FG3%*&A`4hp&LH znhjo8=v{A7p`0It;&+9?LL=}G4!IAS0}!ZTj7cT%Xg{4emojdv)+@2TYWgfET|6Iw z&C1+`Wt84D)tCfoGdY7p?<93UFGBV+ruJNi2I)8TqFpzAqS}hlc))?W6 zD6JY_L2+#%Eb*na#$|sGWIQyBVHQQS%6#T=tQgwtdCdJhU%3S^XtkH~yZ~1}wzv1v z__;sd>R2!A?Cs^+p28<6*T5;%!;y!49OC4Q4s#iqFs5u!q98CJS+9UMp|_a_d2SZP zlfP_TJY#+%=+Ot9Su>F9f_zeiO8#?A7sqSyi*0u`4rY%`Cl&xgFo7XNey!0^X$OFT z0miCYM!}HR<-JMNyW=>|9Y1{bOg#6G|5^NpU;S56Vea>Q=;?UsyFV3=efKA$ap&nc zamvk7oU|J_hjyIB@$7x|>oI-vN*rCf7T@uo{nt_c(6`5RQ07P?Dla^PQMl;Dcf_d+ zXK_NP9ROq8w|_u44y`{qn8oI`-MCCtzBk;DA-sUI*#Ymq`yY;P|FeHK&i~+NVwb4* z>XprS_2qBIy^Zzw`=9%{`1F$xMW6e7YF)YogjB_51h>XeeEU^6dT7tzc`XFp;pZnl z`LS5ObR(8{=-_5IhBxW zbY>;y;EB#~N;Hcw#0fv$Qs?@7*dEzu8kIwHoq$PzrF4?{=VlDiZZJKFBFL}dRyLgS5BU{fqv^?JU&{$uU)oA zAa{q98_-rB7fyXdbeWEjaZU(WhHf>44@R;YdO@{W1f7Uah+*du)J-@ zo_y&TY83)3Y_fyH#HAd`Z`*C>4jJtX|D=KEHCO=ba+bQSMRe&@y37ET%BUx1nh_8{ zwOS4AjZrOtiTk=Ax4`x=la9Pr;qnnJsXM z)A`-Di=GgfnY9DxgB%FFv|GiJh*7g(jlY>&4L$RWljMeWJ$Z#rF`nBQWYx=g`C88N zSZz7VfburraT&p8Zpwox_M(#_6n8qu7@MH^ul#$n+w9pM}X zo3TJ_h|0GDn5h>T={Qn65CE^`r%e?4FyDQ&MFplQ92^Wz#tviI_+ri;^^Bps6s=}G z3uWx#+#>=v)#Lm`#csLEHi#NQgg8+lgmhe1c3Z^Y77`0U+!7_|Fl}Q41GgUtfO1X< z$f@>1thP;mq5cStNK+bk5|+w?yb~EtVgv|reQY=`=BB1(0iA{r0nRlO7(6=yz(^SG z?OszAx@fOhptXqz@D>0tMN_K-bhsUXG7O&J+^sF}s6^(K!bPNlL0iIj*w)l9+vTpT z*8w@U6)q*XxO(MAjF_)`{7s44S^2@!u7D)CxI6~7_@Yv6aJXIDL5O(sju+gqtwn)w@UgeBy z1y9PE$}VEpa0>Dqa~@yIwe!2O@D7#{Z_&|+M>aWyJy&8Rb;sTqOo{irgg@A#|F#<+bR*QJ>&=qdkxLONTzqFM)|%__H-7$e@iQO)loZOP0RGD-D-S-?uOb>KoV@P|0_2Q`)-=)IPu%xN z>^=W-*fV~-*GZm^5YFa%o{Wb-{$b`Lzlsmvc=L_e{p!~u-n<@*fTa>Tzkm_S^+YMY^ZtwtRK$*Xu0BQ~zdz|d@60d7;k%=^# z=*{6yCuV~lV*!M7{~5YjZmjY-fF<@|5of;$a9G(N#shP;ST8p*sK^lG7z2Dm5MG>->_qsUHQO{GrSW%#Ine#bqV; zEt@6evMNDIy{6p(Jlwjj$d1}Q($nDFJwyaK1%!8oY0ME-?48ScuC*hb@JXFCyP#te zgzMZrs@j&T%8?nuUe^||I0I=_0_Ac|QPW`UP64*HcM0MN0QFmf>oNSX2Fj%c!Ii>@ zh@=C{WdT7q{|shqW@TS}$zTtA&qGj`)ortq))^&0gMPUk(P9_uP1b(ltgK&1=d2?Y z+(AIwy-ufn9Hez%MHF5E2wBi(P~c%=X<7gZ)-YLceLJOJ^aWt7*p6a5p!T=O;JS|Q zy1qNDiYQutW@b^4q@6kB*~M|ej!VClEp?e|`YtBvcTpKngbyHdiPbjbuC;q+fCYiJ zzIG|KC^Xk$7g4zQL;55Pg02aZr$yB@2MD;|AqpYl%jEeBOR6 z{NQ)I`&kRXQVjW@hX7HZX9_`pU|~eIqg7(~5>PnQLI<1}LMhTi;fzRmQ11Iy&^tuqHgy=e zjmCTPngtg%gs9oDO1r_>m*VE;ZtQN~v}+~ryZyoS36MI>F1oF`PKy#S#YF_tVibXc z+p$a|xvpm6i#^`mGk*jT?OJpKvpxOn&a(iZvgsnzD%^)}9lW#l7*@CI?RQ48yKjn? 
[base85-encoded git binary-patch data omitted]
zYi2>(PRH%aMRBqQKf9B1y&6gzs9rJkB``C(V6P8tg^r{(T-q|hi+oiSCJNxDIRA=>_BY*olAZ2!1f{FdKx^S_EAI0UiCmLrQxFV& zvC&2WjhF~JjOI$yxleuUk>^VcybZHkP_(#k3c)}G8afqP$@Z@({CSXrNC<{kf$7N& zQN++OdSi_h1VO3jDcXxX!w*c>0y-Jkus`boFhD?9EQzoT2v}!QRVJ$9ws378WLZNP zMNwKVPtEZhbB)x_69@+J5f0~&Mu+R~3@yPpz}l$#jNI6hzGYm+vbWGOmW*b#?oNbF(9^ytzw*m*@fUuEF_vTVU;cWW;ytfC{d7#gqCUS4odlh7uGakT8|Yr^hKi^mbQD|*T=-iFEO8ggZ~s39m0r?TCy>0Ax0{41>?W&SL-lg**q0E*C{vV@`Hw<7&n z005$yh9TNg9dA#W5;S>DQ2p5rulVkKKa$tDhWV@;MIEwgM$>q59diz&zxbigQ48da$J! zVR^{Zqs5mRXX)qZndw=6eN-D$jSVpdg2@xv8B+W)ST5eebk$A1eC=Om` z(};*ttw{6;S5d8bjvm5k^BqH3R`okJ`}$hEVD!L~(L7N}#4WmVpQ2kOR1g|5^DHiO z1w^ZtDj=~pG3*0VLDWbIUfD7<5B+7N)334M7Q*%;gwfD~duE za!BFe&#XQ0|&k&4WSm@$*(gi-oxNKsiqJgIu5DXQYNbcLR%^k(j%P&QxyA^}?-@(ANnWvtM+Z#_~ex;qBT>1RP%<*dQV-mE(fG~7}^rB_~F zgKth^`Ozm+pieAP;<;niN$@kz+PIHCkN<@?1#c4!Wa)v2;^^k>nEcrbarDja#N_?E zvGs$uU*dtUJQt7r%3p*>=He6o>3@_3ao7L#Z^q>9_oI8`PCW3r&&KroZ$ulq z*>6901Mp9e^iYqDH8_JG?84Xm+P7l{W!h*pk(GnE_^D46X$!oP+b_HtmG$-5wTN`w zkIwt|vwq>Sz3+`c4Eq@sK-xY~&;m$6uRwyI637mR7GN2j^ULN{#R-EdIdh%JI(%bo zhGF|WV2AlHUwbf)(MJ{qmYeNtf|F4d0Gi5mdiPcw3raBX9;$PCf<9rb4*|||MhQ{O z8D>UK-GX>98bz(`ndTZ7Z8!uT0yV{na(kG@eGw3A zQI|SLN6GY+IdP-8S%Hsk*TGX4<^Flr#s2X|avhI)Q*Ak!j`30*5Bv-*bcC~CJbGiE zIp~ZSvd_+itgl7A+7j!lG>Qp+G4|?hjfQq$7X4*C*TL9gZhHXOn!Jfaq@x75M*q1B zEZe){FsO1;-@;?A!-w7L1h7VD`GvWv7nAL>mJ1e5z-`?Z!(gmNx{bQPw=*{9GJ9wzwx=+@XxP z21Ur<`U;#J)XpB|puAw;7R(rqS&oD9EM@X@G)Iwf?jNr{ERxN$eDD)mwg=j0x6 z7T5+nKF<)13~DIUFO8OS^B#sltkNALl%nb*4897){r57id&S$0#b~(=4`y}_A~+SG z&k~*brD1bkYb!;>#1Prtdl>ibAm$aWcO-= zCwL9oz%X7f4=F5}YqBU0{C5vvncD6vi?yb?Fy6=4sBWcqYJuE}=D-S-9 z0IGZAX3Rkc=Qyt-!X^kdETB3OEFoa?jG^RqpRL`P!I-r+-0yuH>p$^@d|r8XGmUqR zYu!6Nhv%S|zHnh9$`{YY(dNB4z>~K~ZgFib4gtl3_uj!3R9C@t6+hjq?| zB>-2D07}g$TqQ&PdT!2jyuBTxPCptCKN71K*W#ozh|RZe#Ob3?$0PsQ|2|4jT#LDt zR%~6r6}Mk{8y<{kR!VW>U;KJ(eD3qndi6-7TzKT+=)Lx491m@>q4#D8+uW#f zgN&g+EJ1Ht4N?|R<}K*dy?ZwsUL|T{Zi~yUh*RL=%SC#FqHdK#>-ky@TA8qf*k_YJ>C6%^qDEaGjwhtPw6oVIQgeBux4p6!Z>1_ z3=uEt*)l)9Oo6Y(aP}rK)i`D>Swsg9n?_w(s%C`d5ZcefhvixW`NA<7*~JqXgwOA6 z?WeQU#0j#%PlIJtR(Te17(yTPdbyrDD@AuWJx$Yh9;rG?0AhMbnOEnT(|1wS>3O!h zd?Cn${)WE&QkE@V-q8Ec=4Kc&9f|ZnHQ=rTTY3wwzhP`32 ztjE$Z(;g^W8f_!WwijmzsplM*&s7I3)va@};n+M*Xg?iCowke&vJTp>v_t4k>!*}a z|7ZB!`=~F}$);+ut_mKph|q($yw<-3WQG1eWnOOKn~z@C!)Q)39kaw5xhB>d2wruj ztf$I4jhTh?((NcKc%PlcVFY{}9(Wc3>&GIk;Z!W){1*LKk~FCUq*)c+V2afu$hlwq znNK_~GFSlNxeywz3}JFXZd+oKGEZ)Y;Jt%G3k7Cl$ihvy!Ldm#(5qn!kyLgSViK@p zVXSy`o-}rZ5@!*r?y0XEsDow_ffbOcMBZ8ck8rqcEpt*Vo{KAU8G^HOU9(k>Djt-% z4fFHn8}{)e+|+Mnzk-k1dWzbiF#_?Dc(Z1zMTvKT5hK( znAP>ra9RMUpqkT?8QrqyHWu&iK|3m*d5b}~e-`HOMyJz@yLUU#_%y3N&*K@JcV(Hf zphG6oqcCc6csOC)&=*FP7Y^_ePJ5Fm^~M-@7>(zfhlfK8uN}70@TfqsVd;^JM3tPR z7i0H5fsW%5U}Qx-|I9IZoeJOc`=wbb1m>*bktXX8UB(OS#+~oK%=|&PHr_xf=8gks zYZdAdp7RJVy9#J3LBqMjaaN0t-hVG<2m8@~=j}MUe=n+hW^n69oZ(fE5z_LtYtRS9 zgXhN(05TYAL(_Y`IOOxQ0MK%)73Bts^5UhKH>7swX59bI>oMByqX9JL^U>b{IAKg$ zm(OF+E79B9!f;^NP(Ft})>LrGebc*#Khr~!Hw|$~>!sxw?sa$%CCU93myijRiS%>7 zwmhY0EPZ#bzn@|I8hqHhwTb-HqTgDHkN*$b@6y1&mF%*NGEe~L*#cb@<5 zfAvqZ5zq|4Xk1*4Ii6i#SpzuSEb`id%#~$~@(`oC zo8!Cm+$Zxyyjx%WMqGXNV{z~8ce6KzEmMaWKA1U{JoJS*xXyZW2>|(9dbwS62%SAw zz;JGFx05Y-UDHD&MS?td!lI-aO3*HXHp6kD+_~Vjo^4iq#LtmYa19axv##(yI^3rH*Pk3$zdh3rY+*5a16JTyxV>ZP|ma^TnQ9V8C z%oSra57D!Fqjh-NS`+UrC^i4C@wNe$1-g2csn4OKg>iN+bq+RfaeS_ydFvJ+PFaKq zQODp`03po=uW?Q8+5n_gs*N;y>i+@q=$vdyqhTsS(Bn?1DaP4? 
zOdVPD7n2u=xooC5ytX3P;#o#4yjK0Jj@L@+ty>&z>V=yoU(Eqb?OozA+zC{fTj6MO zS1vP>d-d|Uq^o>lvo`IQ(G<_7pRmr_yz!_XbARDyKL5PATos0l^Wi>0wXe*WZa=R% zm=PZP_`sa=EU3}^d(qcjPv6|I=M?AG0iLX2`c+hF%A+7GCemzpYEQvrPO9jPq~=$G z01QmT<6DgT2toh=|MW>jK~!_!JwjoQL`3HOtjN^B+0D&HD54|xviA^nJy?;b6@>1& zH58-2{9#cZ!zvhAbjW>2C}J7S=L#-hnH&Yba82PYI(L<-i-bvv-q1=<3j{U3hKV4q z2zk--=WQ+{!ZuW1oC+i@az*&|JtGP$7S z*pgk~p!2-O6A8*TmQq$_lm;9=k*^tuhCBPXV~zYLir2tv^K&~E>~W$ebqXz^w_w6v z`->PKJLZ{(wQ}4_^~D;(i+9n#yOm0Qc((bGr?P5%wBL*B`dT()8SUXMjE*t~I}86U zxFrwod3K>12Y8IP{@_nCigDq|hZ!$?1sI-1W@^hQQ`XQE;53vfi1GG5%3vBRHeDEDYV3(+NsSb(+mdsWX7f>tRHKkmp{Wg%CjYvl*|e3-1~n5pD*DJdnEF-+mG!R zUypZQeks=fw|^^^e&$DF@5MJ_`0n*se(K3ML`E)s>e(p6Ctv-){1>tO@Pjd8440n$ z2=haksSl9*lkKfoy>gy)0bpXxi~ZqqG4A){sNaJp;W0o)qYW5muKRfJLjcVFTi5N3 z6{j}0!I0QSwsY%loO}9-xcITBW9yCU$RkP&K&BFuUp#l*Uf^{>6F}25;;mkvU9cJ+ zfB~3yKf1gmF!2DCad(jCwY&a%@7+veY(M_--R)>?Y()3Qtr%aw5f%8r9SuW%KfZep zV}>4(pADTNXM8FZx4O#6m=1wCTvSLJ#8dQz#hVNixmAZ+BLZl^@Qvn|q60s7=55&I z9(_+o3ia}8OesT1K=-6EQhsW=Td&Wv8K zMZLrRK878GW9kyQ_cdi%(9f)7;nd}(LjBundjW*c@)@;SD_ZSUz<3W@0wCaJQ-m4f zK0AzEcuUv(ls{8CT(yMQYB z1dVXF98_gA+=}leO0wGQ@A_ypQeCUTNGj^2_yx#hw2# zzpMV2O;xg9`|!MW*0geA8#V6U(@5_$one+N9YYJcca0KpPg5c~de&cLEe*VBJ>2^0 z8uuya3Fok;@H@ajx(Sx8@5$czzwmRPf8K@H`~A=miiRy_(Fn=NqJs&Dv(r0FFLnfx z2Y7!f_$;OZxf`p}xS1=~DEVL+H5UT{Sn2KTIUHh)`K*R!0d6IAjJi*PL+d>zR z;ap){t#vTuredESoW%}E6i!HG>I_sS7VRWhgJjO#eaX_~aH=8!-7@}J5j<;X^uf@- ziqNp@_I5Wr?s=W(K`)zkXYB-NOG_3bE+iO~_G&lJQuf$HX(EH2fan7-I0rf|jl1gI z6dp8D%)qdEPzn*tmUES`MlDo8yqp5>YYK~epuuyEOutB>+=*~(=$60Ke9*ewti~M1 z-7utVVsCR>)b4L@r-64nnc-d!!0;Yc+0Wrs*){Gg3;0f=xxT{dfK}+CSLP^joZIe&^7pKj92R^Y>S2?S^rtqYSIVHE}xH573G0a zo##CjupRNL%~tfTzXy$xC+=f+IF*jo?R)QCkM@Og0FGXqJ^Rsk<{$iD@$e7h=5PL9 zY`*YPT>at?0g^B{tl_Lk5XLofNTjb}N)$WHEWX77O0D8;Q8Ud&vrY+x94 zyxZlT(^v*bK=Ldk%mZMUi=7JfXtD_whR)Drjk%ZcmUKGYzudWfJIYJVC~^PxLstO} z9_n*1dhfg&=Be!kF!ZQe^g#npX>Rr?4!V1J=uPIc`9^s08d0nOU31s+UG_JCk5pWG zgCzi`j)7a3EQU2Acml9v=CQ@tCPs(=4t5_KSi1wC30#mqWKa*D>lttE=HcENPd!sq zXnL~BY^_qXmmMST6?8?DIStT99(`>D%`+_O&~-x@K-kn(o=4GWqBG%1n~;G9(VG}1 z#vy<(M6G99Wo+rlU;xiu+~7GzZ2?TYZzNSmMQ_}6%BkSVx&_w2=Vtz!FC!1038L=N zAf;~7c)2z#sL@M}mC~&)hIHu2qQ$F()H^l`Xs-|psh65<^s}D<=%9}rpBh@*Xu588 z*2v09df?tu9iowyc5d%euc~)#Rck~f;UZ&g2lhTn}KORNNd$dr22qhxJDrftCz0T``~C=9*PZ0(iLHrhJ(OMmW% zo_F!hTPhSP)btSenZ;Rn<)=q8Fy~=(#~3e=kqv9Icmo11BFqw9BDica4IgLnz!83C zXSzka9+YVnq-)qb7XzKHF%X5Bo7bRPa8j!=IE2Ig^MwHcd12skU!+JBMPea=N6HQd zc;Sp&0a>ayqUi~87ds(rZDArGAfKN_w_Ipe|o)h(7;Q4e&LyV z)Cddr=;8U8mo(RCE+-qy;90lpde%IXe&2Zr zpi~6fltLB3QK2FExsZYoKj&U|y$cb!M(2z%N@E3WdN{^wHw-lcL-aEIsPVHuvdY&r zun5WG$OHy4GoKBbHSSrWr8b$@$ zWp8DklPDeM&;>8k*!+1`fq+GKK8%*rMUC07ozj#I{=xz7~QxJs9T9w|M9=Z;~qx$5C1qS zyE}0XV_WdPMLhy|Ju3MC`m!dTiC@7px5~MF{(N|J@8QtHTn?l0z{M!x(RkvYd20`X zX|FEf5f0H>4cwh(dN>U{b@zB%uQA3L)$l6!Z{3MAXlJ{o(1T;r@ zdlNomH0jJRC2O?IdJOK|jnS>!fIL7P{4lz^34qv*<@I%pRE4!aifMlrr3yI4NThd# zvbPhUyBF+_p;u-tj=X=w^+w!e$XUR>h*3}gW5JK*IKZ&%Vu(go%{TCX0UbSLbO0?p zhFd;X&k7dRMHXucVg$L#wO`AW8Nt;< zh1}2toy!FEXbt5X<#6A=va7RXy_ZF;&PUK;)H2(&@;6+RuN#t2;|_fUB|d+5uN%AF zQS^E?kO5>FwO}r)D%U{J;sBhJ(UB}bR_+C}*dXVtgPL<;oEFI$4bk?wCKf81qUh}R zqsQE=?Xc#8o8=lAfziowORPn5g2v6Q9_v%gRR@v5=lW%;7kcaHtsztD#FI04k}>G@ zTa;(2(tYr*scU7i_pH^3HVi1j@Z5rlid19>i|s^n=6Lo+iY%hV2!;z57?_4v!&neT zrbRe-w2^?!MG1wur=cMCYg!y&1faOqXMtSKyw<&#R>VU97LIWuPzRYmv;x#{4D-df zGa-oQQKch12bs)s&0CG7Si-Zk)uM=6LomS0=|Wid|FSmB=Pbd3I{`93l=F0S7!Jmf zW8!a---o#tS3+hLCEMFsMpw|?@@DABF?*6)w)sW4`@>NTK=I~^RTM@tL|kr@Fg;+w z#y-ewd#l63Ow;SH8X-VPr1#0GAs9TLITW@?kbBrpQ^tZ=Jhup!348`9@#*j|dZ0ZE 
zeq>rh_Rdwbi~RCzx1AKZDo@rk>XG|SS+gwaz^Hf-h2VqPCva0h@+jEh0c&P%9jIkRf0Lkn9F*vSdBROx&qtV+hYvq2*ci7aOVfFCA}_u=GkKX{Sa$o zqnOo4ABc0Gd^+C7i=P0Jo9E8w`Sj-=yB53G?_^JXQx(Il-8>T3!)63)(hcIrxYOZ% z3=e#_gz>{NXODq1blbh}{8{Y3_+p&wZ1LV=4Da8IV-)VW%NO7m^v|Rhz56#ax|H6v zLJPmj1IO^P9-0c=pE?RE^u_WLO1X}5mFM*yHAaiftB8uh4spcVxt&fY7qTQ>unmRI z!RQI#Ext4Yq0X_2K8-#y?^Q+5b9=LMU--}72i8e7YVH%)NSH!I-?6&3YBf6O^G)x& zi^Xtz>M`|!g26M4-Wg#~f9XvMc8p+VkA48RyI_<%MNfTYG*4NY!_iX@`&qa6PCS8+ zF_|-#2@KU|7@KE&=rHLC3)g);!`{p}8%^a z`L)SE`kOk zy9>z-yq1cZ&*o8?d_HqKOi1HmBL(wPD!3Ap_4N#!$i^_OhQxzyiUKl?bDwLi1+Xx! z@HG8WNob@rB%);r)WERU`CeXWvv?Lhobq`*?g0uG!s{g(`qGnE7!@3iolGf!5k;w> zL~BPFCl@3`LoBw>%1T!5d7$zTp}U|wrvW3lAKbASdUnwbIT^vwyIEu*{oUU1Z9EcI zIYUpoyLez2Ptlnv1&zPP%$$zT;a7~pe9}v&-p^~u7*%@bf)UZ*7{+v8zqwA2g;db= z7^Q(=Md8z!7*-cRC!Ne4BI|PlhqXaCP>I>jV~FH;=%0lAs|b z?_(1R0gQs{JmdxW!0u9p=S&%RY_KP6)un-Gtg5YHtwl_rg75i#Qr<)FkS%x&)}IwMKAn3 z=tmhvIMZxpgBr(W?tkPFsVF|481-{2S=6Sg%5IK4-}{%U&3vAPJ1Pe|o;kh6^))Au zis+;_j?T+J2>iFG<6X^L&CIjpEkoHV2j`(dmxrvxKEOlIwS?Xlrj{y;G0Ey@jDR<0 z4UP|4*VS{`rq->GWemDaTnz6Y^m=jB>t;0P07H0eDvfJw-D;@+w7VbsDD;o~zy5yQ zcK9AX5|8Sem`WJFxVM8RZlsX4gO<(n8-R@6T-GvP$Po&8 zjCXF`gGF{ngHamw%ZzKWiMP)<4?6qN-dK(0OBZ9fxf8?Nw=w$r+~Ev^S^1!l4j^J= z043PKgH8~SVH&$jBBKoxK*An#-HRhWn-M0xc9k>hEw5#fqf=sjj$iLi4I*C_h8UeT zc27Ls=0c%2Gi1-__oint)ZjMD>G_WUhI@m+I{Ts;e2J4N!D;61fd`-yvRe(~7>ure9-JZ7Stti-C`87$;7Rl%Dm3(E zA9@Efgt0PD2{BzHZLRrwJ=o$Jyi*pd*WF7`QV^X09|ckD4P}uaB9<==y&;qa%DIK{ zY~iK(EDf-^@=mu8K}C{ABJ2-coOI`CKdhJ+Sg0_alLR*hqR`?J>5_oJjg`STy)ChaxFyl0x;?AXFQa_Fqolh zK|ogKiz+iGB*3x5-(hq*W4w6Q3Sc1HFTil+icH9uxK?`2&+`HJMe1d31B+#t%pw$c zGX_0VE23`<6mzW~&BOx2AWXx=rXeaZC+VF`sB!0CAt+qLRThhx43&;butLWr+!G-* z?~2gp2;hV4Aq_)S+sjcx*{W2Q+O4dgIAvZgw8e>ef*tFq$EbogJtWu8*Q;4&?>(pe zK~$_qs8#Xu_G1SRS?;c`t)`b~TEKR)B@Cu}qmPEe^pI098S`Sho!#ITP>Oa!o4)r> z9PSOGdv`n6W}f#g8dI5Q{0=Zei;YGsUA`EbufG|mlcQL9;?eL#vxB|eXkJ(iw<;at z?XO(Bh9R%Vus4d@qoHGQ5vr11t!Z=b4)Xn?a$vt!$xE!;p;P?pyD~ z;?)OZrrr&)j zYPMuPFeP?SC_2HEJcmM)Z#5#)M)}J02Yk`KbR}AR9|1BFXlg9#OEKE-#u9UOy%*a{ zv48Vk%t==i_qnT=V(0c&HmcdUcp>)Rzs2+5BXop5q26SrQG=EzG3^ZS?BB~!bQ!Of z+d;Rz3`OUz3}h+~CV~g&@G6ygQ$*R#@EFDE-_S@vr2f$Y8ygX~8uD1>7FmmQW zDY?&`u_zCj0_1mlty@TY@!UGkK1>jhh66p>1-K0lFxco?^^W>iz+mGXo7=3wpH+B} z;UV_}K5Gac01nVYmGK;$H{`CUSJKJR$uhKS%IGNe+^xX6wgCwmjv{=m0TkSG1LYJR za9f+YUBGIaTIJaKiZteoOD|oJpY%kR`M!EqprCW3uEU{Sdq0DGGqyIKsNG5R zW?dTjOT(>0CP12BXy$V@_U6K+n>uPR>gT!`1MyjDSFjECaw)dgX&mRXA=virJ|L;k zMXDW763jX)6$Ug>52;5TlV2X_Q$zp)XU2kGJ@-=T-;^~JlKNQVEf~>8b+JWmtu|6( zltr~>n16A>t=(f}bt#=xqbJhIaalBO3w9$1f(r{tmzxdv#;7?ClLe;G81ut*9*t|{ zvH&JV#f@^#yG3+5in*`+*hil?6u@{`0FWkpKOG@F2dA;yJHohw{?yt|5QDG`f)sHS z9wOl1Fr^E}V&?WG9_(x8Zrl!_x9`4jb6+xrUEK`7FpflstE){6yp3n77?yEXg{wgH zetpgGuf{DA3HMN2=XL-X*<3^&!j-F5s2)eZd&uW8WM>ExV{j!rxvl7tTo^l*ib%2e z-F4B}!@SHVpTpvN9=xAZfMmc7g~60Eo5b`8M$;3981<&r6B;HBs$W?z01)u>Iz42; zlA-Y}`*ovy%%3fxgwvyh88v32TZoejYuMkA_oDe4_vSNaPjwU6I1f_|<6%E(=zES? 
z<(HO>pL&DV4j8J^xLeV0=&VRzq^*@{vDGKIqT6_^E# zLV9T^@Z(XhP+VMVQJQqrkmyy|CbqJ?lJt`J_wn2y9u{8Bwo}FH~grS9(z0YAE zMbq7hQ#^$pip0FKBlF;!#<`U!x2iG2IOaj+=8o$ZH==~HG7?a(HSt^r$l)M`)^Kp| z{wCuAxPj!$C_nq2+ZnFAyA!hj}r?Y+!B;U?7nw1j-P%i);{y` zIC}Z*C|`RxmVfv&F@E{Ym^G*Q>1QH3qqz4E{!#S4{+-zT{XdSg*WQTJ7vGF|?xiEH ztmp`2$X2C~9G{(91j~GpUBfP{5`V)RjU}Fc8vA!Vf)=B9IKf!jO4lPt%Q5Dj!~ITH z-B&T7ldWxx_4~2E*Fkr{Qz*t!ubV}R)=<>=-UOVq0b1&+gZ=#&Z*Rq{d0oNmqM!qs zo?=u0<{3T2=(^=i+0vLWJYJU?7)In#UF8|(u4_i7c*mU{M$?L#5A&($E2UfW^Bf3~+!m&<7azdH-G?V=};qA*X_Cqk9$&o8ro-fV_q7 zK?I5v0b`wK0RU!n$6*=a|G||KLKLl@tYPgq^(0!%A;Y8VrYm2Ugm=Z41x!9 zxmf^)tO{6K=uiu5Jxwi}T%p_a0Q;59IX22y7OX;r*v!gf6Z{?hD6RF`eUX7r}#a?+yXw+^B1_+2%DQ=4_7fLUZN^ zsZuZ4*F8_Y<9^akM>LCz(H}ZT{xxSu^44i3>#JYRri z>N5)~`P+lnOtl(GP`_))b;#s<`vsW3Ky!01ojRT^sLkB(Eq;HjXd z3p^++x5*(lp2q2XwNV4~ln1bx1~a;|(yC>FwSdMG0TC#Sx)Boi558JoSq{&UcMX(@ z)L+m@UXq6g=yE*xPd@KJJ?mSqtBX-Tn4Dxteyck~abZAUbW!K(GTw)Xk3}-Hs1TJ5 zYr#Z@pGA1)Y&Goe$CP=7qhS)wK|(PYO~a!%h;V7>7+?*B=AQJe&HH&@w^z6?*fy5t zi9Bc6LMn^HG*%u>y0*5==V4Ho(h5`y8HNzZP)t!9P%x%jQuMqv=;^ugor)TwIxoF- zl{LRY@Z{ZOcm+nGcX9lwrz%sfplC!<3b^n2`6Md;OYq<{8BYtt;I=cFT+eA|r<=_! z%H_FeGiP%sdhb@FimcVb8h3NHdhR6{){1HGZ6v~xd4D4Yo$cMEsU6*l;zNy@UhW6R zkGVP?nc#R@Anziog7C?R&L6{UUTYp8Wl@b|>*8e?p5x-YPZ@>QtS_?eEr67Hc0*5k z`zrP#WaWIVr1yOCAz-jSbZ@A_UNE{IUIngSI6@8c(-yUiEQN@-Qn1vOj@iiyl3dT? zR6EAP14P{DwS1oRR9RW{V|dJY>!Iq&>$zne0>WhO)86&7DE9tE|+uY`!YUbT|=)qi@BNYdr%dTkhsD{9Ez}i4CKtGQ>}<4RkXFT}+A!Q3(?%dzr_$7AJ(Kb!p}Hvh@LiR0Jbj`nju6!oiDqx9x`F^l5yEcv7%UvLj3DhhjC z6pf(DOW9WD1fZsM?8}}XnK5z~(cjz80@q1aNf*_}u9x7bed%20BrPOutggl6aD-u= zNJQEJ)O|sUU~e47eRr z1D$fA0ZRGBV}yo&!B)A}@{JCIGG>vg1%etz^@x!$jZA(yF?1PrK0&Deu$uvni7qtg z)yi=fople7OyHw8Y4}t*Oaq7arOX;#tm@6f$NSJn7;Mfr+t;FN2Yq+_;1u~R0Rhaa zeAonsN2MNebnDBtSZy_uw(gxyec>FGGv4>709$@FN~rEt=EkEDbi@epIX|OkZ^u1z z-g=oV42_c-j`3>Evh-bN@T{P4Ep}?c`?zDx(qV zAJ@Z(fliPkbepa^OWsnEe7G(~C_k6y(Z}{5$#(3}LNMg#c_=h43?+-pXIWDKjAO4uFVnkjV>6|lDN$b6 z(erZGKp-QSOkKrm>XihK&}31k72Bna)V{5sTgCRFf1%%aIE|g&7z6LtyR&!AwD{`^n=(DNcNyYh5iT1PqfnSrLY1 z{)X^GrM7g{c+bq)*=ZP?Y21%v5y3F0YuL@tNHjwo2ZuIv=rPe!tgfvj6S^N>=I-|n zqla<&Ff4B+uKAe>bIFSMWFS$eX@g=Lop-iq5RP+M7#sc(mzw)>ocTV4Q%fyEgDRlK z(5FV&TZ(9ND;ziWn+tO>6(vJk_R!aO30&OD;UNQh8g{yKU#ek2YXjWIS40my^-jhF z{NSyNAVs`Zx0@IKP{CTHC&Ke}xE~r8AV6cN@l%k~@ba4X6-kR0vy~*zwRgNZeHn~Z z$92t42`kK(`FCjkw-UrwB$ji z8{ox6x8~4hp=TYfH-|!><5_yiI4Jq~V#|<2Jn9Vg7t=7g!8dx)Do%ujL1$Yxx2TeZAiNVhp7!{|+kPpSeBM%`L z#~8&snfElmuffPCGoDj;*S*gkA3KAR@N;_J$M$^30Bb zu*mbxN88)b$s_vzAGA<*3G%slJ@SV$yfX8sv{;Th(4)4PZ6m`t8s%{5X`-0fiF z4S6p6Jd{pi1%qx{B6FJ<>S?D7cq-=^{Ud18x^OXSD=X2&ThARf19R$nJRuT*Ay9Lldq#`U70y99(diJh9$H}z zUdc2dv#`yt;cbtvO&!6vdI~=!i|G`3pE0+HVVJQ(xt~X!qI>7vN^1@r&cAWqoKKzgi&RloADJP_zLtias~~J+Q6Si zZta4o@GmUZ;3;cg%*z|uI8564zCBnx3*ShKk!7O?+8XC$xHD|-iq|M`Ow^7_^BUy{ul-u!!bMEqv-JiKxLRhMsPoMQGEy!!aKF0#2kX! 
z+aEBA9ma;2%nIn)dJh>z17=ef3*bD9Tt&5F*xSMo7TWDHwi)c>IofPrAuz9n9f>{y zZc_lORTB;JPvf84J)nueAgh{rZX#WtmN_cM?BCPl0~ShSqDO7M$*2TM9tKFmVu2ou zT6oF|Q(>Sy*48$$GNfbc;zekboreW;OUzwE?=}b#tBS-LgI3cmLMTxLRnJ{v^gLf% zL#n{Ib%yEoK%M62&GQ?Ik=Qa)(KA_~AZixLVcan|8YI1e58Kb0x3X$e<$(^#HQCu4 z#{C_Hg|SFSiv*h}iuE-V27pAMk=3)TsSP!N3Ea~)wL4Z(pW(UZMoT6rEb9$KmkBg@ zCTcWlD9JhaWCmcQ#}4LVeG1{0p0m8~07bLOKq+y~1ws3OzNQ*Z@!-Hz_wkKXm0vH0vWap4!fob-F+pZ|KaG?dHhasRjf zeXKnGaO~Z953sTyON{jhPrzco^mI{Z3c;c^2&}sv^el9A-@n^94QC6Mkg(JjA8v0{ zX}W$AQpG$Ez}L`PtE2Q;;Ocr0_I6`%e+LjWiru@Lc@F{c2?kt)BoHtZe~ghi=Uh}>$}L<7W3W)AP!Nw(m-&(zS>OLw3mkZV+w<~ zy=s8hwcj5je?_R-z8D%*+rg@8^BO{a(whtd3WsZC#4; zlpvn#lp`YnOMtc&WV_5gJrPcW-nMom&+V{a7&*Y;>X}N{Ov5IBqazuQ2FZw>2d5aF z)Jd^Pj3mjWMqjyw*?h89=Nj?)5{@7D)T=h~YU3WG0|!RkumjH7=jn{-xY@-{@4INW z<~BAoxRDpu#%DN2umZNINC|6iO!1mBQ}5_y>!|72%+Grem-iLKK|h?uosM>^@ZLph zf#79(*+Q4+NJk?rX_t@zJM|d_R<9&)IA1>JS2o7sTU)2i^4q_yFEWlt*vSbOBD-q(KjG`b*~0jS4qYaTOd^?Mtp%+5v(Br7FBHs)b1959@q zM^r;t*O#k!e6}wYl|U{gu194UMvyi+U;r3f8JAn|#XVqvDS~UYqW#gAF>>bGl1U8f z@NH%KWZdU73p`^r_quCn;o9h;1V*AT=&SLTfyNk`J-jC;E~BL|%QP^Hf^uZY31n#q zBk?ChbF54cf)_?aJ;ui}XKZ_1i_DxCypE8Gf>l;xbqqBh&YUvO&}&}?fM`rRjBN`A z?$NEz#p4^705vkQ0+%P)fdWlaG!eK42DR2IdMWEkScGR3L+?G&El7C#-VT1=5yCDa z#qe=o6;9RxIN+qJW0%Q9mM|^MnqIMCMV_6N{V=_PV>{WconhR**UP=p>R$!Cyoj=_ zC^&lUKF83w3h7YYfdH5<19kTCPW0IMJr8t%Cq4v7Djh+U^gj~}pq!7n7gL5Y_@9F1 zeYsyDUMOR&x1Zr2{-%Vv9v0U)7pJJe6kixtZlMv=L$R>OJZN)cE!Nl1MgPvd*n9V8 z%tE&l#^~V~EHKyW;W94;zg>?rl=Ye52;Pw2tgN5nk;oS?ZC1PIaiuV^4OEK##D>R8 z7~*c60w|aQeC1wv3-2&4%xiI}&A6Ejis9&3z=Z*Ue%>q73)R^SEz2v?Yz`yczH%W( zJKbzQTUtVvkcXA4mr}uaN}j+%kKF@XCR@9?kNXT|pI&+$W5hh2#~E`TAOjWv_U_$} zy<4}S)>%yO1}e~Ur7<7*d;5(> z*(k*#TcckVtE%J$Im%w{Ngu&qXA3bn#lUlE&nO~%;Wh4%(MYJ<1bx=W=s60s-32p% z*EAIBMpL1-;ufHrdQm=%3aD@FtTxW*lXAwrn5$mxxsBy$x9t1|oR;qD7ywN{7iq7P zVC1kcps0AJ@0QL+5G=Nf{OBpTe_?*r)=$uvTM$l~EJc(3%nnnd9euKa+gG z?>b?=r?-7>eKpTOSrClOV&mWwm6tR%-6CjFpPv9WRdGfg+%08|lIv;IL2%<*WPcLw zV6PMPsQ_r8frsw|dIAA+@NNNgVXT8X#6#B>*@4sOwFQm>g5udVep(Fa&Nd4-jgq*) z(Du2{ed75Ygk)zh$>s)nyA};JFgz;_7s*3OU{D?)xeZrqoO)TzW@x7zD<~uPmu3-# zn=ln&XDKoQr5GwXgV1itSZ<1_3Z-UF1=1#QngEG9KV5`E2zVa`pG-CGFnxO^!^SpU zkpZ_co*ljyK#u5)SP9K`kqOZbI=k_jc3hyKn+SWA-u2eRH0ETS1MM9hN z%8qnnvG=>rtE; zy-0zPVV$eU(TdAO=;Ia=PqdokF|Y@@?|K$Gm7-NIu|}0xSy_o{9W={5)P({X!&OB{ zQ_lbZkpie9IL#2h(E#UYM<>VU_1I75gOI3*rqB%#kbB}8w+np+I8ftde$m&Q7SG7) zdDenq<(CRNqa}PP6h7I{tH`8*%2EVwXt8dkIy9W@?ZgpYhDCK142LPFGrfWTFH!&1 zq*=Bu_Fl7WdQ+yWyO-HU`{-;*2q92l10S3qh=AaNOM+{@#`;vmxq=7+tHTi|goMyW z+G{V@rc8Qos`&q&*)1~bt;~GiImaC1@oqdLSXkF1^SJp6znd$sPa4Am#<1!2x!=u; zQp&7boMI+&%^*4d{*SgUEcfy8 zG-;c3ENsJ#Ll8wYUwXzyp0l}o|ERV8F5&!R-tBGI#2yoc&<&EaF*PLkx9j>hQPAsa zIq}EMS&N009?07j@#`6~yR5xP& z42z+ckx^?8ka1QgIq(DH$F~Lh3#xX((Y4VHXC9LOe&}{vnUZJP^4~a9X`m zc&tRld#ukV%@2|hqiaPvSj>!(y}g4WdTU^fefaSx&+u{);;P@Ul5)Z9ejRd&8kd7>E&)?jP7sr@rvg zzajQfo}f55j6McAL`&DBtG=AJG{URv{=I-ySqaUZE+HAi| zAYcfJT38JesWyiZ?4ZXKr#6Uk#Kl(e361p7P2gjC3gC&NYSr#G7VPy9rAxGXBo!+nTXDGB1= zcB-3%{>|lC3OX-F-jHOBwj5T2)b*2p{p)Xn_4#spq!p*gAPLj+q_o2Q2KkU-1^#nH zZZMnAn(p1@`I>v@k>>f|D!%L6-;B*?;gy1p;AYZjiJcK?`KxL7hw1k`Mq*N~pBZ~2 zKjO2^6wT6ex3`ur(Z^*l_?Xgglyt&xWQ^h}bHnE&PZ~|@(Qr=$$xbl;43OXf^gW9({X>2 zC%GJLWOz)WaEhNZC&Hp$g2E;fl1?ML=rqo#a61`wc?-Y2*Y9hqZK@P*lLxJJww%S! 
zl((vTDFjCzYdAxx_vxesM#%=xi6HiEOD5-xQ>>k=ZOGu{T6o~N%p8&>YebKyEz<-@+96?v!Khv1zl@@lvM378l;*!4Tscv!AO z%6?YloBgdUA&2be*(9KZG+DDk$gC_Ov9Y2;1S?N?12ZU25U!RhMDf4-{f89Zhhb`k zA(OBE5@DybgsL@V=^+uBLY37*M0JGUc%E7AgNUB1SMN=NM%d}$CnbgX7#+eO>ppeF z(E!%eUnU`3tgD{)?Bjo(fMrQh;_p9wTHe3UbL{y-P6`yKwd_w@xYmL$2*XqaWTSSU zEi<05wavKN3w9A?W9Z0to{OrU@LW^Xx6nA*nVV1zYP)-&$140&swKshD|pG6N08hARt zxLZB9SqfMs3VW$3FfV(eUuN{ZY`)Ke^#nem4K#y6&=(^tswO+=iT-OHem7`W_fao` zoR$0&;|9ZK>E`aSWO4%R&?0YhX?eo<5c4j4sJTYmB?8)n(gI~#uQx@XfDeEt0)1^>KpHx6E>bob*Uaw7Op z?x&~IW|$h$(F@7j8mUN_$B*>POOexcijJZZm9iBJtx;^`*=kfvk)egWyI>r{t-?0} zE?V?69UDtnJ{caFWsvkYaX>*$$iDY@(Ov;!xsNY?h(7)lA7EGVsUn3TNJ@ z+(k1eXR?#Lfu{+3$6tyVop-P3ODNix_OiN@0yZUmd4AF}85Cen>DkF;p7CEiFtuC3 ziDM#verY>vxYJ}u6qB(aT0kEu{F83Mix(LqWHj$z&YB!_zztENefj&3@t;U+b6of7 z_d)C8c!>b<&PQQ42PsWPoLVT2a;%rKmx3b8eWRgK^xH|oB#%=9tKy}XEP?4+++Za>1ym{CgPqN>IwS8U z)d0QRGje(uNOOje8*ptcV~q%xu`t&TLm@e!P(Fatx_X8X(dC3G0Ojr5H?v1xRXxS} zyFb2P-v5};YaHR`&4APdhp;ICbRQ#Ds#zEc=|0;$AkR2oy-Fz_WI=cD*~RJd;}4%E1;7K>dtaVX2oNDm zs4X55vTvZSS(Oq3f>>6ZkQYHCgsA}WJnQ#a${q~B&uL2pt*vcNQi7LJ?u2E6NuJU( zz|=3kr4lrg(7=Y}cXoL{FV&X12p;UUj@K`jZ@>9!sysy`7+e^U$BUuihQadU1w6Kz zKmjhtqCe+r@ue?3nxcG`0^9uNU0(g(^5xm)@^bHRdGY$qvh&S1%R?Sa z(lC!A@9vl1E+;AUm%^&%-2y*{&#MgG7cJuC@BVIz6fb@}8YQ_l^@;AoqhhkBXd)iD zJvwU4jEJs%^Pl{8m+$|xf3bZ2{rAh=zx(}i|HF^V68%1Y_+fd_%Ju4OdG;Uu)snTj z-2CbLrvE zqX%DqGe&{v2SaO&s>a;@D!iotZ;#H9^YSu*rjXK(2!)Wg0OE| z?E#ug3jfHNn-4`yk!WpAA&cAz1Cv#R_f>=GXf{Pz(GkHd z3_Y?U`o;4+^k00nxBTjxSL2Lms-p1@Pkr?yb1Z_@CBq;dmeYOr^}8|h5(~jQl@O15 zU+QArD*I{Z)t2cd8h$wl;VVT8w+yW(3EH<`f7N`OYim+8hPQa@wnk4u9A~EcCyyNL z_VziA(!1dpT?9Pqm?Fvy)R{wkHtCDjzrBB$&d7NThP+@c#_iUap6g@GtJ`WFec3K& z2Mb5lvu>`U$JWmfwcBeu<<96(^mgJPmtN>|q61`0^BC{Fbx-|RV@wT6bct5s+%eyC z4ni>dbo60S&xR|3fjdhYUj$L1M;l|XV z{rcD61Vbf287+*T#!zd33>iIWdwXZ?U$40C)6vD^&@~6(Twh+y*tRr_YCZc$|KRWc zhJ~}AVmZh{K~R)7Z%u$M?$*??24~^-teUVF+Jn zf}qGkp$fvHfK5c$-+crRMWkY#^@MpSawjo4x^Z&Jcs!yJ(iNJ-IIPKG`o^|CUN;4# zI#XzcmruFxw?2yO>gFQL@Mv;agvj<*1lT$-kTk_Z%7dr#a}GTf6OlI0?Y)`rP6VVW z4rE@1Bn^A?QO*aSB1hIYoItqeV_fS-P@ci4VZ9;eE(RU09RkKFl`}fZsCx3 z|0WC67Dx;nNRxpSjUvSWot5e+Qa!mwleXW;g z#j=;fcJ0(tjFFIVZp{oG4VZ-Nb`D?X>0K=sfBJD0)3fN&y&D;+*2`Y!36D`ru>>bp z@Cd5rZC(vJr@h%KrtprzV|_0bRb~tdJInoQg{tuA#er)41X{S%EN3HY`_UTW8NKQ^ z;-Ls(+5OeGDb9=KJ|k(fvAzVbYbRu#<*hWn)o<+VEuZrm?k>-ks}G-+FO9j8!jdM? 
zl@Bd4SQ$6FZ(jGF!mK>U@EBy4n-u$H3Xz<-`gAmWeEY}mg3Fs_?;rj{QNiWaKmO}% zsyxZtixDma>CQj=>*f07B8C6s)I`daQ|n`t20wY>FVP~SpZ9^2;c*mD_xqjTvN*;^PN;qfpJyreqm3`iY4g8n9YXH#!^nkC+x5_^qEtpVSt&AyR_qeu z5ShWQ~5VeL=uV3gGDzKvZR(B_5AzA8Y&oJVp?kONTwlIH^~mc?QeR z2ZO=)YQWe-ZkIur>0WZ4A#;1rz>9u^r=rc5FV}gfC;0Kh(a6n_!H-fB44LljJ}{G) zHxH2}a1OO&crj|x?i6T?#+pMoo0^>{iP#$2?H+5Kv*;Tr@fsW%0St%Lpt400EJTam ze>xpmETZewN!{yy{byf?!u#duC|)P8!jJi`dNR3{E**z2p4Y+es7(LXWLPnH` zoD2c~_1o7|EXHc*1s|e)i3qR{=+ud5K6*mhZm% zm@xS~73WS!lG~O$pVpb?5Rl|)2_|`SRosw#n}u|Cp)YyDxO?m!luJ53TRnHJ9ueRa zq(qR~04la1j1;;EjjhTifsu8r8rC|6x?bfOLW~~w$63!jSsk4e3tB${aIL(2kd48% zv$`ldu+l%;+38Ow8gh6T!XhL^AhtS)Vo)mjrq33);0AV+Hx{;;TrsaYf@ywWh_S}Y zPN-PVL;@x}op1}U7g^<}x)g@fZkc&%j`8l**0R+c)D}=ua%Ys}it`A(H4yFy9KMC_ z5tM!N$61c@R)~#q70O&k*L%Ns(`^a0D_dqBreghR!teg#df9$`&|Jb* z-;ANVqtoc&<+Ahj*D3Vydwn*ob<@k^F+@t?@5OZ3yA|>$?4p&M4Cl9h`IpO+#`la- zRcDQ#r?B-`zh2(_gTG$h|C|3}*?abRdHwA#TkmQto&3N4?eg%$#{@cqbXCqRznLW5 zT9;?7+t3KD1Y|r0X0wMlJfi`0G#Ct?3=k)bP4p#P!l8MiRlM!m=W@`5XSm@2+|QOQ zA%6mFS+=DQQ&elgT*MK40{bXvbF{@RW|-8%d>VcE*#w7H&=m1G$>+_5wd7Z_fA0%kx(o%W>BqM=#dESVoIu^aLj` z^ZRK=F*(F|m>g>N9HuxKuE(br!{2@%?>PmRnn+P3-{K+p{9k=_xcvHS9op=bU*bar zaM$CowB%XOmxm`kgCQK=qG9c|_iR_|{u|+7t4&#iFko0de0ZOXvn5X|OJoteY_ol_ zyh)Y|)idNZT7l~zH!wZVQTp-6?}OWu<<(&{8)$jVjKRPX4A6tFfTE6$HdRO$PMhd< z4wY-rkBISX9iyOwt4M@!-S&1ee`mjWuX2s|ItRxDSGj(23Y>(=PlCgmKYyA`)yIEx z^wvYfBeHrCUvYpKs98dPJ-q^9bYQ z{5WNofs(RN>Aw}DQ>a7ajUnu_yQ6Dg>y0qwx?VQs7!JWrL57|Ha#pP1Y4aHK??l+F zR78k~2{8pEekb&uMel1Blq_1xD*=VrV+{BkhG{M*DfaiLy5_N11(m)mU!_dmys+$mI9Yx(*(W9TE1QM_fIrtdEp*4c-T8Pv83HG6b z?giI5Wvut@d(~J+C#&t(ua=kLfw!x|l)+@*wDBoEQ3WCTC&B#SSHDacDFC_(E(!4r zNj>IA=x5+;e*MjyM0X$E^5A#hzHK$lOUJ*rZ-)n5PN)!Md~e==T=w34)pPPXe*A8E5zW~V0QQ^vua?Vr;z1otUY@Yt zhKN?4`?W4uDRxn+2?qwj*MI)Y5#ks6;$NOFzx=2FrRXQ`X~H;nb7uO@$?>v}X1d|x z=y-&mqRtn4yUV-3|5wZDAO1LFF}w)R@o1YHC27szD4))o`1wfVQI;WW6mRzk8^SkZ z4q<&6BLHs!3%=y7rVJBIPH@}Ya|+<|6!~TMc1^#t?9_rxbD{f;>F^_}LlF)SpgGSs z*`8&N6cOhGEXV8WbB2WHxz3zto{Ut48+r0(Cmq%WGHJlzr4|6~$kppa2pwb^PgMOxD48{RHY033LqAy& ze1-bav_oXW*ZTDYutx2NrzR!(P&hh9Zw4y4B4>V;VUs8vr_qtG`T{6~{P5`{eGv}G zyPoNJ+nip!(!6~u)_eEW*TMO5qA6fIeHWrF#|2L$yR|z6XpXQ=4oiun|7ZW5|N1}v zjl3@r&2wJ?NMXxm#6)?3v)v|d&lWYsBCf&1Jme_97a->iw-`+NW;XQ)un27`n-!fk zIfP^Tz>AbStA$b$I#*?RZ?B>X;_g-8#`^#8@o4$}hqJLzcO%e4m5GQm0RH&nQBMYh zJknbYbcAkGTo^v2QG#+(o=@OmFv{GS%okT{AqoY~yF_>_Jqk{Kdy20DF3d4rL?0+d z-uwP`^ALt&VPXx#odnxC^+T*X{w0CArJY=nCKYTh}u65mtP7t9~i<$#g zYO5X`P`n5>)|l%M;+K@(vy|dXoyRihT+6~Gi2Zc9$5iekE1&jk-;=-mNjdP?t$Sb` zu>C%Yqj`JNOfyOFOKV-dI+pNu3QMHHcCXp18$D^2s{cEKhPSSG&^;J#w!bCdKHntB z69!xMgD3dn=J8-}quVh6ty!TK!>Idr|LX58&))uGxxI*HK7L$`F1_{H+)|I7dRa(q?%lWa3=yB&-J&+sba+E z{Z7tm?Mrz+G8kGnqZCpdug&wewVcxTH-~i$2gjxR7mcA$&YuXu?hZNhVmVHJT%Ko; zh(?fa%|UZHXMZRdpSkr627rDs$MNDix2=q>!_}}7!APFjT8P%@G*0svy9%uT+y9M! 
z`kN^>h&d;I50XJpIK@k)s7`Kk07?p2F84_S2g3M)a-H(h7GaSNqfv3l(qQSFwSfDW zOl2q!4@2)pm=KGqJ{=#8HMy17|JB>W<<-INc#|KGPnRD*$e}$fdt1*M*C{|Rmy3ka z4?lj2x#dV#%bCF7&8~_(!cLewxlIEW9lZo4h4ZibTOVu%GNKJmc9J90g-@kEf$;FS z=A3im2JxeGubhSdFm(rCzuQ}_qZs4kr%yd2AgA0>5~F2RI}v6ICRT0y6zC4+8x!{6M%n>5T9aE;k1dxXWmi4$zo}XF7&$M$48%P*T*?#dr*RI|={BiSDe~ zIqlYV^+O8(`f=I)i@#i6{OXs>_Ah@iC)H8pcSk47!>8jBh|*fOmuEdNoX6}>d(L${ zvH$H?!Q|;!^$yUH2Dtigv|Ro8etGllFQ#MOvjiujV`$2p)xET!vkxDZ+cVYnc}VdS zi~sER-z`si?|EwXxDs%{0R7y z`$ri+@4P`rTqt~{h3-bYw(?d=HX0$|DPc((>lQ%D;Pld+lb562sPKX@M+uAC; z{>xvy8~8ed!ZRG@OPPs^P%2@jwZa-l125)8bgk}zndmN&0o zHI{9NYdrv&tf0FG&ii(Ofx+3?)vQy!%B0F$yRbhvk;S50HyLSgC4I|qFa{iJR}44a zdiG!cul=LnP{d((-VnmJ^-B9%70k9by%LU&xomj=h$+I#+R1K>umb!jxMi_*!j=w- zm4Mb>mNl+2T?m|2YYP}l-22lR>G~!nf3bY?=5Tq#i%EH2{w|m~)I`&*42f++T&OTsWn`OUHBw*gn!Xud^H@)U~xB ztgvkRiD>dJ+Esi z)(|>|g)MOek_S-Iw;!{(KYU6;F-TGMOChjPMcHk zJ=mTuY(laR(Kip9r@9e6Tz~vHZE-2GYsz|zlDwo0T^;-wJiPoX4PtmntF$})lD*80(plD{iw)l?nCG=;n zIfWBGv<2k}F`fsfT~B}!y4&%-Nx+4A8As`Uc?$S(aI)w5%huhq9lX(1Bjkj*Ut;5@6MlzYOzpp5`_2(4vCU^`Ya5eS9^UJcQ$GoUms51S_{?Jx#g>%-Qn@HH*-Odk4dpN^kJ1ZJl}e^1W) z!FImKU>$zK8@hY{KI6DGlArW}ee7_~v;X1a*<4FDFu*C{q0?|n|4lk!FdifV1LJNE zJL{R`njJ@?t7EV>XJ6*K?h+tx@0Y7&v}g{aRWw(JSajQC(t8?+9SmΞhxjA)0_C z;2a&$cZvq>N}@+ZJ--g;nsqTc&DA{VAM;nor8s?PzH!ue(QD|J{C)P%{wsg|8wAZ- z8G4U+ysGAdqrp!LP^>O0os-&EaOsPVPi=21Rm+&H7S|7g^)Hv(2X| z_k$_gcp1Vdx04fx05pdHW{rxU6a|gd90LohA~K(0lp&&r2q0r%FT&YMxWfphNAS6O zeHO8{HuS^OoR)(T1A?Y>t!b)AV-)KZMR97<>S;4dBF>0SWv)=vM|kF~=FN*>l5@j6Be3VIpFhpR955G; zl_fhlqKN+{d?z1+TUKr3z9fuW^V43B_vXGn*<8W@=V6Ij-WMW9^o$NTvK8z}hMprk zF^>FKAJ6^rr|7f~l1Po^{p8MVbI{UuJd*^-V{nviAyl>w4;v$8MF7rF!8w7#*pa5u z#3V_!od4p7!=vKM&8(Q~Wn)*V!b|++Y2@ z<;mM`5MYWg*hDv1pWaUd>KFg`A1z0}{r&1Ss|{%Glr*SJkykL+%9ij`DUZG`K73@A zH~;O4@?@8+eE?E~*T-vf5gxbuH@b$`Lh(2d=5vN3MSgj761+Yy`|sZ7o%UQjlQ7c+ z2(0W1_?(b*FZ@i-H2e)K1t`LhawIhSE1piN^}ICb7)iV>^yE`A<7ZqxNy$!> zjK-(PMz|$Qw7&H$0-my;BVPMCsiSC%pvBV@x$C*sKZt`bG!+do3MkW|D~3tWoT9bn zZ2Ouu%-mK+oM#c@?lHH_jox?PDsRB-O9t0N*TvGyZM^dk?(X00E#I}?9}~QG_KZQA zfOTv({#^tOy$VE;30;ki_F8Gq|kGJGf$}~C~d6}N^T={Js-Z+11 z0N~QRcJ`C8qs;VNvF|-xKxyC8o;BK_tEAE>JW*yf0VgMy$=!!#Cwg5AX;<1}K-wlK z@+-nfR%)`fGsa2csLxf>n-@cPoCkua-<`u?y>!D`aE*>dj-&0qCNk9*g)L1?52D>y zDROhd6AIF$lpOuaNLsfOKQvzWyM3J0rn$oVxJjAg~MRVX1`O*zX`5JbYaFJlV7n<#|1MqWu6@vB#d%iBBy;WbL*`yWr2 zPe(^1q%kf9LMR;dUa)SQo*%}8|MXVCpvaz%~# z_1nXqt-(V?7?;cM*qe)xbVw5JkA-XAM3?|?} zX$o1g>enq>lx@N!W_^DY44v(MwLX*QYYY~jy!$V|X%71tA1Rk%`qrU39#BV_B_Kvw z5}LtGapAgumqC~Hc(sPlg~0+ovDn|Mfra7W4buwG?eD#vv9vn2HLA-Jc;?kz(a0u? zGW@u23d5QQuY44E1WZ8Zw&$t#jIQGm%|2-!b1>}T!;eD{L3!R_i}ovn1yd0UD$0F~ zq3@#+magy{C2W7WXHrBw;~A#QooPPCUIJq0FMqx4{nP)-a>vL_kiUp#H@|tiJh3a{ z^}A*J7r$)Wyvty`_2yLyO8&J$G7#b9iKxl1|7>~w)$3rBn9$!KgJbjctL6Gne_9^T zj+gzv_m|7rAOEym|Ms_We_mcnT+!&wSo-pODS5dz!sIfgn?ktP?J<6GDw~tCwo@3q zpyz5_f+?d==y_6W!Sg=hY>qE8iuGhCobE18md*a2t!>fkRbC^WW;CG>=hZ7p1iLw6 zH2_Vt#P8@NkpCg&Zd&u`)f8|uPPnRozKxFJiTG#osM2GN zX+BdYkkQ9mXMiwP%yEs3d46kZovB#Kwv$FfFakOkO}d1&*ykKc#C5;^0vnx z#6xf2eLFI9!t{;n91qb_krurV;8xUT3*R;_*@o^YcDo2fL)YVHUyS_J07gOV)phbJ zcry&i$T94K+cjRo^XR&D{hXp6S=u~y@pkmdD7y@AqBj(IpC;QfA{y&u>zo=7aL>z^ z68ie9cgvsu*)NxU8x+3WBx};8Z{PG8in8+wES{tTZw7W`qNryQx;0V$tv7m5Z?TvD zb<*qkdB%EoPl_rykgan4oG(p^5F}@lg|9H$x;Hj+QM6-y?$}G;5YYoSUw&D>`D$-^ z8&l#Alu?<7z}v)0f-Q2**Bm-F8&v?`=ioO4mTuI~tJ?{2d)t(oP$ed@bu6L{NvY8+$BT$tfyn3OtwZ-?5_zr5 z%d_SEk3aNzo=HSCA>;Az%#{a8ISPBR#_VZ)o)KduF;vgs{qIXrBy=AhR-w@y&5y_0 z8X}&nJWL@p9{=nG#^528)$1uE1cm+j+df=pXOGA5(=$%5vRH2(v$6@h1Y7{ZSR3K! 
z>kOK|_~ricCT~NPDuquu%Y`vC9?$ZOzzuSiii-EezKq1v93$7Q40GkkNwp@`6tVZH*e+)@-NLZX}LTLx2He+kaEdT zY-*n801T&kJrM;9J`Sp}J@DfDKQ8b8um6|j@sEF4?tcIMvi$IA`S73p=gZq)|6u=c&*onJ5ar3K%^W?tCF{>E}|FZAzzo6K>vO^@*4jds|a7j%y(KYPM2x*e7F&Ya` z@W27t+0&0b+Gu>I?n(9C-Z(r)KawEYe97;ZFEWghmv#mTOFw?m*P!qC=0DT`G`9RX zV@iW8cm$6KdpSEj50~V%X(R)ai)6;eR(v!H)d%4Htoa6DFzojy83EfnualKKDI^}V zRB(|;-7`>I^ngaJiOjv_Y=t)ljA*aNkVv-(^}{E*&2}FnCgn<|aP&krq$9t2o9^6+ zR(^^~AOQ6h95N}!-pGE*P-q;wBJhk3p41v$6`xO$w53L)YDUx}^E?pw1XjK(M{*uzp1RWD1Pr8F;RUc$(etyh505Vw8+|dd_Y0I%{5@i>NT(tLF7G zhE%*pkt(1gBq(u}=E7ot_KUCgTgPVeAXKuxv!c!uutGW*NQfMgd`)przlu`GYqGpV zE?89)!VFoA5~1M7&(ahLoDhF`b=i2DKVZ&Q5JG)X>=>NBf>57RU<&V0-Gt={zOG%1 zJw7Ku9x|}*ntw(Z0nqi&o;;t=5B8off0=SW%nNuD5p2BBJZH#0SZ>W+L_c(}LKq01 zutIP`?YxbP3>!Ee3$bw{iRRZFx|Zb`%Ld;p#~9<_FpCuwDVWxG{4oXD-@Mwj-DfzC zL7ukGXW@KK19*xhA}D5SDBxvw~BD$u_q81y%P?#%SB z-%U>L#s1;4^%sACd471S3^qdq!Ya>9Pq`Wr6&&mJepD#cDAOFv-(WRWZ zGxW7qzCAykx(Vi#b-?kMO%#1XeNsxjADD($y$`==Q;m*i%-QLLVsj8ALcDqqz%P0T zLE#RKMNh$BG=<{e*-*?pX}^!b)xA7;I%iU;8Q{&2;V|BDpY618kq)?5 z!*rXzVz85E(#MP{sRzd15K&*!pWr_&l_y*-G~c=}l2Osb?EUXOdITx02P}zTq*%L0 zTJ~*5_5RNDp2^^8=&rNwSFc|$d%NmcGD?Dtef9SYj&y_Tw$f)#O$4vcIn;J7NNw1? zrl1wyjgIO1-5d#f+;zIcM_$O9{8}Tqa`_ov5$d!5{(tkI{YDoyH#*7a>sNadUa}>O z`){AQa0p~xWK{|q1mP6!_VPBtc(9*06wr7n3RQOU3=elQ5K@d=TQT)037zoPl73(M zCLbOMZObb>S>ETN|LzapB}WjiFy319CU-?J2zB{e$W=wJ{5g9BS`d@43FcfqO5Oy8 z6LGb`PsghHvy|q3-R0H>FO)Xs#U#=O7|wNiHpAoVJok6+4jL9gHP7C^wY@1yW>ry& z6yFGu#=*oa>G*NxxYdg#OQ`T}o!+EVU6Q+TSjRCnp?-D|BARsbhr|gRhDZ&1zBGST zm2|tY2x|L{eH)i>(gX*C^OSygzxJ2MK(L?b8P9#H&bp%D|wf6HmxOFD%}F}kq4lz_do(o>#kOh>|Ae)zat z{P1Bp`0DMl|CfKB2cNeXPOrcFLt{0ujqsCzdfK9A&-SNJ%gw+0|13|Beq5g4hwt;_ zv|LTGL&dt$plz1Q+0R{M+1cn=4)2E;ENv%e^;moXPc)urvM@T1} zDXcymJi<%Tbv54NB|P~#p=sOH%dNdpMvj49dmO9=1e~iQXz-yi(H{QKFTu zo$z$`DQwcTWg}yIitC!wtgq{*(2TU?xKk!*4-S16FEGE!3tJ37=QW)sum0`r+49Gf zB3Y-Hn4lFt744!Vk@Xy<9Ly(8&`av^or>z7VV}EVt=;&{T%GPIng@53vLew}2WwCB zIl8*}aJE#Ck73kWMMiyfdvX^JRe)3W8Jyccg|t6MLqUuaLrHA4{_6}l#b9e_I)~%( zJZ1QkQ^lBVjbziw(Rxr59?}7hAw@r+8Reuif0 zpjWRQotw9Q{V?Zw{0w)y3UmoXgB%}Wbz7``gC$Wm5u^+pDJu>n-Nk7fI`2O?A^*(X zTXUjbbGJ`@FPI%1reDH4XG!IMB$Vub)q2A9Rk#gqM^dQC7|{o^iEc1oH7goB`EvZP z9c-dgqQBBxo8eY7F;R$nE0q;JM5gX4kBh{tz!4uVn7{-^)sAN(ek2$>tpyMwJM z0MnuAJfN`{5QyRxP48Dl#!}mwj&kxx1c1Gb`l4&8sxy(iwQW%VB=l^rAEYC`ovmjP zYaWyQM#}4b3iywI{IP}Jq<|yd0RM6$=11wxV>?AkVPc4B3)%OeqlxI|Gbh~z>UAG+u&!1dLFRupQ)m>DmbMHY7FoUwnHwR=d2C5aoG_ z_$U-=EhcJoW;OWRd8+p;V3DT5?J*&>_wcX)K?=cJz^6EirX}emC zlP3+{ln%Bj5xttZM5vkhwp<@4bYxzS9)%a8m1Qd8@(>9*R*kyP5Pxe+ymGr7jJ1 zvkr6f#RwSVI=q(MgynWGjdbp#b5yToYZhqUdqvC4kY?-n%y$dYjSfLMmU+a z!NJuQyRAIMs|*sfB~7xKrzHZzQ(9{|eh#w>wT4Hqj-1E^1)GB->q{{cx_E0+MInht zM%r2b&k+{J$Ka~>R+7lbO-TqPGqAd;zeVMu1L5ar31N^;<_M{1nj-bRwx-1=;k;)( zB=n3q;rPB0J`c^4@FJNZ0*Hnva|W$->!N6lT{h1U#rYgBHPLA`d>IVxpFJ+$XQ+Rd zA;rsf#3}`(?#HtjIQYjd7C(Ji2fkKZx08|KxoAK^u^hTOq?3$0hKAXi+rj?!vK76l z7kYhoIDE*_5Se0-uTjXG{^ne44qi4kbA(dx8dt4Hn&K6;M$(IN_Ic(nH-Fk&Ew6qy zMg!W%_nUnt&n(&|!i!GiwAI(>NpUH?)1X52fj1IN`ECZgK2zjN`78Lt_oBep(Xg=@ zD3QoSIRby)`pL8%x5G!l>xT~?CR&DuIC$j#H{ZUUH8NDgIY!M;KpwKb8+B)P~e#<-=JfD&fyg&YF-EyfYBVNU@AWv{snkCH_v{NM|bBi5DoUWh~vVL>Fp zVXNIm)*vp?-f<_dYI_>+3)@giYuIc&;e?mG^<@?p<^N~Dcr&mFbRpIm#@AU(3>@95 z>=~y3`>e-NVkmsAf@}>Tc%wP8L>*F`V~nbtqnbVeFaKog@;3eD%zzT8QXbZrFb z9tPWl0Gl@fj3E&w*f0VB87dnXcBZCpE@pe_$<1!tKs1?!J9BbGw5N8+u~b1L~uJk zil`?SFaPd$LNCf*u96uaGhF_*=N(6jXv85^cy#twQx?fgg}yCkWaYGx4(G29UoCGA z*1iZ2!B?72j$4HDG!J?`R0b(!@aEJgptDVwG7%M1q|D5wl%(${6Y`VMBp0j?fjSdm za6314;`Q;&8~=049q)-YUPixO_HWOYLHytR8OR(5_*Vb28t4qP)|SHI42==jB)9Sw zSLOK7b3{hAhW2P9mH6Zo1J4+u2o#nsZm(W_9j+c*>v=FqjzmY7r)T5&lRs!jvF5?6 
zx5+Kx=*x+ss=Jy_pRGYSTRK54#u$SU0{LnVcNu!Nu3iK$p>9Ric4EnGqf@#5**_m0 zNMow);PFDa-Ck4E(0Ck3X%>yg(BkU7TW`*9aMqJwie(N+3qITF8J@hZg3cXb5OG8% zm7K0{65i2S>+GIi{o}okpuvvsbdvZTlk5V%L40xYM%xl})em-Z7??n)VJ*)O_|8TsV%>60G6d(iH zGX1K0Yf> zGze{>7#39kZajIEU%r#K+HTGX-H1|`s1FH9At~oI3(Z3?q4MyM<(_a|30GD2)*yxw zB|`2a?47A#4Yr?~Yct;J{vinYtln93JqeHyLO@+*iNn_H*Gv&%#_>dPyyrr8LdY{G zbrNE~&l)^y{(_bSg%?5u!&m!T8C59+3MCw2(4CaAa372P^6WGmU37gGe^&Jr6eg?) zmN6E>-ANv-{JbMeF%>3y8C*kq295ojtLKhD;assLJ@6v0Bhfa*4Ne3#g5;eHjF=Olk|jNiW{sFS20-)_irWn8TDl(nx|ejFF}wb` zYE?{|9V@+@K=l%X7+Meb)%>`Xdj*Xzs2y(mBgp&bB2{ccQa(I09sq+?C&0nJX zvp;>;dkNV;`&ELg_4ghjG9k|Gtx=c?-EI;Jz7BrzZWQ}$E$c~7g5lYp{_di>I)S^(o5j(oH@&yL_jW3LvnQK#hU|s3P3!2M zs3_0%ZtyS#oMtaR%TBIBvza-G( z<8dyG9nFoh>hsCHyEdgg#=%n#gy*m0qAPz=@S-@~~gzCux&I{#ECkQhS?t2L(Z*1)a%azXH z79B~!lCRHSXno6@zVBKogdEFs58im$-+KoK-J_`&hb-LWp$5g^p@nm+Zd{S2%j6z~ zip?oQJ4iNO=H*8s(}1jN4PaX~?;QP#v`Q(@fiUr$sERq$4N@hq-@Z-P#hmZG<=Ox8Kl!iy z=3xIIWki{$bb62wJF82-aSBf0Lp3JmLBNnp=$~bYP$DL4qDUF@5C*n9A#h%N&i?ZH zU~?X>7>2j}yFY$Nxu2)l^T4|wf&oCqpz$DfbL(e-M#POpD63{X%i0M9gB*g2f}U_i z|IQP5A2U4UW%WQ;Bqx`|^PezFfS_>XViAPyc1JpP^})BV_99ZD6bn%_c(r^ux(blD z%l1niy3js_LYX1>43S_W6a{AX)xStlc((Ca=!Yp+7AmT=7RrHD*+sMF@#-TULU+3V zH789H#?T4w`9VX>L)GTp5Ac#25nlRqe7>AsMEqoO*C0{~lR!Jn*m(0+LB>wci9Mb_ z#*k}&d$`g~>B?Ty6sVl|+yX zFOz{A5dd(QE__+*2$<#V;R=NqS-b>l#_osh;QZWm!D!anPr?NAX&0MJT^XSIp+)J`*xVyWZ_@%-y;vf1jaIkH>!odc2JuNCJ!$4c$f0lviqJ==bQize{mmEgRu* z=bNwcs<-l%g{YfT_*OwX0=$2Oh|}plC)gkF64D8ZtvqedxsSdm8e4CSv3kEL#EfSZ z>vQgW;|k$l{ONmf$B6{I=(_EU!}ITdT&{n7Kl48P)wgq$?d|DF^o6dYw`ixOjZp)S z=rbTRUVLbc(YTPK6Z2-jY~$NSvH$fq895uv{gu6-Pz=~eQ*}NT!qJO&Z9?ge%0YjY&MIyfB&~a3E z_6~c_{zN{dE_wN688RAq))*L#;SXU_l&MCY?O3r|*)oFGmX7e6(44hBU8aSv+*4qEkwHQrrY{ma0X&O-J1Z7pY1k7f zjS+R%JTX6#S;IaG8nZB4A8VPA`keBUlOC@SVY(r#+5))dhBOOe>>%720oVOBrmvR? 
zAP2uVtKRirGHC33Re(d3VeXr2g)^G(#Eji9G=-o<6s*&hzthvMNofoJ=p=}!(E({UM67a!H6M8=@9BB;U|VN8pDjzu@2!|V72zi0k%HjX>_V`og#bl zSAVhW{`s%Qt8o(H&B@s)d3oZEgzXdu!dLI{Vm6~UdF(G=D8a1%jf}Sp#c7=@DipBI z4Z3VSGi~F^wLhMpE-wQVOO49)`;SAT=tRzaQ6WDT=18->Nmxpgd5{FAu({ay@=r@ z^sl2{?}T6Sp)o0_cnq4{uVNeC2}SvI(WAi=BfHN%i+ZAf71Fux+0#6?j3{uHYT-%W zTq~FjhS33|cOp27H=6etk655*qQ^oZy!Oaw-8oiP1lbaT_Sm)kFve|vkm{3#(CBP1UEPMU6# zMzPGC$)Ys`FDJ~%GC$BDKkW#57RiF8FLqKa4Hi4GtItd+r{^Bg=lsSbZl6fil^o5PP2!3lPo$$iavM!rS^ zLo1M= z%0aarTh%$i*WuwQCbvz&0yMS1X%NS=I!aaH$6;0pYhNA^V09O|&Fav1-9as^$WhYy zVH#9Wbu2)k1_vvs1bqAM-B_c%EI8LtaJ1<5WA|I)dMw1}OGSHg?xu-(0l*gh)8zjk8>Jwc5&ik&_n6v>#Q@*_^=3 zOX^@N;&d9`nun7LSWmQq3%YZiX96-KMBYi=z#8Hd&9S>8OCO`PHcJGf%hqtD?V>8~ zgxpj!_j2!e>dlAp8pZFUVMe(0c|xK&d3cW=q3W6usAwp9NG%N0O-HmY($Op;Tm}0b z5A-nuEia?_p8oNV%a8y1-z+a1MxXcFJm@>G_eaxchVZyMKks#xN69c=tCf$x>zZNG zXpqCwMD8-eT0}G+a|Vz5tm+4m0>UK$Mk(*bBU{nfMvP1Op8nxa%a_JI|L}3SieJ7& zAG+)D_|L!pVWqL=j7M~uvj<(VoeF>3^e7qd9RLOMfpG%<`s^w?J0HdF&9jdmra%4t z{YCdt`0-?PEBb&}cz_R#VmNI?@1e8tMkt2HCLHm$%IKj^w93=z|4CI1t>vYp^n5lX z-GGYHCNkIk@H(OP#zFU0Xzm~cvgH^<)pS+N6`ZiQrxpK}^n3%O55@$B!UJDTI)-CivpxiAXUpY>XRq4!rLBLrR4MB3<{3cVFc^?9DnEOzMTuEkSkA*qhC58{fuv z!qy@>AV^9Gtw7fQ{S!D$I;TLn6hCK1G=a~##i4@;bu6;3(>ra zBQexC<@j^~Ie7T$b@+1(`RpA=TdM#&h>JFG-IaiE$cG|s(%APxlwYBN)cG+{ss~dpylCOuI@vv6-VCl)# zCo~Yatt~gr6$BY&PGj1QXdKF;rsCDxovir0{RySB7R@h$q$nt%VDdDBio#UH=p2FG znCBu+wG#x$u(0Lw?!9(`x#;*FM^Hh2z@i8p~w&hyXT>CG)Ix+a~_kQlOE~Yxah|7 zcwPvA!8RUm^rc@vSUH600ZsSKo1BPABua)6zE3iIpR4pv)}X1`>)zUB`)(4bLaShd zj_rFFr#NYV@5ASHxD}>u0*Y}wz*aR}CZ zJPPF^IR^@__VU^re}5-~xO>RMon*?IY6PEf(sVgtdQ(ucdT+eTi$q}foD;!|n|8so zj-0OO(DZoUqKcI2)PBJEi|yqgN8s5L&dBLZqq~lb1JE)&95c;)vhW#{TS>*FG zO!6{Sfn$R0=DZv56|;BnnE7J zCzHRA32jvqI)8}a&84Ac4V1jKYI*}^BV@lkTlNNd1me0?LCZzOi|e8O38B`gu+Z8+ z9;pIV1uN913lSpj0j+)ZvbRcwGf`9P@K^Umyw@ps>$rbJ5Lx#T?^Jc}Z7jR{ezIzL z7ee9+Pc|bA$4w@nO&}l@|)U^gSXV26q0xpI@Qep(coF*1*g-M;r zrU;P`9rYW$T`zp6hcZXYwLB-k%g3v7ZIy$|5w?Ag=g<1@y_fy|lmMYj`djFnkRJo2 z@f)U_`@8v3F5?w|Pqa#qKZ(ICj~6lNjetFJmjTsv>o|V(nRQ4l^|?9pEQW7WnZFtRna{KZ9@+>-(|9;4O zzs@MQKKhW0aJyXn@yF$nahC$Q`eVYb^$9f!$1{*V-z7Bj?x{aHZaC92bPeH0&+@Fn zn9*q)*kjid{`dXeyL~nw8m}C%A1B->mSIHEm}g@wQoiUHya+V#J#w+&D9AUO5x(A(u`arMfq$6U-pq=~0F$KJ+sJSKuwB*y|7d)m=@ zR%9^-;M^I_-z2AgXkEX*JfALp^5$0T=uXj3vPDWu7feRT93(}7_AClUy6mgN*Uk1x z$|c;Uxa~gDlOjvjB5(IV7L zeu$`u0&e9kOJ4=O#^9N|t#!hEO6~SmVIDcQ8l2jEQ&e!NR)H)Nbv!#i4Ne&v=^A^w zY0|?C3|qj?&rZfT;GKJ(NTl)6oijohQ7{a?lcI=6MHEF$lD)b;i2w;5_*wH|Y2_>$M_HdTsU2p-R? 
z`_f;5QNl^%z3XMdFw%r=H+(+(PyYM=wcpV8@^!*u6yioiH`VK`hY&}2--WcKVDDvh zgv8c7j88~d6c1GBj^KIy=3tuOsOHn>_U&M z#W1~G|LOT!eMmve$v~@prVo!>}8fGK5DEk8Eo`n1kghv?;xuB<2TKCmF{bs%dyUv2( z=Dc@At;x!qLT3KaN|(&MY}eG?&4DL7Z(dJ0^y>Z5@*s_Iy5`um`~2MJSKohHj{okD z%j5C!{4KIX7*aZq3hOeSuJhOpPDt5zD~!pofE74~ABuPK%w31aq)y~rM@Y3cpTQ?V zwL}ZnPI;xG#&;!bg+8;jTO$pyM<4B%rn8q`wX$wqh=t4q2Tv(j?(x99ojc` zKpaoOz367Wrr*1MeSTjFRl;0LPJ7G2d=$TxH!mIuk7hLr`y3tho+T1u6LB$r`XbNK zI?RsIZO4N-ea_=#Xq(cEg~mN6WgHZTFK5qla>MQm_@2G?t&e;Ihna_^Gw6qInqPBq zPsG36H~v|2{ZDyMM+tvN#4?=TzJ9%IZ8GS|g_p~Dil)b}#{5rnB4hBzn*+V%70cZX z&h!%T>>p1q$8&#*o*NfWesy>-$5Vp|oRY=hptpSs8iNlkt}f!GWY6nY8_VHaMa+#K zEw6Hk!*ads?7bS9O_@H#`-iXJOv=vj%}1Y{iLf40BjRy%^l7|*&m=FLddXRUPRi9d z=Am$pJesYmr%GlZ~j35kK@deMzFV3}Ag$%u!o&-CsGb@XH_~ zi}7LzTWONP-Q3k|iPm0a3^PiBnIYlH7ImVQ)%8C6Q#OJ zH}l>pdwm_81K`BZ!?$0j!(J~>XRjv3mhyG;)=mm21;JuMs8fNN)ytw~$xW~6gcZd; zUN);}7zmTH0(X~vg&-`x2<+qW#qz`ZPdG0{k{}L{jDeNOb=`3`JSEDi3L%73gfKbl zl;c#YXEoYZLHNkCUtVeK@UVQ!%Dd%d1R+3C7{WRtkZqyx{#4^m=rQJqa8>ZL!i2JS z_x4kiY7X*16CQK?W^+7zwL6{cgj*&b9ieG<(*grpz~2uJSzuFSmVvT7S@Vm#`+Lj& z!OPYY0faym;*@;6-~1etn<6%5_b_~PVCuIaKnSF;tZ@iASPtUzU?)NlBg6@^eW4L zLTVATNWp}fBSJ-K6sNR=`HnYly4KR)1gwzSvy34(uel<>8>?$$h*1u$8H0OYHS<|Y z^i#O|u}kka_m;<=$*Y%_=OcX1kV#}O56xTAQxh3aZ*t#^z=$0aJx!3BkN=ulp2bgw zT5I)91*sSj)mRU?=^4R>(eY#}hS?WVG_Nod94>W;>+{WUar@zTxleiDC8Sz1l_IC-*Osb(9V^2CHSnTfYMH^uDlQ$bft6PEN`^Vev;CIuBYbwSp9SwVT zK?p=C@#rX|;Ah@X5>|vOgJfE{!gHTlLl2xSdA{%=9H0XafH!+*kqBJ8l%qyNd5$a3 z=hgQTtsC5CD5G!fHSxJ>c+nn4=ioY8N6zNX@=6DmftoY1*R#n#jue^q>BGlGg+ILMp>cJg zSBfID!`{2evB`mmHX5X#95V96c_tS_`;?Ym4cAv^%lT&~P7d`k${ys#L5tS}-uiw6jf1WoE!}nS5?Inxp%Qdn>9vFvX2DjG5 z5R%8&mGJ$?Pd#76Dcq+s7;yux&#Mu%9zpJ06&+RMgFf_~nsK$Ejb&^-B)XBtBa$uJfup|vPy&5u}ke778@+%K-1Z@`#bRS4nzY-B0z z?&P@(Lx0XXj>riX`2e3|c?OJAoqF{0=G9)$yj#xCK4vH+78zZ z7_nvzUdQN)R)pm>1Hr_i6GCu9m?6?R_dDD6K(6%z1jXg~+FeSciJchJ&Hx7nVE9iE z81u@rLby3jcd9;P$r0`l_Uu`LqVa{LwN=EBWC6nQoQZhRL%kRltFmm(V3&nQ`2z*K zf$xN5$I{DS89aEOkd}i}?4d!=-4N=9+J`(t28Oze7a3U?-V5{7HO6L*ia_+{2-OK! 
zN4S)j@EKgMr`GkH=%mY=F9wT|f;;P(qPx!6x(>My!RW_k_d~|keS+uV>BeHOF}7Tl zee3fgh?@jluOa6AHN{3T1oa4}w2nDaDqxC1rXVb)^8vw{4EPUYJ8)y#Tb`bULqj%7 z3S3B+(4XBRUHh09hh8*~`FwvFZljf>)8#JVCwIos6crH4-2e8gABh(tt{NmlDjM{>**|lRR_rcS6=Ondc`;3oGQQ0z z_8l6St%JR+;b}q#eS*3FR}Tzti=gy5q1GkR2@{#H#->1k62NwF%;50?!9V@%6VO_J z!uca-;3*lA;z?-sPHT-8&}M63_)J>EoP!nG?9Ik(&MjW!nkOl;fCThY({Yb)88DQj zsAV`87`ce|Kipj`r@{R?xKfmdhx^g|8sWrOT6wPCgK&<~t` zPOce`kzyST?a@iWmHgF?TXbQZRy5R4#cm3gdB$+P7Dl6SGIH(EC!)#8RY-@{SK~n# zUbMxo1c%zJb0M?UeLX}I!u2Ej2t$m}yhMKwUa7?2Nj_grvAc-K-L*r&)=q%KxAace zgV|?a{g95J2b~c=5p~Hr*Xh(Dqv7exo{{~Q< z@J<69fXDJ#Lob&Dh5{kgIQDcqHg>kIgow%iHB%TbC1A9!PyVNjQJ-Orv3K}c+<)}-7acD-&5dkV9Y;H=Zv zZw`Y$g3EZCEphh02SY@a&C$@FatpcdK1&vz_@)v?qFWj0q(`{qDKG!O}}z zh)jQyb;hS)eesHXKD3T_`vKn+H(19$sA7caRN2pUAh(V`jc(?>^*;Yk0o(Q}h`~RR z+xmp68ACmy`^WicJnPeBNaz)K$UfMy!Y{h}Oe^oNy^$CIE`b-vdCLj0m2i68!wFdi@QqK&$(J(BHMU_C{rKb2Y(IYc=1qqE z=gHS!C6F1@UiY}$a!0;CCKED5;V{SvwQDoGwf&+w zWjti`+Z|+A5NAfz(AF|?UIc)nXRp5+Dp(vxE}UwUYa5+rH;hg^CkipO6L>$INzIBJ ziarRbEAFG45WT(ca}02Jn~-{ItcehMaJL}-jw z$TtnIq%@z80j8TM5_GM!g_R zBB=LHhErS+@g{hjK1Vqdq_$N79#2I0LxY1gQjoGqkWNLmkwUu03@2A9==+c#5Lqp^ zESWwNA_x54fCD3eIRNso);W%M8sg=xI2(IiD-SF*iYXz;GZBEf?`}IEd9~6WZoe#N z=MjC(&0CmOkW)yN*PH;Ev&a)Ql)u!#5MV^0LjC0UCn=g zxm`yn+pmmvC&kem%z}66u@B*S^7{#9Tj9pY9Ay{G-D^%@I5d%Ry%cWd*)rxti&{a7 z9_{LaH*`Z8HrB<*qvh`0p)$*IsU2*_?2G=;ph>Rz1&?O8dB*v9$HJghuDCjvIF@c31@XfQQ8DPO~lz7-- zWDx#iAhecIi0x$-NpbHNUSjiAE&nAxl-5!38JUi;2*(fIKidcUnW!3?5=1x;MgO0K=-(Wj zCW{DivaccVLu1SipL-6DoU^&`)Z|S$AQ^8PqLg~KUnLjMhS%ibkB*O*lZ%`S5sa4K z)0Z9TyPkU?N1Slh$6U)<(L{VHMXK&%qF>D!Z_c4Dt>GE|OLn1!DN2i9z;^A+E=tPt z6p9y(OSbe(1;7`YUB%A~aa--ylh*KJG#+n|3ml;})cjNWtLJ?6(-Fg?)KXkm3UQNZ zFY{zyy?&Sc$>Hff$y6yAG}q#~k5jc7-6w02b+qf} z$dD&vh9bwA7v7g@<&+NZbPeNGh@YGdE0G)-*OR~8t@=#NH^hEj-%#nY6-|% z&7si4OHssVO)ZLfGbTg=8U3qq84vazh0B1W*Vae}rR}Hn<6Y+!Xc6Apnlqh3H_{8* zo3H!qDScg0)jk|1daPgMyhnD=q^Ww=Y!eLt8#|)c=^yoAo*B(-1p9--*R3%+-?U#m zU=q?o?l@5%7O^@T*aS^z2qndgnDDLq4JNc$0D1;NNn3e~)06Y@^5kK47@B4(5eN9an}5&Ti|8nW z<~v1DL7to-vRHTL=f`6y3+L!UrEfps?m5?r>OP7XBxnGj=YTt*Z2Squhg{qzM@uDYJ~tkkN)7Vy1|lprSbJEhTKG*-do8(p2q_ZZLS4Mjyhc3=;E8na~x@x0R0P zKxms6OLi1gH!-#*1l5{Gp@2)Ya~-ZeZaf(!f7d_{y)V};96kM`VZ5>9xB02~&cO_i zF^im<;u<}W2@^~^dG>y{cYGBV(N=#m6ofs!N3l4S$u^qDCbW9o%{e-s(67&v^7f{~ zej|2G#!}C|jP@}*e5#z6&n0YlQj2z*8OHX{nz!&Yfh%W*-mTy1Tw|OB`zPU+G8R7P z{eqRhnS+j&hAbFWEd7eRwXGWocU*GA6=GDuE{cDh!>zWQq;TroV9H0#XiV~R) zYYBak4u*#HuZ4vS9N=AfjUaDYtpUxD2k~Bu>M{=tmMP`%#kdp}W{50#Z>9p;3_V*R z%`50OpWdZp;$>sH926Lc>O7t%IBzp3|K{R+`J3aDX86&zQr;{B#PgUl{kP_@GYUj1@K)34J`}UA1D{at@4otKj(47|!SU=w38Q-l zNJ)j*#!P8XOH}iTKd*u%ww0&0CZT@&t@Y{$tM%|7&B{)afiIS8MS;lSTS_?AnXId(d^aJx6bK^$0IVSvA~p8yzqb8IRV$ zArZwz6C6Bw`X7GykTLPFyng-l^6uTc#@r1SCJ?^TEBD|F$1Jekhv$d;>ygjnygW_T z_HCEM-K|J(ItW!pIC_$JuJ-D8cN)uZ7R_aqooScuRO7-}7SZ(t-#sgwzG$t;QqTaZ z6a`*3K10_IC^$i*>c#ZrA32j900z>-)0+G_3S-QZOKW93{Yg$MU}i`-FXZi;w>g`3 zW#o7_-fc8yPI&e4^L2Q-%45G?_Ja4;*1@Ei(Y2jGQ`ntBCZf$qcq%`mgPCS1l<+hI z2q21FEPjrt>oX5y1!2mQQ=PO-)*0vfS)_Tsh`!aHou0Lz%Lv9^`MlaV3{ioEae>UA zu}MH&70?=r5WZ=|kRV!coi%xuawjT<@i8>wTE$8Tivf0KD;xy@x!sr22YmwjRAqcf zz)1Fo3w>!n$bu&1PEIeD^9u}{a)*?J@rw{ZaVp@GV`O1%rCfIQYz^6*{5(OfAw-uY z80<@ph$AA5tt$Tg4@b>$J@9}MM7&R)q^M4>m!l7Pe252Q03L1DXCOhSQq&aeSZooF z6T(mxg(ANvG(~{3oNa4!jIK(4bF)QbdrLoP1wIM)#vt&wo%8jds_?-Jbxz6UK_=;c=q2MKAX3AdfM+jPh6AyXyq!M9+qliuMNi?*kGCZdR zVVaQ0vi~{cD|AWOq6mf`nse=o))42$axa$S7n{ow1>d^wG9vC8@4owQoAN{67{jG0 zq|_7C8LV=Y#^u5A94M~I*Eba*;0eVBYeu2d>IeWix-6g2lF$wYBiQ=8LB_jjCCk-C zUV?k&_=Pi5_~X?FXD}5?n^1T{51lBCY3QNN$AqPd=j-b@AW%3ti>Heo6bU+}*8XkH zcFdEW^hM!OQItDlfVV<1*=Ci5o^_54Yo1!uQu5$89(0Bso}R+EV1d>t8;S(%eT9j@ 
z%s7s}!z&n|g`YhRio?zqMjSe^<(0r^Xx>C4QaO~=04y3n(vya3dSh&LjVK9zX%I}) z&()x(NGN)=hR>(2qR-ardNtV8KA3THqzo7k(-XfnFq9ehQ(V|PR48~)daLW#{q4OU zUGuQ#7~tl9*IeibRL3ESTt?uFOihJ$czaAs{%t(-H-u@=g;#S>d$Wxe6T0ezcxpVV z4fk{qf(4D5u@g9UW^<<+Ox?D29MmhZkeK{yUE+bgT3X|n}f!0 z&cV?RA={aStT#6CM>09yls>hElc(&^ui5Sy5U!QS;Y zd3tGIx(OMQK;)1p&UNyNvPGjp|DVH~`_PvNh#Sa6#tLWCedCdn=a)IL;mNZ{&f|sf z#E4StjO@t=yRA4ooC$`>S8rbrZ9Dx?It$WQ;;uWTt!G%*BKTWDUkm1Wk5`T+fV)` zuNbo`)T2yT{qj-5u^8g~^k(_}@BcJ_eG$-|fHYQqh6*eB)A8AC+Zb3Rq!=osQKE9z zC0~STS0pEFgUy7F`;lw1~B_j#06+IXVtT&3E$15fi1sP#GmO&&Vqd-h^-zH=$WZ;Dp}MP-|IRmu6%& ze?;GqDGMRF@cij~IX_OY5gILqu*CGV1&>!=lY(pbp3~$4xaW;!5FN|)^46y6z3(d_ z($mKLbQ1h}esiFpMjCO2b;@C?pM%q?L@E9zVa^N)LgY&{MUZAR zOu-q;x`|CEx7No-u;=@8_M|ezCY!H2S$z65?5u;D2k3rI)^n$os3en=JC4Zi8Dxo zxwH_Y15Ne6VFuTtJCp@O=!s(3jQ5A+2>jZ=4vM@dhQ}J>O&&EzO>uzso=u@D4xC6IQfPf}qkn}%M<$ZjTo|O= zDRQleDG>#^4EX!I%U&d024MWez`RI_-%no(wLe?QpBG~sFnF#m&-3^XdoIUH1aS?; z<5QK{D)jTaq8;g11~@~3yj2j#TZbBKot5{2%XynLdNMcAbdX1Px*r7 zUUL-n^~I1wQ{=8%h_#yjA~@Vn;W0`BV|@?aygFFE`l_{s>!}sN*J!I3x`^C*F6Zhp zS$EO>TEJgtn9!@}cr~2W-O!8Z!FtJeh9SONw@`E3oKDBcf1KzGaw0#}blIg~Uf=%m z-SYST>d$7!j%Gbi2_qGEH&+J2`~>>ea`-CGe5|Yh7ZpsHrpZDCqF%R4ZRAO?z;74T zkH@FSDU;ijOu&m+C_Ty#Lt{$bDWT+xfN;W(?qRkUL`%Z}teeZbHB9m>1hZ7G0|*9* za#CVhfhpXl&*UH?lwt5E6F!r_XJyK@^6a&eJ5+)S%v=fY|7(j&MRQG23V;bui!4;Dp9jDy?_ zi3MvF!;{Z%z6^z_f^Gr+Mr%T$grcygbGwB!DGmFfr^pM=7y{As9QJT-pKFF{OnP>j z_mINc*v*)1onBxfG^%yz=MBOvBaV{qd2r3sYv9HMyLf^N6TO%(WJ{Up?S9WV2{$v( zZefh95s)ab-w7_BJzjNNIEn^-Fkh z5}*9&^Sq~kGCW8L`fS8hG(BN^^z63KW)7?IWSVH7kR?~tiNw!e2kS|xH)b7O?_0!1ghP0AO%p^%-FFv#z!xWA-67y-V|ITv=!WtcLpIuAWKon9GunMKXd@(YhJ+4xf3h@HHhu9?zcjpgIj| z`gUo}8RcL7l=j zM!e_K69=zf4L|5=x;38p&Nu|$e*12msuy|K9K_*qa>jh(ZS@4+gGUY>{i0ZTZ|_yg zDdV6qr;kVUHhdMHM0#2Aj5(<%_cI>k$|pQOH2tt77MFdy119=Fo}%xJQuw5J`XYBl zIM6eLW`#4}>Z|`I>ecipYWjy!C%}wX=ngeQyE%*p+tFY~CqtL_tt%kXXa;!MOU7pc-NGUK7ATPLuEX?PC;Vyxsl!?Gxh$szC4aT zDEP+3J5A(8e90hqim4~>9b2j{-|XRzbX5p90BEsScQ?yX2nM9pBeMXAAt;3=g$5|t zyYa5&h%ZuTlQ-iX5C|Cw#vAXZ396>#mA1A%57?s&o{lopt?B8L4WP{HHO^cVU^bS6 z{hc9PIdy!znG5SVU$5i;Y-3xVuxA9P*!246ao_cyQz zxAwMA84o6tsT*wU)qzR>-Gut)ukR0u)lNdT+(7BJ% zGkNvsL|9NG8gqHpeH_Kr;EQq5&}BeMD0!a}Fz=CxqCMN2D}MX7NM6I#Gf-26SZjXZ z;lgp>Lb0O=F-;5h-h}i0+1k>o(V%*O=6in8I$CFVR8c)j8O{Usg#39X=vlbU?ZF#f z8-2cw=X>=?T!SVV`TgIY^I6xAVDEi@KT9Dw$#7CjEy4Q?Ppq$~@iv2!0Y+Y}k%|-& zV>5b$bCbhQ3{S=_A;<6$c2Beq9tUsmZ;AthlscGzTgqf;C;|CzGKhbhz`bic@agjI zBg1+|^VXzk=F&vh>TPqPaMyYYjZSWZJEQtG9N8ytoXs8EWYtY{5563Fq}a%qenwL& z<|nneNR=ip8PI4kT7`=#+-eNFm~2mD3?*)d=JjG9nbP`&+9_iWfM~|&$K);L5dp}} zQi>W_@d)u6zQr5s{MAEQdE z#o)9*o#G#ZrE8Zj(w?yid@%fW3h!f`br|P7l zMa6OfehjO~u`foRUy@7F6ay0t;2pY`PHBLgvmD{*?d0gki5NeoUt7fTRLtH$Q*4)# zvE7aLH6gALi$N~W&J%_$B4XvCY)#Ha{_3YNQ$%1NwLOghFX>O|n{+VAy9`lH((izz za&WeAgw!e2!dy*WFXr=z?m?`qIZci)g6jdJ%nnnkFSi0tFqqC?AxV=BOt9t#c{N$F zSvS9YyR*E}x=~&;1o1Q(4?@b$wrBZj=)+)PDYDoQ_HIP``pxUC&ph!Aon4iT5gfw6 zFf8re-NPZkmoN7MD&(Fm8zFnM*UAvm05^GU@=+=O2+Mj`&%MMJQ0mR6HJ__mx2mEV~F)WPkf9wifUk&_zXVN z;VJr<$WIKxV+Hq?1f7kydSp5gemaXGQ+$*F+!6+JI8HQ3fVqPLCS+K|lpv-3sh@w> z^FHxFVi)`a$c55OYY4On4nFIGZ1=(WxB!6vrjZ8EI}y^o_Dtu!b%! 
z6RNsE4*n@?&qp^I3yMrvqXOnZG2S2$!@(GS_|-?C7tBPkp783r|M7Ez*5X?$JTOMa zGHI+SK5Sg+7s{P>Q$JxX@M-=E67VbK`(!iPARm%bWBm2JwGN}XHl2YP_#yUW#owfi z{`FbB5G?7>`JliCttQ3hY;V2Cp?GojMgTs+!H)+izu&sb8F6PfEh&AUeFuS7TFsErwC zB_%n^lP>eXo=;cd=h=eIK@4BcQ{gD`=qX*Sr_l9@e7C!N6)`9Bnp=Xa`O7T}DMx-GV(ltr&DJ{G0%O$h5|aqSJ_!K_ zy$&O$h=@m(dJ(eUrrfhsA>iyJ3+?n|IY^N6>V^Fg3v0S3b{>BA^iq%TjX+o+MapJll#; zF;G?u)Vn$d=#&N2fD_gnLHeb`67#aGZ!spYvuIa+H}1(v4jTX7JOiZvtk7OxN+S zJQuAE?$MFCz)80e==0vF9Lwwe=13t^)I4dmI9g(hILU>B$KasnP2E6nXK2|nIIw6e zMlplH$i^n*6CXWmU8Bem+a{%l>2WVE|-6O{BgOAUe*ZGEV?T* zu!HrwZ8Vt{?KxNrUtcAIHhHOE0>Lc0sR=C=8e_CggfTeyU=W-HM zVoPoAq$pmt0A1;Lhd-wRh;v{O7IO6xK6tU$Aq@oHXZ zK=c-$e$IKbHP{J$;B6^+Mtrf^3$G*8hDv>4)s)*0#e zVye=6&c(R~Fe!Db?!VbunNf^yh2_Z;<0A1%$@ERGPZ!Q)8(u*uLl+5_>lEhsMbB0% zGgsqfyTdm7EJcmh>6vH~-GQr$|5rIxBbzhmqP}i5Pqh^3M3K^Pb9!+Z-!*n}2G6}Z z9XFIVM* zcOaX$Cx&dDM#x0m|U!DxnbkMNuZ_+>NUFyDb8{8i6qQ3kIv_wN~jq zn?3ygPJ9xoU1KN4R>tBmM?%2+t0hj+p^fGjBb{Y!TQ7rbJ4L4v2z*W0t!EREA~%E* zLg!r*MufncV|uwp1MTSx9aG96UC!y_?QaXo~sXlnR_53Pk4Tmp;ruHGY{41tN_$8jaB$WXT5#_tT|ypC>)f9040aJAeGaCFzR-S{2Dhi_ ziP_mgzVfe7}hu4(-N8vRuP(K^-*k814~_Qn#I(= zm?WfWDAE0pZNIA%k0>myBZrW`ti>seo9kOfi^7N}gJf3f>?v=qU@0swSIAhvxU+XO zWjqZ}df>&)<8pGY2}Wb|?87W;J%>GuGT=QEKtfb1Wxw!F6Wo~BE{Ci0)#Br&6L^XZ zXV1Ag^recEwf*7veA*#mP(s|?X{(n>B{-y9*F$QWdTZ;)+-IHR!Gmzu`N_r88W(N& zLVN_1sK6+Q24u|5X;?9rdk6vw+pYr)O<=Q52^&gCmGZPKj;JY#jjU$g9!6E=eU<@s zlm~N?m3;pB#c~_7f63s97s4F{5d&lh%{Hngsu=HqA=7;XDRsd}S`}8tVstn3j3;I; zG2{s2?rl(OHTG=BYtDqfbyH9;ZJ`v(3rgc{#M)p!+sQIeM(9M-if|tD67CU*xafa64$;O3|CMY`6V2R&m5@yXE3 z+GMBsfnB(U+d$bo`_H&&5e;mnh@+<;GZ6k14~YIS<|lR7?>dk<@l98@H?MY<{rJt+ zw<%-|R}-OXO@IE&Z#j3dz#TgC9R z?UbKBy!8UOW4Pw#^;2X`@nPL>IxGF%SeUdJqntVJ4;5vGaS=N z8yPjPUcH$}7)7k@?M1NR6ri&)I-+wmNjJ$u@Kv-(zWbU&$ky>QT~gfO$FbYmNq4v| zcrd(-#h6SCPqbuu8>EQeccDTPi zg~WF`iF78}PxjIUTGejuD#m0;(V5Z5Pc%+8Gbk=D6kMJyr)R4XFx&gcjKE8-;P+S2 z()G>N^3)z=-M557EDW<9XHiUbr?70_Z{ECJHYm6zy`I8jfA?eFpP~%{IcxJLSTtn1a1-VZUq_vX8HiPe0gdF4JHg5LLS!<9>Vj& zD_=gZQH|?7_v?^(I^CtNdmKZGp(11`EVFvJJiLQfd$T>`c?A9>;+U-&J@fWT(}>gM z=JI?(^tyhH7fpu$WL&`8$*s~%(-c-!?jLFOP8MAWot-a*y31h-Yq-D}86k6+= z)|-?a=I?$f5?jrlWlR~;7B}0kjH~yv`40Y~2@wOKSzq$*&o_6LXW{5NX65}~NB<`b ztB=RC9cy!Md-Wun6XDk-^W15w(>mmhN6CUACHAw@?VIt&-~nOsblU8;7IRq(E913r zu;P}YjIR6HAi>{=R@zTSTb zzn(eQG@oX(Jl*P^RKTsX?=?=j{uk+rsN%D0c2a-Vgt`Hk@H zGkisuPb#9%Tu-o1Z9um$Af>Pv3>3d>ZX5smp8EUGH_LIfeZ`e){gnDvysWpxo5Ssd zr3jgBbPnEH!}Xe7onM~y+0OFoUwl2D`RUos^ygSl_EMcaEC9TRt8ghu1vV0OoA?@qTm2X4@=Sh0~XfyOWH6n_}N>Y%&<+_mZct zUgu>uPhPerX7EeabWt*F&BISPIhvYCPtSnlr{Ws^yeHX#Yo__3sqNi9oLSa0Z4)`-{ z)B=zt2;%2G_REvxdwfGG%VWQY)~_=dqz{Ik?j07!I8R13HwFuG-_IyN+>`!b5Nt$K z(zg!Vc|L90b?g(8cfXXXC#YL^`}#0PEDm_mxgL`PxVioZ*=@$IWUbbdQeC2Vk@H<5|c({3;7JYcUJe z#DpRzADJ>*!?YNQ<-pQGSgk&!PW~j}OOQ~Cgz?XwIS2{60_63LMI1s@e@M!8Go~*pfjFfwZNFKuEc7msHzZA@bRIUH#c_*NG zvY#Vzgs9<$kLIYwxg-HT#EBLX=UKB1#fEI0@DHB-?IgcBv;@c+*ZW(IF^Zc|2nKMB zmP80dR^Gsl$UhAwYA)bug~GywzDt2FMSiak5V)jUZ)E9 z32|Fna|C;W7xOrPK=pu zj@IQF3Rm&sc!%918Nd_wEn?*z>j#C<3;1(C51SD{F{)KyoDq)r3hwu<-NDZC?(J^!X=B3fU%rI<1ixs;>h(46eo~I+*xYZ8De%^F zad|lkWVUl%x-QSO=gKpaO->aH8j(nR%fJfI7iY=%i}Q&rP+EFIhzv3&!40@H({U|% zwH?g9{rX)b^SS4*)$=pAp05ornK|uPLz~Dgxh)E4PMi2Pn!?|_cH6}+lTq0VV@ObX zH1o7o4wfPmoEwGO1ILI|?tC@Y5FXqnky4x-P^Z1|^hGXQ!}${Fl`e3dUh?S1*Q9~^ zNuC^}FSh!Olg4Jn8}Mhhl14V3BQk+@)Sc=10H^ltYb$$qoBp_Ko)md=jv{}e4JX^Z zPVw({zkc=6dVrxrUcG%ar_3GfIVPAKCYja<*IJZ#w|w~U-SR2p-}BKH8k=1NJzS&( zTwX*|dbg;wCo`ovh3V;3JZ6#TM?`pSRU49rXiYN)Q?V`7`f9&Pz0u|F<(l{FMpej) z;Q&1ezcN}MPzrNoY1aTELIzjiDvFrZ7~Kt!A#R!n#FV#Pd>0^Waf_CQVQo)1%rf3f zP$QzTTnO6)q`zm+DxuT@DA_kV!f07&tpQ;4vUddRMG8+S*)|kLhI|VqwLODKv5(X? 
zIF+kwSULhr(1F3r4V9yEYzc8|Xb!`uXJ@Tfo7{|v-lue4CP*;FnWC$%nfHRLe7W#5 z1bu#NeT_l3uG{XY&>%i*CMbAnPo@Dz(2uFMr|MSsJ-ww_`>v%Rc<+oZQI30r95S5? zaCMdtNf5B)C_oBmi_sE6!;g@AHr8~!x;$s#2>H6++OtR_Ow1Ww^~KNfy$VpC%NT*J zU>Ig_iC$c{8Zj+n@DWso8Qh1Ed1n_Ha&wwr_!j1U*?PC~l)w$mz$qciyP3i|O1?oT ze+HOzjhlG{KNs{&m2^0JXsn~B56h3y>}i7Srg=REo5zf-&)w(qJHibubv-6l#Kk-P zoN(0U)n}9_&z!f;^#7c||B@mQ-shPUFBBK=?<}Q!6%7j2^Coyeqo`b$MJ<2Gx(2A( z&zyjpy}h0Te!Ok;&~t>~DM9%8`3(IyX!xifDZJp55WCAr5LP8f*PQ_?pNRl~3;rB0 zH$jY8)=+WJiT?eQXqXe%tZQT^$n;!1I!ZY2iqXz!naEbK*@<2bww@+? zRIRHS%J67R#f@YtVZLr-YqXv>eH1d{D2~BjxRau|y!tdaxv>L=!pi{DJV+Qhpal!t zxhTSv$)o|2=P7CYs#k+tt)D!4uTRIv$&6&r!Qm)KwM)9T!3zgKHNCnA&y>G+H0oZ; z^3~z)a`;LQ^tHlX`oaz;bxoAKztOH8D59wEzWTO##!JZ`%Io%C>XoB%)%rO`j9JE3 zNDrb6h~3@SQyxayuTakB0XPBF?zEqdFkK7K|!&~vs)DT76oJsIE7Ke*|a}9c+?>5pM zc1zvqMbCm0f>2~|)C4Hsml6G8#Q7$R!-WLx#now+)!9^yH+BmW8aPdvUd?2t$wSLs ztK)kOz@Dr~wMUR52=UXNGvFlt`kd0i#K0sUqMhMdY{HX#*}R0G0n<2uFrmHf>5)C} z%QPAgN*fQl&n{DNC#UNf%ktE(4h|YKD^g(t#=Ih=REd9Cbe)=JHXCa`hWEJc%eFUN z*xBB4#e+{{;Jf8CV#h#p5>o4*E=W)uF;RuK!~s0-odOdYN{x&F^||~BBx?RZ$+h4V zM#_hzZY>X4<(GGjmk`<3Ghan*V|gCspeMRe+pE`amd(8TRW1k52u0Y&_Bg45@p>XU z+Y?1z>=#}u^C^kzjDgk}u~XE<5$|q2Yak+NU8?I5?NmnxXxp;XJBZG#0)GvaVN8XR zI!8@6z~+e1F!MCQk-zT=H;toXoqIJV%ACo7+s=~B013VXC%7milRFp5S$i(X$*qwK zUfn2{zD7YdE}Zca?;F$h%8waQ-}Rn-@;52Bhv>-;4MlcZ+&ZC7c5aHVx{kueu<+wR z66dSS!=qygf&qD*z?htL-}cH&>0Cxb_DE~fs)F7Yr?5Kot-`k_%x(h09SnFmtZ~ZQ zIvF)QabZF^R%x#Z`);u3c`eN=-WYyro;>T8*tp%;8qhhVO91weK8x3^A@C^Ro2_K% zPIx@6Ps#DvjiG%j;|5Qg=QNR_psleN56n||liTh+ z*D_kxCO7e_hj)$Ztjql21bjs)7%yrrxt+Cg;9d-n@Q2d0J7uS!MUm zNoL_thVXaXUZ&Z~t+Qy?RuLSpmya?n$^6GWU zMfe--Fj&R!&nEiDSl2NUudD{Wm<7-D)$7A#<6iTLcB3nW$QmvwlGP;T)A9RwiAR_0 zY`&_`=iK{nt#$UMCNzyFs`?x+M89CJ!H#PwWT#Mm_4U^iDxbzk(GPjSxRfFwN6d$T zrtL3fPA>20>e-s<4{Lb;-Jb?V;Gz$Gk|#ODNa*_JMlPbshh(VrOM%P|5BKyQEKBFk z_SN90en75VYUH|gAIm`}Z*95dIIJ?ZQInlI0oqaqhEA6=*wiCMD-^9-GSAjT3}-vZ z^1jbG89{6CFP(^2Dfz35i*XpyoH`YryTagC2XFK4>6S6+qFX70Wx2dKZ_KM`xp$)< zPOEkC%+-VXeNqz9trY6<86zuPU=YFq?;>u7N;(9eZ1y~)97PZA#u12?Hls;fxu;%j z^XFrMZ*w|CZ8vga`aqsWA7R~e*wTk8MV)v2a%%>XW3eUZ)5nu0mW32Dc;MHptq5rP zXos|sFunqm1zU(cgDiR;Vz@}yx45`i5rm3G*5l+_8*?K8cbLUImEzAb8d^wT3(#3J z-9dQHR*arQ@VwrAw!AseIcYEANV#8}PWXvsMCmdV6b15X#ws?K1i*SoLsntLxt)Sv zyBIa+Jna`e?<_IOV_l2Zw=v!HZq{gFXW3)jidyuUl~S(1|Kt0ZkTB0^Xbc4xUcx|x zgwcc#_5Ej!3QzOE5fR0Ies#aR55Bx_pxoc3*o23kg}{y3cA{gIVn=%@87Y*_;}=^C zo7+vwIL0Cnbx>ldv$loQtaFrd_vo&6c462z88wB;0Ga;k z(GcNAF^ZUs!5FQH7Fi>lDe9X78OovA-1N~&bNeoaILa%#PVn5dHpNqs@#N_5!Ue^y z6W(KVkp;D$6xBSYz^!|P?ESt(he?)Eh@u@+AlUUU6A+>!_QyUUtWs`Q3676H9(4^v zEd|y*(5(+ddFdDeSsTNrMh4BN&dAe-I@nW0$& z2iMTl`+heE!aIeeA}MVTUtqb8-{--bcg0)foy<})25B zzX^=dz_97Q+4az$Jp8HOX}B-(p!rS9+F&4jKj*c_Pv3vJUjD`D`{g7Uvybqi;?=KT z>mDb%vl^E|uVme{N9}h))pO3zH7~gc7edM+bio^LdKORB7DTu`i2o=)@EXH3zHwFn zS#TI%l5^n2K>hYtZ(G0g*UL%oq4}w9r;LL>8qzNNtFI2@8*+;Sq5w@Edo5toLipp+ zhmoC%QNc*9lh(pa?ExyPoo*4|q(}hBI`Pd~iL-Pl|w?MrV-7^XGX} z(02AP{1MeCc1B5aA%{PGI?W&u!cNge3uKH#uFwJaWEu?2P%9^q>~OY)_b7F|b9#)M zr94uOM<-J=b#!tu(KA$~FS}H!t%~Fj{rLE~xrJ-^yTM0256ukS^enw2IIlc+^rG{s zG{?hikBi39ZgUX@Vc<)<>DNI|DVW0>>Jc~|}?#2k z>iP(MAVV?mC}df}uQYi?Fo; z(=v*L&NQNNAcy%MXT7sng#ce=rEkQzLOr_bf%h06DS`dNy&)=HnPx|U;+EVcgaKg+ zx-?f|#5@Flo*j4)ByuuGa>Fq>MZ%ka3j{H}2Y6BmO1+aA9jaa@2gW0#sGINP-hwGX zjUM1$<)_xD@NM6}LLc6gv1fNhv@-`UbnSiy5*hl0ji-4y-(3n;!~8!5NUjheGRR5Pr#p40A-|?}XUkdF>Z!36pO9G$_nGITz2D6P z%p04sQ#eaOPD-IQA0J@4kT|=Ugw|(o6u8|Rg0L}d6-~0Q^*Wdms zSnCuis+Y5K>0Aw7a%)2q(IUE1NGd%C=IfR?bx+AB%3^9H;`3;fEVkd?c4!WtYejQ) zX=9X4Jx=TVl5%eOjjZ0H&oT)1GGbmGzUdh}brDz%t#+5aox|i~dZUL^=3y_Hk|&*v zzDHXK5n}W+A35Iy2p%+`i 
[... base85-encoded binary patch data for the preceding image file elided ...]

literal 0
HcmV?d00001

diff --git a/sdk/ai/azure-ai-inference/samples/sample2.png b/sdk/ai/azure-ai-inference/samples/sample2.png
new file mode 100644
index 0000000000000000000000000000000000000000..59d79ff28fc57923898ff20a911afde70994fc6e
GIT binary patch
literal 264509

[... base85-encoded binary patch data for samples/sample2.png elided ...]
z>x)(RKqn&gad_4Yw_4xml`A+mvfJ73!B1eL8o*+(sbrhZtg;h{uYL$F8{9_ER4D}r z?h%-fdB(f%)(>*zk?rcZ?8cr6Ce|t-Cp|A~F*&jpzdE5kZ;!i;1A#|p^`|{@P2ZUw zv>n^d7UcePNao2J`A!qLfbFAuaVq+a9;59HBMRX80wsAju@Ov9Wc@+0ZE)U>K!2uV2v^ zfhGqfTmYKCz~fNq?LH18L{xeRk_eCSO7WugQww=89AO6L&v;*iIQKM@$nGXlR3llf z>MOgpz~nnYGi%&zU!^>R1P#~_kAuTFh8tXs?!XZoE3DZkVx008C&zfSTwWwiB%BhM z@m`%n1y0(pkcEzfw2#zzgHigLNHl^S$3AUfI=T0tAwTi|W%Hl|?g$}u{ znQeAQhEC%^AEHbQGUFn9VC?#$kqu4tu?At3XD80(aDS@{+K5{5g>h>TV*!!$y9QC6 zKM;8Isht_lNdIV>YbqOP$!LUEB6q=n&XoJy2PdC>#3dkdZk#t7S(Dx(CzPTNuf}sq z$;RK5J3C$<8eb)=#NXVO4gFRTe=KCA2U{v;6!Xzys@GK}GdLwTDoKC3%9j<<)&1`hUK#HgJGkGm(3?H!fdk3M}i#XN~&X()lSIggcd$wFI`M!0s{6BwTZ~0&T{O6Yc z)6G|xzj*oW%k$2s5qr6dbCkW(0r#VChjhXW^5&1jCi5HsdUBFCUIL^F7QLXK44iY# zhJwC|hhYc903NM<tmbjeYs@xO(H5S>z0) z??a{cm)n7a?<;b;YYsH?c9sDWGkO=tb>RIJKtfBv{y$S*ubs|GiC0v zVRRln!6o2en{0Hf_M&Z(-JFkpyDkUQRdCMkbA@G<@d`*T@*B*83di{))#4_Zp-Fud zo1A-x9-(QzcNil5+orV5kB!~Z&P*yK3!y{#=u=`AA8?oUz#ZAc7vR$C&e?E04dKbv z)!X(zHc?-aiFp#C&)0{aC`2!=V$Z!CO9JOQ{%GKvCq4v=u7f99w?dWcEt@Nn(Px)` zPoUQ|IMN0=1s}acBk);qwvNAYo{NSFi7wKJ6Lq>6D?2aYG?fDm+Kj)8w(2aL943@H z`7Vh>kP0Wum5(}gL=1!N`Y`sZU33Y2*+v1(DAcy^3>gLQxK{)y5SB^K_5I?>dN4|v zmKZsyuYRRaF3u*}>?EE#P`n$5nyx-pGG%`=eMM`Lpx*2D3SgZ;oM03dF~MEsuHnk} zD!TyVU10V2SsZ-YL=tVAr(omk4>pdkLKr%N#qLdbJ}A?Ml5JwoJp-%v7mpit8lql^F9eN4ef}Y^b z-snpvgl>Yf!}RMJ>CT)#*WPwuMps5q023hy0czNZMr30GFu;?A?63*mGL5dB=zDm1 zaCj1)%>og3;R#-LdCBrBzy!w$mXxT?(!N{%!7sjU)xkJ4M=YD3pkkIfCd=9|nT;`T ziMtsSEg6tv zddZlvCDpsI=)B;vPIwnmvsEpB$YmeGO)K{`#Z03B0b;oZx+@L1oA9 zqnBi=%4*ApV;$Fi=k=N<9OYQ}A>bG63nCaiC1i$o-n_;L=J!sVarDGYWb!5mH+z~< zw{jo7G+9&W)tsCLpn+wE%7TA`oAh0~!1-X(Cyt6N%A`4m+K@0%erV3n%$A;^x(;hr z7r-9*jGTpvxyMRxoCb%LzViFZVq>zw@C^QBgS{v}`x}L~ivwH`yf&D=XfXYfFV}he zuo+`6o15J)0NhKkWqf)sx<5%By=1}*6hK;uo4~owFQ;Z0PkVSh$%hl8bAp^Wx%4md zMWfoYG$rsrTPV1;UjQAxHF?0BAmRl<@D}X#X(^v+hpzUiB=_YydK894W>oscEB`f-LhZtRG+wTC1j!12%=7vDNjr>9uaS^h{MTco$BM>X0(is83m9_DF^qhNjiucWq>?LZ> z7Au)e=#e&R%pyc}wskx+*~DZ8*Nu=)MB3lEo{-CAS4GgDu|R@%d_0qy5DF}<2t;@) z_$5*)Swn4>EP=5NZeXJ%xn94yR79o>*NJ=Cg6@2eP^abMql6U+%=KuMU?@zb+$35+ zo@*6G$6EdKN#`4T@s820Bhw!^m-V9B||2AdUv`E4INxV zcx4o3?0OZW=dhm>=zRG4a(SC;+gIAwuacl>8qFxlD(h&jEdS#Ce9q?v2KORasET8-3Us%5KtG~4T%AftV<<})s z|G|IcdzR09%V*O^VBd=ICNtX}Q92x_9vC>Xmfk}@GO+G&K#$pd$2*z$QL-oRBYzo8 z&*RwlmVe=o+%7-+2XB@?{O5L;uRp~=M<*gRp-Xo34&EkKtyPWgGK0`>-^4=MLkwL@ zsOg*RcwYZDhUrg#<9zw!zwo8yZ{L1#ImmhUn(Q;Qloh5E;t?R(56A5c@vPV=c-&NR zat-bq24N(%B~vlpu6lGCqbdMktjQTZnX%sTXmxM;86!uctWAEGpdhju$ChT6Pkq2a zu|IfkY#$E?o;s|Yi}=L}ME28DT8slSPvCL+#z)u7b^4?2h5+!J95PaD1HEi88vUiG zg1u&3=imI8!T2s#vknEr|!9%0Yl4<6sP7syCkH2S3}xLCxD26JT-H>`|`a zc=b`(vXdQ+k_o125*B@olN%hyzW8l(3_bB|l^sKZuH%U3I`(@Ub!9XCpjjPeaK@{_ zl{O8%4a>lZQ}@&s88|&Vt1P>pj*VT_efU7fa}W7JbMX6)CWcxC5_ra@xu>|a&-liq ztUWJS%*opEe7b+FQqzJ7u;}9i9PA7M(f>B0GrGMv2N04mYxW*M{niW;i#`KmBT=sj zW&ApJNg@y~z;y<9l*sr^ciC--Nr^>>Ezg8d5Gmm?t^H!iIOj4a9oCS>xKm~VS0o8# zh<|zuZj_4?jxlN3Rx9bBk;(l~(=g_t zOX0{L8g6?mr0X3|S&YI~aW;S`cl8*eQxfntngqu%jL;G)WtckUTvul2Z;bv#jtt?H z$iRYU%FW}<2}Jh*HHIe{Rcit{`_VPGz>@C+f#=mZ-?fh|=-GXy7m?(-h^Y3Eh*OvsD>3(`@ zks}64+gr)pAL1*`+lw0}FGR0jYH_d$TrB5wi{ObAg)t~;R<)?o8FI#@} zH-2&X@(;YfeDL}A((YuqdLM_7z%FzcgE_&j>OpmdLrP!ymrrmZd&dS^1j-xmDX5NAo z|IlGS$M%eaN1pp~kXlh$ea@X8E&speZ29*uzp{L7`#Y8o-hZz~jr>L!+80Aqr(h3X zoij<*`U!$`fo%}aKe8WMKX_bg#@97q-1f4sV13u26!RvuHH*`z;Q1as3J-mspaF)@ z;d_i^oVBNF&QFi~5*``Z!EpkilNjpsE&G1gxhrW-%k(10w0E0qwt?i;eRw|^xQ^xZAxPtYfZ%i1g<<@%Ce{?2s`DRrcsC+X8p^(ao`i z`OF>=(vHAv#=MOsyGNkgO4vpY1sxP!$(xQ^0#;%`&T3Bq4+1jGrf=-5qml)a*ktyt ziUd~@L|0n@3jNSKKg;I|NHLm?W3J5zebLw@@Z_{5Nc<0b(G>&P;{4U&<~s91=7nkT 
zIYD-3U#s{PdIlDpAR~N%%EHe8!MRdndk8qtYyQenWRYc?f-Yy>`Ng&4*eL?+Gbv#-pw9{14!VstA{2p3RzbuY z#Z$!e?dH|FyTQF2D10nDa+3&ovg|R!`XoRjEK!^o5Q>n7QKkKT5@`mhj}h@1SOT)X zD5E>p1u)jR3$hb!+WK1FIQYV|ae7`>(wAIh18X14s!}=<`1GIS6I}-eGzfi4^BgZv zbF47?l1&sYkPu|3M1b2TILvhR%)j?9+wbE74nLm6~0 z*XQ1q_JHX9k-4rMnSOFf4F|ZaOs2}_jdRB*`^iEMm-jyXVmaaXM^GJMjs6PD)(J?K91Rvx|T-%)O+WFi;m1SHn4D5f>pGY z?Qn8*iJZtly>;&GW#m8Cq7|7Q=QU4RpsBH1@)-uVLNanXxaYsLG&+X%jyZ%4>{abU zw(?`M=Wyx8=qu6Ev8l$%1atA`cl9OsaMWmJjMdII>61)$)^upWUiw|xOOR4~8#=R_ zt_2%Co_@v_2||D&(?85T?B+C0@5w6OO|}kRy3U5MF{`5iBj?m7c&bm~k3dDA8@gym zV8lnT$?jPb98MlL@E6Sr=f-x-Jc7Xi@0?RQpMaPRP5_egD!E`=?dk*v(3t)^JdpM) zi(om}Nr1}+;Noojg+%fC@?yDsbJ4=A1&Q?IUi5eM$Tt|wmiJPp-44m%>m)m~jm(bE z(Z3b$*fn^=zp}Eq7(SCglAq`^TS)I?_t!;{rV<9P;U&8Pesbw}t0EGBCdx#J#sN7P z>d-}<{TZh>$EN{IXZ$Fw%)M{6D#2V6K}6u}Ejvb->ue&y(=GvzF$wa|b%v9@H9Coa z1kw`3PHAC`CyapCkhT%Wa$YPXay~vHOq@F-LyV>$ZOI{G z1uu_HSyED&E@ukgRtSU^xn%^S&JgSqoygpqb;TH!5m6)t85WIV1Dl@K75#<=N-3dX zn2X`|3QYuFR7@!aP)digJ8!>A4;4lViO!VsV}S?`7(OMWz#L9SO6Gj}T%T9fKS^D2 zaLtr=TtCj(^XYfVo+vvWiw3<<9iO!yvIG}O!#M0O$HCdG`GX^SVAE!dv%q8oba?R? zL&Qi9uClN(Fg>5W^o$X4%;w$flt|^_?%KquINLZjGop>%l`4j!ckR~E^5**G@^Ad#_Lg7&uRJXO{r~IU@;5Kem)!&7??F89qbV6= z41KThi%bc&8IPUt!{jaO?DOU6w@V5~_FhERte9xtf@jBN!5qLATBbdY-bB5xZJ#ZF zfp5kAQD$Q2k>PweyVcI!NB zbuEQfRcL$YC#zFUL}BzPKIvCm7YRr&l97?wm*=NlkmnQK5)@3f#PN9nhv(13+_am1 z0~g12cV{)d1Q=J>g~{WRvqBxG5_=Mz@R_djeH_sQ&=W||q0YSs4ER7@T9E?wxZ*~SM`HKNiU^yJ~p}^T=LPX30?H2Gc;tI0v`wDsh9Q} zd0TA~TsnpvhsznFMs3Gt;w*l$>Yl>r(=oXK1v|XNS4<1(J9aNJP##vka@gJEJ9w6k z`9777ciGvx@dL z;Z$=xu;Mwt13&Z}8U}|A9Ql#Y97WgAvI%tVWos@HOjyPija`GFzX5T|?EbXrFBD7x94FEqJcML-sLB!)$+(N!xRif4OEN$Ce;NFia8cH&eHV zK_WJjl4Rvdjc_Xi(*S47(4yn&?DQ#c>KpqU51rTJvKWkWyt97OU&r!58zr6WOXk7Q zah6J94Bf^+1Rlr>I7}+zEZ{XlG(iM1i#D6B#~{vXasoLK_$R{%hF~5NRNdq|KEs>C zLJ0;F9Grtv?@4ygzT40bu2sNnlusrhDC`6!a@7D}WTKQ!TeJ6;p)nY9EjrvLGc&fn zW-%>GhDK!I?8SKu#Zh3qU%v5;*Of>2@9xUBNBGeeICi`qKF`<}dQWDV_Cq%r;5C}$ zT3Ln)N6To-uzvQt&(4e=`K@CQ&i1yo`E;#P{0!$DFMssU>@7d}Ke}E1#eZji`3qk=O@PiB zhbN&eheNLh7W7>E+q4;+YS5y~>Wp5=OIJ9Sc?4?YelN5zIZX-w^z>*si^Eik-Lc1* z?Z;UkrccA;9zM@cgPA|W;-1tyNn$fSE@$nK0cV> z5UtEEX24}C9*WY!xVIbX0CD{n%Qb7t?$az7q(Prr26W1i+9q~+>=4p;W|)1WzR zqlGF3yV~YZB5kT#D*NLLdX#7SWn*%We$aw!pd0(n-mN&$r^@LjQ0hclZP#av9;2DW z$pTf)w9}r-8M!iKAW*{J(j+jMnXj5jmt(PGLrgR_xy-^X_tg(Ffv~mbfq86B^dK-g zSKFQA_>8X3;|W*||4h{)HZ*P3-oz$a8^C$@j2(-N(COGZu1A2P?eZz#;9+;n{4kHl z6|BOtG^=M9;Kt6d@%5|ZFMsC0v@#Gp%g_AL_u2;mVeak3x|{3lD$BEqo%sspmCOpK zGYm`r%~SGr&gWeB=A)022zmO-L{Sx$4bf_dE5xGJbQvRLEQO<>EcZdwn$a>jMAAB< zHVO}5N|Pl{oPJ9KVKC4kV8;Ymo@lM*F9H>MAPIwqHYTD_*cnG+pt#$#nQIv=&$=HG zl=Rmvj6g^P^4L_JXO4MGk;y}{TsR>R1!VkW%LGeg=CP-lxf-Uek!Rp=?L^k-k+YN; zBhszlx6dBu3ucsd8$p{4YJ}FYh)MfIL(kRAl;?30aB9oAm~y${h}(1*n(+{575MN$ z*5$i5pK-?8t)Tk^Tky4cB&{<+0}`P{XHlWHI1^de%1>174ACh0yE}d5`t*GpCa;-$ z2rx<*T7$RGu?Jq`z=wNE+!SI~YqM2Bs$}Uuuo=_RMi9sm%ARATEzsd5a0%zoEU=aK zvJAl;ZNGB);bFEOePt-;Q;5yqvr|S{{-`{K^09N&gX{N}eewu~z%*+|=q55A zn$9C)Y{ocSK_8=MF(C5)5c#uKH;1Uid6>X?9vLEg%8h9{2s8R^IB?p{yD(Xd$h}Bb z=so#1`hkzGt#i1Eb4UQl_8=R2*EvRRuC60%0^IzTs2oHV2wQ`Md0yH^JZPWM zQ~D%nF~;6;@aTfA&Lf|O6lCok0?K`K6n=Uko1DfBFUr0n6Y0-+@EWpkuA-1T&|eTS z0UBDvCw)a08#I{p-FwHDjw58_=CeZ`;m1RQ4z`#LXuuk{JjFuZ*%dsQz(BPiZoKvo z4jX4GkjKLuOYdo;B9J|rtpA)NOA-clJwF5ToxsEK$G*c~FiqdnTwv%d z`Z~evfv6qKSq9Q16wzqfEm4DD_Fk837&ng(l$g*7RKDt%6XhTnMGSRM*CLPOsbJ)$ zF?eydIoWZIFc-5SYHL^NoFhF6F-7W(!<^?NCUR{MLXm^0Y)a-{B4becq=W(l5xUul zsHeYmvI7TG!_GTuIn58j!4h)h0A~ab%EgHjK1?Ds=)$9jz8M!8CFY6fZN|(9A2@wt z$_Uv6Hl1{ma~B2tynf4;*j`m*HfVYuogs}@=r! 
zIVZf5xqBRMN_7b`@@dvQ9&mi{RvyIzwDU+D2V;mtkkk1E>2xA}u!Lf{7j4m`fkk#> z$Iff7nRbq$Cw#zb5{s&cF*dX!i;R(pC$?tv;iGGmkhSyn_096phcjy)uQ`7Bazxt0 z7qX@b1t;t1b6#lLYDe+gT|T{r{ya@_?hK5{*GV1{PutTS1mvBGvB{_ z=5sHoVW8aH8~K?me1f;xeTSpqjPCeE4>FElXt2uroT6uB#8$@~*ztca?Oa^$FaO$~ zykCCTzxlBIyMOa6j{P)!mVMdj8RHrG!QahJ209@s-!TG?vw~sTMrY1l1Kp;gMfNM> zJq_NuhFDoe@ANEzvQ-C7It%VKgrW1~Q%rAD%6L;IDjk`I}eYyqxYQ-m zoUe}O#y&lejbrmW+-)E;I7hbdj0-gq5hEBqF^b%ar4SX@j zW`^!%)kdxrvT)3D3dR+1PqnGuw? z?<_412Z2U@j6eX@$XfQ$VI4SSXIye^vB`PI_6S&x*J*~qfRItPOcX^+XAuCMI}SO| zs2cwShpZ1_CL$B<6W$XDSc)_TtINDB5ms|Yz)qn->z zgx%l{PC<^si>y7SMsbXfnJJEevmnkm!3rOzjYa{%bDspLk!0>eBiq0m1r%u}TcqY_ z+e|QRcm@-_8m)(>jEHQc<8;9fyf?QodVTJlbKXSX?ul_mD{U#q3$V^k_7-b&7r)?D z=O&s=n_%jC^LX6=IIu@1wju|er;zWnW0a!5hk* z#wmz`m0UlJb0?D?LD4q@d~`acD+?8w!R@&6n%Bk-pIs~PnR2x8ufO*XzOh_B-7Vkq zBVSm4?)UuU@{j$OesuZafB0LLm+wV^KoKw+Ws{#MadC*Q`{Z=!!#H^OhpsVtb1c`< z69I5J#Q2aY!Z9+=NHZp!yLXRoZ=EfF@o&Fce$OA>UjAGE-uCkM9$zfS2Xw_Pxi)R~ z$&RqRtBjdz5IpQANDXe2g&R*NaUFMuLtsGDzMKhjd{0Bol*oC6mV@5Pr?Nhn_xFuS ztC*~Mhi9|S_UVh`7?q>t7oJ`$|MS28x#iFP{V$}MV;Mu5GQNvhlRo_DqE#bH{hEQSZ4MsoI52cnMew{YdqPG$G2!V3n(!jQ!pj#jq6wbqz0x|P ze|zZ>r07L>Lxwr}mNFxk-h3&GHS?ToS`E|0L%7zi8z*P^XY_NMX~uGcB0Ki>^i%y4EJjNEuX z{O$F1?X{r`eQ;E03r36M&^I!LrmZT5Uez1@wxyl&Rj}f^2F&S4zpXgNSZd3B2X?3% zyaH>9w%G*x)rTDOXJfC)K>bFx>St~>|9ZKn3g zNsM!pdxfFGS;UmKqqJ#5R%O{h6MLK|q?84+V`xXH=UH`&W84;Bi_Y{V{%|E=oBqq7 zT;INOKe&z05?JTznA85u>;#iH_}m6&<2hke=raa@QU8Rdj5*;#FVUSgI&n$fONk75IHKI~M6kZ4l2H)&tr}QLe1Tr!bN6XoI z=|i2e%1q=&wwX&Sq%9-H2s8r=e2kQ3Ze3%{OIBYw3tgtIWR_}B9<@4Y9B98qBmf!x*{`m5JKlp{f1Gm7LcIq(06MPx{@abIm zTcnO)wEr0ya?k>OOe|EF{iNAKXY#*HF zT)1hnRunits#E9<-x=F8PS2r}Pnk#@{+^)?hAoaD_j`0zKei17^Oql;XqfYwpE9pc zxXtdG!(^OF^&H;>Ncx(p$w}z_BF^>V;C%UGf9JEyzx%asTkfAp-`|8!J*agE^cm}VL*=Bv19RN(+e377q@x-HYEZ3>POrkRFx|VDjGUGUW zuMtki@TC9jA-cL;l9W3uN0qzk#n@I|n*=msRIlLCPmS zc5FMmh{hB?yw)xqs%_494s|W}vWsIkCW!BRkhC$2;J|a_v$j3`3dF&q-AG{h(M&bA za4K3;WyD*yhrP0~VXMtu_XeM_&q~l*19tzL*`un?MzhJ5|) z8jf4S;yWHp#fNR!JPDJV`|_JEKz84L1x2a~H&-6##Z!(mwle77-^!*1DV|!mU*5iX z+W@t6P5*e^*G)%u^@)!3(9>Pk1Yu*)jx2yjAng%8iKJ`U6?34x^r{I|@kd88rm7IQ zqc1ANbiy`|6^@;U8=N|%nf3*(Y^6T6Pftz0(@#W#!fq^62+Z*Yt`L|kVy<$t4I*e}F z78D^1zTP1(leyDh@!lBBz|D~ybFPP8=pxHmAIXT2fC(&VH-J@;RV*kng9Zr`KL{WL zgI+ED2S=5JI_wxSisU$Y;X8weM(86`6Lj>cf%K)c`sQsEu&9ev$0(qWHSm?!7DC^f`N4e*8CmdHEgxl^`o73eoP07lvS_@z>t;lr*badh zU51-+^oM*Fc0(CE<9x<2=6-kuE_wiahR=7ek~PlDHh7l5Mlt}waT|{V+jW9Gv%cT| z^asm9t|N1OcHQh~I-Y(I)nmQr;C%{{iJ~1kCz}or*q2uOqOYFUzq))A`7+EH@s4cK z%~lNj31$NahlDr&j5~8U%KrzkHJ$8MLoM-8CE>UuH@WWqAqhfk$OH&uuUyY)%WT8O zItgtXJEj{=5&|Q!!;{tFt`&$rV(8wq(|{!HdpZT4Yz@aqFTf$#Y1JjWDqA1G^V+9nLN1r+?*kbm{LpE6$J3Pqjk3`0JB1V{h0h$w;4$ zO5=0{S=Z!)eL$1i3V}!N#b;S{E^b>5fE~~MZ9jrR>jXpKH)N?Q)DD7!<7D(k01%qv zgXOQ!Uz~?du|Wv}wnCHDzA^TR3<;tUoTVR1f9}6uU~1u1bZ@ z2(8f?fwU+HL+!Jfm#)*Xi93jvHfszDQVukm&py!|Cy4DLtQqGnRqT|I7N#$XK@bTO zf~|rvOY$XxP5TzJX|&IAQJCC1!X{FC*9c06V|Jl&cdzTz4`s1&G#+;lF;Y;OJ7!s& z0lwPlqAVdIXVOS7zeU3Vr<;B$O-of9yoydQ2rT)a6rLT&plC)Nq)Bt4Sxg8(y-uG5 zhJoO?T`q!`A!u(cd!czBF^f>!I6srP%8Sqpz0i)KM2}`U`OP>2kr5*Z+RWjcj}jjq z4}56{U%cnsqAx8~ijIS_1r-RrN7C-^7@1^lfv2;p&8LWgC(x8v+s)e3AzofzTrY1w zx~P#u_X5%ltSd3|FZ3xq3@WFcP!Wl)BJi(YUpBxR<2c3~4bcuAjirD1k+-}}wo-8O z9~ks|gds z&70+4{c~ye_-sj>L0oD;;WqL&M$1G(nbNeq!ZdLDkAB+F1+C}1HU?H%SxftPmo}93 z_Y=?vC?uDGta{FN3l8l1;ehElBT^X19bHbFR!iKA?pag*`21w~OM72l{`AvVmxt{S z5{O6x5}Tdi^pb3my-mqdpTTZ;oNJ(TnE?Gd4vkTJaei7D zx6uZC_an_tDr$YAes&gLl+4(EEkL_bKo}V9<zqV{_;D*0o%g_UQA-UsDaHzhQylp|&40bj*ZC57^36hUEW^xca?3w&| zyJ6@bhb&M$JAct98V*maZQ=b8X*0XW&r}!2#|8H2vPFK#TNk`(kjVK?pn7(8R{22p z{avs4#ZQZJUB-^RZZLx$9ZHV8^TX&^3gjNJcgQ2{SJu)t+vYns^yMB^kZI?!`kfoH 
zCrAyxp$mi6-z8j2IcXg8KFK0Pp&=q87^IZqt=2=)4|EWtvlWGF2`s-mV=_!r2FJ*D zkbXjubeLP3%}5$gdw9skJh z3Tq8t=olOtC;-G3oC$^JE=8&O0Hvl6xN**HG@ z87>umImTKD=B$`slJoY2{6MyqD}EH1MLBD%;^v?qxLN1kR3<c<^ zf989a@A{E1FE2lMl|Dck`qc4oMgl|_2*~XC7kYl&Uz9AT^AY*av7ED^N99`Zl6@*E zb3GmzV^k?}U0hABjq%z(T>k!ths*E&L*H2b*MH$)IZp=6q2i!`%6ya6sh1Cq$)M?2 zuA5!&V+ed58pk>77ffK-TvvyUR@%vadbqnBhp&fqt{ndCu7j1abZv52HR|bSKc9@I zAfuI}T!^=YF~Nd&&>to9`71}?zWn^jSC^y1^T_LcXAr~ZEIQhIV^1zGIq?Kz(Jjy8 z8+YfN1wa;A!UsA-hd4pb(L9@r1nFevEKUV%sx;*2Av#VE1pDNtuq9wKBv&VpjSNXB ztjAzugy;3X$zB5sdXaa1yGGx1-wuAxj^piEDgBGB^D>aBmTi)k(9f7Gx|f1c1Wk6r zV;n#fx3WStO)&P(1?|;V6ES(2TsPFX}|r3-+21;WCAUE#A$|39C9lLIY%dg1S(}`cUt=M674Ns0Z110Gh^(@(2EX}keR z?o%C6dEy(!53y@g&9MAB-nthJ;D3|$$qMO1-*UqN5toQYWW<4WDcYPPAe4o%j!w@_ z0LdrkUggi;C_kN%aR#SZahzfAO?Qs@wACSLU&$wjd~dlu%x4TwPmbfpE$jlxI~H34g2ls$qT z8vjz-!EFrD8trkKXjv65ev!iAWh3!{sc(sUnCE7VeR_#Zl+zR0>)ey?X`~Yv69mvTP6tg|aV))&JuuLjwAny8 zHjS-tuIDaAVM<5?&4XcovlUN!an`FAFOP1D_V&^3PP7UWyoCrIG}4SR&Pm3qK}th_%BXxv zpEU@^X>zE*Rq(n+d0W0o78pVyR@b(U>LVZpn}~CwY+NFwe(T4Z$xaRk5M+t}#Hu-0qFDZ(WRbd4O z_+V^lYcpPzdjku&7};h{WH;wUeBA)ib0Xd-F9D)hc$DL2*sD<7-oA?=%bw`*+LH%4 zW*BAyG)q@TcnMXEk#YBvWMd~$mNf#U@W!ZRYsSJn->LkXUSbqtAu z@+yiP`oI6pr^|Q#=ogkB_^EGMzWIATzicHMW?e0a!-)0W-Dw+qi0FQ@Jw^(@#?jYd zuR9ITXKBAV_e(dw#}K$byxx|%3w$YQ=&*IT{D1zx`^)eALpRI+<7@9FnDr`1ikarp zl)%s5^oxH^k9^3W*K5E#!RI&zU{pQ|U%tb`XHNLu(z?4u_v9rVl1=SsU!TqVIq~8E z-#GTPkydk^D~ft(lxxFp>jmt}=mj4+hIZuBdSBPe0BS&$zv1%dpTBFlczSO+iW56M zi)?gE*CfcxI?>ZOI28fG2Yv+FW;tF|)c3tdPiy#tmkjChI`S8qoSU=45CtM{KjR=| z|L!qq>gx4nWHq`P9H7Wh&J6G9R+%j@c)1`VQ1KEWaAVoOyg*d!arjTXw% zs)nwe`AYekwpA&uF<_XKQ|k*+@;7=CdlG&MV8G}}7dFijun+e)fkl};^a?HeWQ6&;bgoN;e95&n4Zz5b8!8|Hegi7-1mj}6~d|NpY1PU4r-C%UXC+#+a zb+8$Rh)_1DbP@DXR-GG-@28>(kb3 zJcJox3J}@kG99PPy#&y#3T;hbI*Y=0Qk_61-2~ghfoGPIZEzHJFmy7HuKORQtCN9m zj>Exmj=GOP;ql3F9Q`~cz;NRycq{?$y*UB&&c8ELiHiP$t>ei zMGt=d);?VvZWkZfyErF|i{;H5hST`- zWNd3?hb@v7mWF9p-+kgF*IXt*zm6UYYK)hssnQ3{-Mc>ac^T(U=Q$3r3Q_`fc|k_V z7B@>rUc5R6V zYbzbc)`d8A(q!BE0`#0NR94czpc0>E_%i$nZgiCqSNdnv;XuX?jZvq4=U00SFYwE{ z>ri6&$iX#0*pzYPoL=rj#{@3rak5bD(n*d z^r8CF{zA(%6Pl`E2>2ugWFZ|F%+a;cAlJYlzx0{5Y8P@wK;%gVK@ip+Cy?PY4iCNd zct3ce6`fH@F)x6#51peM*bGU_oawm6qyQpN_HX5tk=UKvy++9Krh zVh6&d0mMV7)tKf$ertG~PTyK45{xoI#N#+-Jd1K%N~u<3TY#S;|uEO4Uue9F`#h6++Y~SAf(R`o^c*ChSG8GkOd9EkBOcBjZxz7y2}V zAOux_7={oHRIwm29>a7(f_?%wbZ%)bJPtL>_ZsCWQy*bWzs6+GkN1|B$+kUqCOVB| ze9Tj*$yo4QTNb$Rl!Tj0x6%=vhyN|r#ZYbqXBqy{=}8?iC%{ml6MoyOsCQFJl=*1W zY|=g}Z9-OzQYU1^;24+1Bd=QI;6v@@>MlGucwBz?*L~~qYk%txE}#C=%g};Ba39#h z>A+UGtwFMr@AcToD&jg9;`%S!@DyA=$0*FP@;V#b9cPRL%QbA6M7F|@w7mSu|M&Cq z&-{_Q<>xPslD#ruQDzPymvO;x#ud-1*U%IO8#&X$TxX^`=dF}3*mfU{s=kJ1#HqYa z`vOm932*gt6n>mV$4+wmY=3(>jd&iVos)d$+{v2^Ub)_0TsMDdrW-xr$elx>2|yXr z(ev7he1A$Ge{b)bm#xG17f(}MU*9cnFK_$&Io+u2rf=C5S$YibWm4u)09N}xJ>j7g zs+4T8r3NIqSD(*pLSRN6%dbu*$wtIs2M%=eTU%Z;Z89y}K!*C-+q6aQR5n_<$e(dp z#*;0;Uq;i+_YT zRZuugU`hvD1(wNghEedURh13=hE2)O1S{sPO}U?~A$Ka1bgK3_4VbLwMly!J0+9Uc zFo7Y1T-vJOeq`(B=CVuMERmJqJ0`#7h+I^I`Ka?eq z z93!}zHbduOJxOlat;QtqJ8n{lu~u?`_a->@F(m;&F1U%|67a1<8;t&k-*~g!#F#b! 
zi(Cw!W$TnMFhr=)!BnfWFB~mD`E%d5{MfJij^*$)_$P>y0ZoQ7crXT&na>7)`#MQV zzj#xgAyQ!P`3++0=kR}=>Uzw!$_3cr;!Moi@~8fz?d4zn5093+lTRcsH?&&K{bLtbQFyI2T@ZK@^o<@MXU<>Jc3It%ltqR=_P&uny-v)_-LC}Wa83}Lt!S`|dPQ8^2|b8qer$Yz{- zC%GX!LG@7_Ndxqpo6>3ezA&ym4w;P!xXmiN#A@Vt-pLT&uZBN#avqOlopg#mGS;gj z^~ugOAj=;eX4@v?9a7r+7O@1$=AF1U%~$RZ56nOxhcZFoJneHCTFuiSoL6)!?KRK{ zLJ|(`o1}>TQvnHV336AtY~T5BoJX!f&(Ic(oQ$p2LxEk+c?I(%Amf01qAMP;e}Yaz zK>r0#XXoe9XFMEROAlR7&k)wJ!V=jiU!z-`9safgk^Z$!KefS^`0G;zxu3IB4j0^M zdy+CfBz`8}`KgihP%H$-gTl-wHK>+R5B+*1=fL4!)eE2QC#$7T;H!N=#a!z?PZC|e zohN$&ZQ8adQFORf>Nx)N+iL74DaBiI!+yboT|xux-`(FP;Ml-RCxmFtb+1ZR1ooH+uC1ZFyvi*R?v3J4=@DQt>M^u8*TF3Y{ae5K zx#idY+%H?cVyrzEa8G^SJFr)A;gRFL*V%k=eE8}hqd5Cq19$h0|EOk-g*I=?hO+1FN5P#mfe!xumj=-e5i6TYmoWQ_Jgz^JMVzf|Jhvl>sK8 z_tK_7n7#(KI`wSDBfHXTXD2tqj{Zlsn?=PA47~*w7MZ$?llIEaz2*G<7ky@bYz?RS zuH{taQWKrXIvwjHvdMzF;$6<)+$1{=eDt03)hFXihRovk&U|GQ2+9WXtwKpb*p`A*>tfZD; zS1^*c$SJ2qKG2_iWm~JCxo+eb>o&n!=oJ)e7iY*%1$1zdd4lOkNjYs=>wqm#CAH!A zcI?J(&TH3%vl;IvSJR)Dp?I3X`MxYA=Q~{J`oPOZ)+y)SI_n%`Kg}nwag^NxGrI`x znPeAIVxD9w*R!o?2ddiNTu`4AxoBlHcuk-&I>Jv2{@6DakkR!DY(E7!)y<8)pTIJ< zIz6y+oHm)0^mXBzG}uS9PfjZPmgTjCEdA9sygVJkt~YZ}zmgaZ+sNR+kBG!eZ!vCnTWIB*q=Vym_P1VAKNm=KvN(pEcq_t z=hzQJBnC$2UgZt#C?aAoZgzUv@qlz1`gOyQ{1?I?nh0aEp)m-#g%UUs%C%&eEJBH= znQ)j@cjn{(q6^13c za;Lyz`fu4ul_J04utvWHG<%H&hQn8?gF-5@b*61_BCLX<(mm~o7?rSEO3eNG z(s#$}a?CYoDr4hpax(YmYhdc@S3?(lP9UQ+nB(Y0K5|VJWCNGCaWNEK0WHRHjcBhf zFT;nMwt12ol_kA+9WKyB?5BwwFx%{f1{@&&N z4^B5c7XXId!KY3jaBfEiCVR)bQ6M;zj~?6XJ)R4i@$`DMvoVeoe4P65Gmd;4?#AF^ z(3YG}-{jY;UcJNpKm3bV%YXfUf4}_YZ|p4J`0z~}%FNbnupFD|B^RC-_dfH!#x-)h zA3TkdAA}EGYBr8_eOzvpk1A4jFw>>(fq-o20DnaEHKaLs8=bJY5@# o{nZaq4OA zH2oc?pOF-f9skpmKInY@@LTo3uD7`$`kg-LIQ`{d-Pyp!J-POy-51N>zWZ$YkKqAZ zt|Kq=RyJ~3kc-!3c%yed4b9vnASAnI=O+m?Rrx|2_5w|^%c!hxp`F9CF?yqBpr%$~Y&uGw7}Ms|(4+Z@-f;XJVWUVfG* z?!ga33KQg%&l@?G#Bp4%O)~>?WUTZN6y%z*g`sbF=X-e{KG6yMozOp9+C?W0R3gSX zs;UU!WV+-bjW1^}UiO%+GMT>g*9C3-4y^1LI&<#!;vC&XE5mf=BzTNju+csX|9r3S zaDV6k&)UhrgqJJsC}eoAx{F>86qDePcKEl_%J1}1f0o~_*@`avPM12@Blnu5d}FPU z^xx`5w%(s}a3vR4S67|m&~s^rZ8EIW=!$^mp8!BfogFkWz3Us~-09hQXlUL1)erH9 zp+RU86YWhenCM7X^+ z%TkGIjEgAg5tb0zh{8BEgyPH)o&d^Vjl0k4JAJQqZfmKFT;vW4H)C6411(sA)viDVpaC7?GiRd5Nh`{Qf~^^S5`a1>q2@+%;Lonmq- zILPTRL|~8+P#EP{5u_l&UR)v&wxO8_Uwg`PQI=#(GO@^c`U>4cPfp)`3{wTl@!W@j zQ?5*)B$IzB@5KKLUcR%!Nziav7U-OmDcHWA! 
z-Puvbl6BybpeQuiIOT3g)kqbk?C1w}3WNUe8OIK``J+%Hl>J^K;ku2J-vZNdoOJh-A+^hs&m<6(X2N`jd~G>?O~)_xZj%3i(W*z;Xw z%i0rxs1A1m;GwyyjrFu=9!w60IXw^79&o*$b3M9*f3aoD30|jm@u6Oh#rYg0 zfrw$lSF%WVv`0YGOkrmWl*~Cu6@-^B!{1$UpYy>%l?T5#MAxtj&AgN88XLzSfui?g zJk{oS@`8+cK~3yVWD@Vmi%&KJ+}eoas*&PD1o)bL8%bV;=ajm&2PJ2)%%>0c?HE`#mxNJ2-3;T@wDJM{l2 z>(7II>(cw6Z{6|C=iYbdNm93F$(n4-vMgg8V?!WRzyZfV3aX$&xhk#-1r-t?F$o1E z;39)5AXFSD6{!3d{&1$M3`_}_5QK2-SeC4oy45Z9^mf0)9nO5tJ@?#vKi}u~dtV9m zx%>WpzrEM+tY?0nwf5Q^dhLj0;TBIaWOM`r?@?N>|J_S;nudx{^_y{Jv|8_Va21$R zGnB1PDqjb7uQmqjO=!o~;1#RI@rm(n4K;dnS}%PYdjKi#fYXX|tN77C8+~FSybJb> zvTcgEHMHFkkG`O~v7{hOS&wGh^rq?i$2;pf_#E8a^)W%XwLsJhyFBlY5Yqnig)A{$ zsSl2dsiL!!7t1@`@hG^|KRZwGlYo}J9o%o8D#`fzGOgfs`$!|iuw(40#gJ@^@8j&q zf}~#X76U)Xfcy$SrmW`{HI?5r>{TE>c~nuDAvB^(zhFl7w~==dj4BySQO&3g$}>0- zxs5MouI*@lKZ;G%9H0nQ+8+RiP@OUe;mk#0FzJxlFeJ`J5F&kt8OEUU{t`tq?h(4C zWj2g}iS#|GpYgh?h$67_hzdfE*N)A)y>6Hek*jDzY1bey7fLV(#^QAoX3W6A5)|!h zg-rdehe?TH$~qk>k8q+{IAXNMGbg0dtLnobbc!w#;RYsk>RkrM7*rpGd<1r<(G=xY zW((mx3h0?|nTv@uhK`Y~PLo&!Bf6IQw8+txuAa?BvC=XEgprM-gLJ`1#)ApxDF%iy z{6fLj=;ILr6PQHtIaLEA+~GOjF|3I8UhTqO{{hjX<&FA_ebc_V$8~$UoH=h+u zqa~!qo?ku#T22a8JB+zD#z~-HGvdh4{`%!Fzghpxzx8JQ;zx(;C_K)Q9NKOSNck`F zgctRZzhJyP>)!Oqpp0DAG1Peve1!->1Ris#u+4o&0qwNeW^(x56lQrTvKv{bV##%~ zSX!sACvi5+#Xn5@u_J{=4`}9?C@EiZ79Tm?*RCdfRtZL)KE~IByLI{Sa9uq5Y!r0l zBQ!N;i!(XRQ!dxQk#Uj_hqkT~CEdiBFz!!|4r7!B1QSug%hg4qp6C?dc?_@4MU4;n zk<|)Z`80NTx99vpj63+zxpTfL#psB-9HR>uIQ$YCls7%UZ#8%v)U>J6$T)+PAGt>- zadi61>)@CouHnr=MmYy?LvF}vx0_8AtDQQKX_kx&LbO$y7^m*iNB8rn6M`;-BQn&i zN$$_t%jGTgIU9gwfEiK_LUp*HCWeR+F1@sok`1u(S^h-(;xP2deGbt?y2gVqWoG02 z&;h}1b#iy(8(a@w48dA}^sUX%7B87LK{Iu_NcK(c1vY(5Tl97KgL9HbhVRCCs_xTv zPIzeGrK6rR_D*?$p?`v?;54|KCv&9DwUH)L-+0u8Q9FQw*Kf2nEmXS{zJoh9#15bg zH5n9yG`k@v=pUZEh!KxYyVEvDhGQ9h5uW0dcG`DS8vMp7vS-&OL!r;fA^;99=ZP=Q z9^B9c=`Urj;}94>R>AvtVQ+UY_c%sza?4!lF1W8i*XX4|DS8AhB2xmvCrvLkZq937 z4OmoUpcuhGI>c1Tl??+7K`3jU4(6r&j}pBc9oVB+w32qyAHmu_YuVS-W_-ILxE-+e z6A^an!u0r;xKQ0GQH zYvXzL>@+yM7X=y`r=I@3VB_Vl_?7n#&M0+8IoD@Cad57^zRouF(ojBAN4*W-Zw>tREdOYu4g#UhNt#BA za^7`2)qnB-_-g&zzj3gRpB!kmWy-5IkHn$_`rRNAbX{NbQBM z5d$B%jTz9iM?Plmp2I@bsGlmeG2ywkH}ZKM`id@_0u}9c42cx-`w`iROe3w9x9#iG zBRCi&U9_JXlJaRTr0pEp4Df|^;a6!-tTd2H<1!mY#JFUr)xGc~08mGHv~f>44vdkK z_qh#5Sr^WPfZ)7qTEtO2@PS@^ z#yvOdphO>_Q!^}~FM};eQ(pUwwgr*!Rh$Mt2!a;pXD93V^B3!*=O=Y=I9sNUDj)i- zP11H&zkF43of5+vy~%AfFhq(w^X^5VP**m^OD z$hhCwH4DLwCy18r;&GReLNG_YF+!Gc&bUUYdfu`GNxNNWk;AG-4vOfX_Ud-L>Cd>x zD@G1pO8{AfaVQcM<%Fj)X3LK^4pum9{frX>sk6;mP%fqD{0n0Se<^3fg>HV3c2l*7 z&-u*=2R~_}o?))TeHRjfF=d_lZ2!SapO;MA=9WBmVHNhy<9QoWGai&y{w7-Ty!tqA z@H}*hhnYH*0eb0gYpgJwRk_O7^Gta{$+PU1z@Y;{12510>SE@U7zFs`guQri5g6cV z8-;i#IZ8Y9Q2+74{q=Ky^oQ5ex1w<3T#VtwM{v0L9s^qbV1Ub~8^kg0v|h_A%h0Aw zc?P$`z3+3+HEotY{jIEa8Td^GpnvY|tc&y4>tFb{PS(HuwSy={Q9j*G@CQF|Gi{}6 z36nEN6=lkx6F!i>_j+DmX`1B9D28*eOE&`M|J29$^7o2Ugl4K0b9dE{?5fIjJrM?>ye?`1^|4{LwFUQc}5d{u&7uz^#m*uE;cNV&?PdW| z$Mk65Uy^HGlMo(3Lh{tH*WmsN}z8?n(jV2S^snWDZCwLR1dRs?{jWC6xT{JeW zU@K(1%^3%JbvieAGwo{t?1unSW?I`8&{>aU?H7Aw#GDv?!wudGrAh;I9$hvvV!U0i z72MCu)ZlOMS!9L(io3K822+08@{|($H_mzB61~}RijYgwyIw_MBxh zW;7HelwgcP>a;y~0$Ar}UIigx76YPq4&wHUn%)rEYJMjUKsTES zaztX%UpLoQ)JyTlNQdckNbqkx=0U+kd5lq=Ib17Gn@;fuamJ&+e5n$wmpG9=27z$n zHWyQ*OF2~mq`dK`kIs{Yv5ZSUvq+C&417T}iH%B6`!OCgM9MIL6?(V|0~8+|j7Fs* z=O_u~wshXyo?ms|*?il0{fwhwzgefb|M2k9disU8*V(HGK`_00?|SXs z-#lAg1{@rjdoFYLWT0|JdDPnElb};3@Uk`NuKTUdMEAetUU?|r#{}lH6l&QyZ`Z%} z|F~R#{x2P_`@2@t4$YO3wd&)Scq@JQEY5P1p(Z~qR~|8p2rj-e!Uv4@l*i+B4)IYc zC<})(=K1aHDq&)pI@*myr4xW;+OCJ2=VZ4I>KGp*rY2e8BZgS;I zI`JoK)H%!)ly)|r?b(#!&X2m${4d4_Ya?4 
zq?^h1Ikt*DHPe1z%U8<0k10h4zyX)@=;rY7b@bImbbxvu!)2r%KT3P4Dz=vLf2j~{TUf%@AudmmG(3A6Im%Ki5NS6iYX(A)ZFfdpC8G~oh8F$~wGTlD1 za=!+(wk3Q3FK}t9Mdj-0G`4-on3oV~TN|bs;h0m?04H?y<~l}J&;w>`BpB1${$8pS9$H)iX$Q*gs&;HS29hn`@e02ca%v!V2q3o))aCkeo zwGhvyLFMttOOOJ7K`h$^r<@W`lnAIo!FUKk$Zk68V``K&x(10eSaTv1VUEXSjs=t8 zm$Fq3b(BxV929#y4sf@rQ%^08S}}hZLFCy`vYu0}BcPp>eHDPr9jgma&ti9D2*$t8 zf<-9O^6@D5Exa-xG4t!)jjxY46ofJ^s5hbE46GqJloBUI7HxM7`Unhyg?tUb{$@ zG6Waq)K(cg<(=yFd%B@u4v%N0x^^(F%|zhiE)np}O_(|i?A4kT>I?!6OSb&xnOkh- zUGHc@slfe~_TaqnoMv#_Z9anlu=1+iNAJGwHDfFDZ>Cq=m(~NTad13dJb%$oOe^yG z;Qi-ODxqb^ZTycS&^PO&k6sqpraK4#=S37z<`2Ga-aTHw@P!{-fBLW9u0Q>Yx9gw# zl~?QE_{!P(OCMgW7vY_IVfy{hckkidir+$Iwmr(X5q-w(0LRv7J6xuGlRqmX^WQ&N|N74d58g(VNTBppBA@sK&L~|nu{}zce74&l4~|{K zU!2n#!Q6D+C!Q{j%jly_+kdO$YjoxibU^*?+IaG5M;kG)7vT%Wo@4*`u_tB*)<&k< z$i3+Yd{LjRKQi7k{cmvzSNa%POW*m~I=NTpt>X{8;kVy-{qB14`e|uP`RN27qwiIW zOw*|`79V|dvQA^v=z(s>YTV=?_v?Iy=ZqIxJdHt5fyp};C_s3MIIi=HiA>4;+wVM$ zBWo8$?agDEQsSl?t5*UmC0sq$G0#`HOIhR6JtRZ%ZLKCVM1 z(^^JESJRL5AH8Mk;L`>xWX*!sd#Rr$*RzkFuV>FbD(}%}+Sz82X5OEHjG@#|`g0a{ zIpBl?wzUC*2|*6JI0|RwaU&ZW$I5fz=Pys98z$0~LhqwxX%A296N1Mf3*Y3kyc*ui z-SVrx;Do_b+^3hxfK6aWPot?wTixMh&H?;s9{m{t0T^E6K-!6pEo4WmS+XGc;d=6v zBOl(Y9hpFpjW8DQ<24JwwO!o`#*AZLKs96TV#2@yXJB*DU%%yrv-}<0C|w&oI$W9GZCdHKI8&mXwmRpD=fX zD)rqvI*Kugw+lz^Ve;aGa!UkffyyTEzzO+SnyxA!RC^!>n3kVpuGrw6p1_ar!c60UqgW_SU* zYtYkoaT8w2sB#q#O&jyM*1!8#F4w>O|4P}tx7P8l$V$W(+~n+wVmAGdErvuF_zjonmL+*=yo}Y!+ z<81H6(V`F1^YDo_QnHb}--EIK9(|FUy4WZs&g4yaP=A~RO0CK}_XA86WE_|K`|IVK zCsB~QM%1hoV{N0A$<*9mCui5|!|;fV9lb#|0S_;9O4H9Nw{?{9tcXa8SkxjyI(>Pu zzV)pS*6G<*;JCk@J~^tadfd|}7#(x-0tYyylRR5b?`&5v!9104*5p_;HN>5dH zrGF7L2N1gbh8Q}eJ0xyoqw7`hKf)>>)7$9fBYLI~3uH9K%5FB@1Q2H}&Pq1)sIz<_MT)m&Z>{D?O z$3ui`)wy~4zWgTR!$&$BeP+szj*`I~eUm{d(9gnBmcUQya8$rE)3J5>gN(i1!_w85 zsuQB0$eWB(rvy{JnF?b~TU*`j+VCK5N+01#@Z*)P@sReWbHnEx0?*+0B}?j}RocTJ zu^1hXwHl&htJ%9lr>C@{qxW_X28N94P2lmqbuUnQ^7MGU^XX5o-GhU!apAPm|8*kL zFFgw~_&kpu^j(IAeL$<%v1@oY11YYs2pUvoa@5wz8sv8!+wwpL9BGuDu@awN-~`aO z+SYC}OZadnz(|EcKIkiC8GJW=Ng>Jzam;n_8XSI)LMf%&5Tuv+Q>NZXJ}sz25YMf4 zL0t%^eP&=%EQf@<56%vAo8YYV{UKExt{yqU{ek>IDe z4qOzLtbqE`!RUlH>>i;THm4ds( z=IhtL@V)CR$8QIyyLA)+d3w0N-U$tk_7B(T!-MtL?vB@AxIA3{%zKa4Km2PC)<5t| zm+OD{#pmna{??22VWPpE`&VoKew3qyTwvYco1;N!wj%MR}H{a1j$?{&l-{rb%*D-1qz1{RWh4OHnU)`)r&TDXcRlMW)4V-BnKMqg! zYUNZa9S*zDCq2=%eQ^~%0cNn(5HWP>#}Q81-@N(Uy1IGTg-8r&Ukno-+Y3F@>3R{K zIla8?dZ6PNTs&%svT_Q@A~J^bi;Z$@!1KxzArxiq<45UPk$d+QcsP2jE2#q?=lxoy zD`TdM9F0EXi0PMgZzy? z84_!Ulj52P`use)EElU&;_#BaDY^ZBj=4S#RP~Hs`Ol|uat3PPqLXA9Z(^M30UhGW zPQL^un@Bx>{=9|=j|Q@i6-@AiW}H-p@+46;9lh0S)g>8U$DE^!lRJkznGkdD06l+`$?|J+Tr~v2LrMgfVAq;~l!=t8j7a(xVoEQ^HskdbyALa1K}Y zQ5apEIvm`gDGBuFt}Qfp2@XGU0oPh}nuMy}_U-jUUvosA?^E1q|i;2iZO0tOzX+zxA*ZL93JQMpeqJ>xvw+0I4SjH zT=Qtz>C3BlpYz5a(So)eO)HCHdFMgqy=XAEnqtDR<^$u&LmZ^c;yQ4vKXb+Ny2*?) 
z73=kdzzBwUjjx6`W0IauGbKLUq*#~(4)Zf@5|r?l2djHc~4?Sy3yYjZtPfebMBva*SF~cnl=a+oAnb3<$3bdVB5jL!31nm&hsb9KSfA)(Xu3vcn zqc{)e>+r#CqWZM;W*mutzo7!5aZ+3n9E8X}*P@2KG(5dS_-tx?R=W|o0{iGaz zNwaYx;^7nOTsIaxGhrM)L$ev9->2b+vxxL10}y%jmcGNiIHLKBDyzaNUy7ddfw#$# ze&9qzznk&JwpP&=CdPmc2&T}kT%v6HW5hH9w)bBjuHSfJ-3VOSz0Ipg1Ak!fE_nNV zQ`Y#@c#Ww}IB?Sc&H2T;czIf#VeLqoPy1;;@GvZ*qfYPa?8aEk{3he>V07$S#|Dtc z?c=8uYc4dU*|j}!&^R5&#(DRaI^~T^BS3i0VeIwa{EB!`(jjb@2{*m|K zjA5Yg!9rg@2tB>@sRl0hBe;Q?jzD?FlKh5`gUdJhZqJ6x%hTwVleXVQxT(WQ!gqL1 zoL3x1_8*3x=uMAxS~Yf|bJj85R1E#xOhGf3dQAU;>GkXAdE-c%HRSnZ6t9E(ajH0z zgIm~z1AE2e&pMeKc>oWc($VC1_Yd(&j&je~=*3c~wk|wt>Lqf(-q}60k#+iGbF7yz z9%GQj;4sn0c=Gtk(K}(rf*V)AsRynfxIWFxQWWj>-ld~C`4h0$aobdu>Cho#ST?L$|aj+O$eH|R~-VPIG*aeK@n@t!zo6{L)-lT?thIO9E%(RD$h!=|NAH<1^p*-_QT$F(gcD%TO(+R(}`+XE|`ehX7 zH$L_5Itk+zsI=b|)A*0!1xv1VheLu9ydQ^G-VN`4D$)IIjQxM~W`F&Q9~`bf`K1Tz zAN`f<^}qbmi}kC|p2Y#YS$p@RxZ;%?V$ae9myQ9Nd3W=b3_2tglR2-~&;RoI`U`(8 zg7|R1Mh}xQ(#9Z_T{>^ttr5o~`7{i?S>=>#jJdTiUBp)2b0#v(xNHI?)3%%Mk5bNB z7LJ)I7W~vmE_l~y+e|xE()o^;^RK*<|MHtjVA#Ve(O{P0+yu6Z(EphzJ~w4F@Q!vz zxrHlI1~27j+io@?DB#Fc{?I+2vVK79yi3#LS*lq%zHeE_FLzm@Nt4!jOe6tp zgO7e7_Z}Nl-dMWECos+t$WmW-|BWjSNaC$Jk^78GZuO(!&3uW_LBkWD|$1| zF(?mPwpoF~i{9k*E;2}_IQ);QKNul;Rry)JWDMcO$QegUrsd$s!sj;$_@s_4B=cmB zK0=xNuA>oP^s6t920q=+TW{Z%1`nnO=4l}M81IeVuAVP12mc%Tlg4fk6}aHnaI2eP zg8<|Lo$;!;4sK)1thbo@!HG;;zSlOSxam&XMR3zyInBZ6yR3k=5BB%g;qg%&fv#%` z{I9~dGE2%GJUNVhvq*1@Fj&ct;Kwd=Z@=|+*O+la1wQzd%)pEHE?Ak*A=uI!&e%ik z$r6N0K{Ng}J_QD^_-nh1Af60v#}KxWT(XDa3 zP9)=f%c5JUQ%cPuyu)M9RmX7%ocygWxQJuYD4mxVN9-zN^IcCOYiU!T-AE!#yt5l` zb|;PqJOux%YwG&nfkFP=Mnl6FOe=IbsK zX&E2odKE|QJjy789w)>8&7ntk0nGSJpB%)ovtFV7%H+h60r#^Q{g2*#dw7At4TID% zNn6dXl)+>~0#i<+X{?V^b}#T9hNnLjZ}_dlC+qCsQ|teJ_wM>9-rHY)&#$~$|J)ZZ z)?fVA#rn?6v$Yd??h~l=*)S05H~P%@nh;6DL!>i-!}4+ts=8t(BhgtQ?cdfA-cgk z6*Q+$C;SN}$nnr%jxn?h=ho>)YipD{j=*Kl z5O}*EShU;hOvZFu!=7sy8$O|mM=rEE&ZeV=65MIal{&)4l^=Aq>?ysWZ@30WgS1yN z#cZ0X+qp*v3b@FS0QS0R_H>;4%}%E+xBwIWgKIE7eR`C>Wt|Ds$UFGxfI5jVp8j_e z$=8nP4_+b^d>gC(`xHa!&BEuvWW4#UfGwm?Upy}@W?nz?kan)@R}kmZxcds;*d=RD zIIL;B?Q5%xRdYsKay@`-=ZX*-fNmmopKZ(l#~5WWIT(|muM1iTgGR>qQvzu)=i?~V zE@}w^c!1mcb7p`6(wwn_m(3&c2t(%(GGKG=qF(iQk)lHptv+o-Yf+!~sCOY(%3zKy z^MjLN;UQZU%&=p``xrKiLH-CRVI8MrBKh&^M=8`92&kNunft*M!<kS9@qrBkHIs*6)!$on#_P5+oyh4#XWrk7B zyIXlHEkE<^=0GDTiAt#&4atDi*r#pt!29++@4Wl0ZfD#nGYkaZR`i0+rWWQR`-H%j z_CV8YzG#o~-HDgF6T>VT-#@Sy{0Q0UX^eS{hD;DKyzagU8FPezv0->PFXwRr^leMa zZeoJT(+{P~KwZa4y7}B^)>RBN#RL}khbFk`R;KRDej%Ly&pqx5c<6v z9DercyX&Lf?^^%n^}Fj&ee-brBfoZk{h41qUH`Y=d@s((`8o=&9;X>jU!Ol7HLlT~ zuP(p7{ty4j)%r^xzP0+Yq8jy48r`@l#f-5q7FH)r%ry}9dxxaWQHYH`w&ijSE$}2I z1G&aNqbnh5)J6TBYetV4wcR)hyBp&xJ#3D;D{ONgPW7x2d(@kcKZ%3*{>9CDaTR`e zohT|s7fz{%ai+ithV&WPrsOPq)aR1}5knMG0O8PR+M`j7yoH{?B*Vt$oc3aD?>$=I zxSF|Efq;ciFRjA}ir+*L+RWzugE)Nm5(V9+kK3KKfBtGcJ-N(54yXVC|MW>jK~!0v zIeWdn|Mv0v!MnZn`ByvZGne<)I~Q-(+m~9`CMc*8MK6jM$}=|6GP%yh(twff(cz<-gMqf5NOsEtDC-E~ z8bwZn!G5Q&=$Ba>IGIZD&`qw>g>;kod4kdU0Unz1ncsD^YhdP61UU1x;no<5;VJFX zA>+)v45iP#4( z16x6FADjhtMmuoMpv}_`ffLlRKww0Jswkd5NRcw|Zg zQ=QwuTAF4Ye(OtymONMY1^?htp1l(xyT;&h?&^^Dl(*Ox@(1xy-sW27}e~FV)W4j{1@SfjnWT&uXxx)k2Oq|$c zCk~2K{Z*V*_{W-@H8vE_!HM+%_VBm1IBkb5a}iTF6%fJX=`y7Cynt#14%_eL12B+8 zc*|UACznEmHyM0{c2|m(5gYHTUTqF#86fYf#mqYq@PkBX(ixt+&ZQb#2v6Z4Z5HN< zx<%Z5DM{d?Bq()nL%X_4lze_#o_44uQdoqQ8rJG zW8{KsN3#YIaFPZsqIaBX9?IMmTv=oS7rqYn&_71I?uwDV)u8X_bd?X%f zlpzMM^B6cps^$%?-nEZ-(}2MRMyAa073aykeAg+w_13x#;u@i+PYcAFMn_73E0f=) zdqpF#7Q#8vsHrt)%Y--b-#h8^Nj&V=hi|XHe)zfd=Wjo~{?Yf2)}Q?1tM$MA_4n7W zJ-b{-fsLYxOor}J1fl&}FV-)9>302vUwL<3JU(vb0T^2xHDYHTT}wZMEp?swT9}mI 
zz_=YpPHnhEqWtwx#BE=~blqjH{ct#pH|6!Cz9RX<>lAtC^;-`P;3)01eR3ElGPu48 zzc?=g+*O@-ZAGExOYCI(*M@0>It$S##@7Dw+0WaEZO?XN9oNW#r zq8Gk#xxX&nOjP^w{B}LNxVQG+JXt?-cf9_BeEslW z`P%xaFMqUt`db(4_ddH_zx#uW^*g?n>tFe3{lwQ#*H3@vV*S`}p02;)WUasD(b4)n z*ALg{uWr{tIz4)Hu=c{!lVJ=oLsR++JUhGdB*|{%!y!VCBv%$|m^y|yC@Dgf6ZymxbZpsxUb!20fXB% zkZVol2M4mD@EmmYl%X;WCV?c$)8LxNG!Vjw2C1MO)*I zOwdEG8eop|W7zQYJP|ega+q_|aHI9mfpL>Oq9^!c59s0&N1jmO*g6gp!p7c69yIK$awmWUJ-(q{h=RM%xD)!Xt&QfV-=I=!Ac66GR z@Zbe^BdG{x=dE!okqgXD?da1^Nw(`%=jeEUy%i&;e}^)j-#tIQh>>R$U&j#VTAUHY zr$FGcFTlw+(aT|!*gV$Z)rMiX^dTDpm$P9|=$kgIf52GVeq;*jZ1uu8FqjamrX?)c zvFj9^qA4%Go$9PjiJnV~%@6B7s z>pMU2z3U`+t-=fL?gE2(E#DdXIkzDC%a4>DH5~gC+p{L4%WZ_-o@I#I$gixD2gBoQdGK@MKv$hpZgErS-*Vq-KAwWhsqcz#3B*| z`DlNBZ+N4FQzAcn_35@-EtxMHwVk%+bIKZ#GuQKb+Qrh-jsY5n>#j{r*Mr-@RG!;h z2cigd%Bsv^&8M%ar7vT0;`64$TW8vD^pr;Yu573Lb>$)a6S`TLrYn7RAz*%#FDN_? zW$t(5sL=ZQ?ooIo(aOomy8h1V_2cIc)*lSd{&bY<_eLRpI_Hzf!BKeVC^EX2Iwat6 z`g)i)9*2OB=#lih6JxO#9^22ow{P#Q&s@JrWdC}7Iv&c~f%oZ6lwu6JZL6DEi-Tz_ zZ@>MtN>QZ5z(d2?$!W(*u~NY0|MWH46u3+OGIr!KKV}Nk*ZB^acwr}AB;AmT-SfTg zWXM6MwGKR`J@0-OZTSvY+A>wOHJc(G(LEQ%ICl*Chl_{kx_;USRit51hF$!W`J7oQy@;iCD+6X|Xa5d`tO^VpQK* z9OYX6<(M-2A==^bUfPgK3jBddYQ}t`7ZgZ4g=eaW(MNVbx$acPs||yC+(<@c`$!xC zf-|PD-emC^g%st0IpIYc>8<$j-O)vK&GNvjG~JZLP@J#V;eqqhS(Ir`jKkp3pGy%@ z>PjEU?jZ2VY3LfJhwGp}?Tj$xOL*qqI!SxiarDl^xF>}Aw}yv$ zj?!>XTW|?EO;x(Hsi(fSW}Y$Q>Hl}@$%6>*!CUL2cYk30^QTYNKk@&2w*Eg~`u5s? zoR;IX{XhTOT0j5cJBdVt*Ldw71I6>r7KMN8wGe6ospFX_3`F|w*W$9Ww>?uBRN(q; zjzX@B*gKt*6P+AeehC^{-+mA1J@et?m}`g3p^+v#=bz7zA@%YdFQyX1k@Meo`N8_G>*wonbO7CQ@8Qn6mv;MFSZP}kP2XfG${XEdyJe3x^<4xB z7#=)`a?LfT^80rW*QY+ZS>OHs`TEJ{H|r0+dbob?tHF1N_~! zF*-#A=4qVjIa5QW%_bFG-4eLync{zF22V2wD_D}oWiT>ia(r}4-KE=4Gy?qMf!T+ z_PuDvA%GlzMbRbY; zC#12vNHh4EBd{- z=61c~WO+;6$744oA`6!K@G07tEtp8&RP!d-4BXYz3B=H}w9gpSh2!#QcoS}|qpLh* z_{|!pKF815RrH-VDRbD&z(TX}5{m}a#jEs z#{kSz3bX9(9mF`sxHPg2(l!yZl#;R{MFi(eFlaAd&ZY~l&(2k%Ob8BuZJ!{FFc3cJ zGXuoTKtRZ*t5QqZ`;QV?MYm>@vXOy*WrOp7J}F@F>%!F$%hFMVTOfF}!P;Sx9#!E}yoV1uJk7I7lL z8KD3d+L!4w?)>ka6@`V;gconZL0lN{kPbV1r(c1^>58T^ZKmGdg`s`)oB#!c{+vvT@Edvr5 zQh%##!OLmIrw+VNmVFL>$lDB(5ug{k4|&>99q#?6>r7jb?F_HyW%MnBMD9wfR-m{y zJd&e2FV44ebo9`DQ#O50kP^7kAid){`L7;e-1Es5FlCz6jqYiH!A5Yx0!R%{PQ>K!+sH<`T40ec%@lycLcjSFn2p2AAgfDO z@#8@p3)6?9(CuFDRV2pN|M6~UpL6u9F$d=0+_)HSV};*l)!3F4G7g`^6M{OK2~j$r z;W+&V$4-ax#d!?;1dn61J+>!X3>SD2RNc4-H*aJG{rC4h2_~yKPh~X#zP#w;;OrUw zuRMBM=O*Jpw%D`@0JS)D?Lz336POMk>_*?fb4Xu#3?YLY^Y(p81zvNqk@Woi&VL3d z^V)sX%77vu^;&aM0*HqQ(ys>5IW)`^BjP@cOU52NsdAWT@MrBvgnOqwW!jiM>Bs=&qo zZZiXgE#(;DE^ILtby{ADj5U=$NzI4wVHj*nUHOuvYMY#Kk2vDKKodGkx7 z+=J^Bu2&m4O;tQS+Fi$K3yhp7>lM6Lo@c&|$m-}Adb)hf7fMc zJ2^5wy*^Bl-IRU#>Cdk}|K-=~i~rZ(SbxjczPkS9cfPg$E4N=+-@kjiUWPxORPmHU#F~$7QVZRZb?&VGP0O5Rg$5V zJ{zH?Zrey1Z+~;>L*E@wemzUX{iVI{iU8TkjNuFilVhJD9GU4E9x{LIRlg~mv5bMQ zZ0R>ABU$G@--|Oihz#71j(HgVd$4m5p0ZH&VC^N6|Mbh-^%E~vBKrOH`TMy~o}v^1 z2*z1+Q7-E>da`W>HW|G-?sUQ+HiOHED(ehMj8N*MB;#&uWQ@@WXn{AFNHvS4;I)AO zW0&@{;l!)P;edBW0B*Z32~E?-MAJD^Y@2;F{(+x-N*!}J;brF0J#)_aw)xKPUL2A* z`TK{*jpXTF2id4DtphM=YgrBB=lGC&8`xM+1%6I<8tghAJZ_P-CnP$>K}V*&nMNg# zgGbJd$HCx_K5eiUoK&wz_mfA+VwC3?+|tWwCbu{dxt#XtGy1rt$`^2*;HFy>$0nf( z+J%3r0|F!Y@SSY{?>nFRboXI_BWooX<1Ru>U0H;SGwT*EM#tq(y4)j_o_4wo4&d5R zI~6C2fwwpP5NO7NPH4@MROi-{SjF|@wG&vI2{Fc$rN1pKzskFH6oj}G zC~_RUK8siMXD2orSl9$c-R%Yk2>D^JbuD7g8Vx8w|Wz!%$OKv8+3G`PsUX|g*;XP+Z3bQf`$P- zyU#dw8X*F?HCnuJFFQF*)P5Y#m{(3nrS@AsbeVCBv%rz>b%czG(=g*Fh{bTe^Nd?B z=XS0O^P=f?k&$QDrLbO{oOh8RC5_qZ4W~?ZwM(_b2$D9i&$dlW)prqOp!?yC2XO>n z?d_&~aMpgp(HUdD6vr8I4Lk+R5c!U8DYg`!11<6+E##|_x|9oeISs%yf!H4C<+=v6 zBW(?*Z-syUw(Gt1I}*tse(+-buCM;``ll~{b^WV{zqx+rU;dBQgYSH2U2zGbD9{3} 
zx9#~24yUgUKif&ZY{%mFrt6*~gL9u!8zVmrfLqy+IngYXL>cSzw+yE3`t7sh@5SD$ z(#*TwETeF~uMrwvrGJYVCiNko0arE82`-$pr@2$_hc(Z=uJNMT2J-b^6*AcReyP3fN+{ip5pvUk( z9N=5>QoN?$;y!#&&dHz!q3M+lcnvloYirebx z1R3eWcSaTpMDh%;gHwZc%8)N}v*nq9T;tUKQ;<_${p2;RvUf4oq36-jegcSzHh1=S z8llsFU};u2?bCmFc<8m^yAVO2bj27o#xmE!2X#<_59R2>8EY9Yea0wloO*Rf#|ql4 z4G`36Kfz|;WNVbGW2gV%yVD-Q@nlqIzT7ypg(tM#Yr&z+#n#c}FoYPv<18fBhiO}U zg=_nWWr7U)9W70B$&|2(0u#2)qC`&@$WVfllz8X+#d+*doO@TmL`OaU@WUAX^T?)b z&R*N$u*h)jZ`L~ro=mNnlaH!x#9hZMEg7%9@9#1O*giZsY>?S^CdB5FbHTRt2_Zva zo)=q@-;AA35AKZXK7;_EUQGaU1A-v9^PcIz_qiX^MgTDtqs$1XPk|roGp=EDD+!@3 zW$IZ3m_gRO@0Nk?KJ#H9h`x?j;y$JNXtN8I$fb)wu43?lbcDsco@W)k56p=!`xn$= zsuFNK!SPhXaJde#B{)LnnrFh#f_za$YHh~Mcw*RjjKXuTA~}%*W*y@mDg=IMs8FEJ z1tPgtBOPW2E02^#!EE*PBaC$t8r=eXj;wqhC5OI2aJ_pALPEUaZ!0Ve!PBRS!qd?iZ#U(tP$D># zmi}cD_Dv6j4lWU5mg_2?H*p5zMh4>HKl7RKcZZ$@egkjL8ZDo>xc&28ZKuNX=UeLL zz~5=P*RX@jRF>7_-Z1qjpF6qsdw%V!>+k&Hhim^f^mw?p-n{khI(~hB{opVD<@LYV z{o48$p8w_bJAdgvT^G+@uDktRZhUzfyL7(03Maj$Js$YB*ZJJ?=oS)m%>T`xOX-uRT#YL9#-F_5|rh50|6zjw0TNHjBfz*S9rpu?b{1@6pCM_z)Pvm_6 z>dpGz)7$mCLf_B7i6Vx};9q&;p$A0WrA6+M$1bRgaJp9ba{7+<@#4V!ChcoyvJstr zFouxYYq$IUjdalMWP`IR`Lr>C(;N(KosP8Wdn0|0XFdf^>I(wi+$E}%0TTe^-m8=! z{vH~Em)y(7F!G+hL1SA6%M5m5aIR-~H832LIx~Tp^Zx2~f+7+Bz!;uQ-_-#I6b$d= z9^GE94qE2jCiJLzb8dF%1D3!iXp=$aI0^c!+rndw`jx~7sZ;zKMY?HL!L?lR9o?<> zv5v@VV`sV~x-4y+CE`AL`7-#Jjg8>o)%8V#P&&DAq#?Mr_Tu#Aiv&Q{GsMZ-KS)p( z9UKRvkAsVdgHO{#^j_^k@GafHw>x%^jtiU7T=L$bk+1*mKSUNFUE4F;pl zsluvha85_ZOgF|s2<Q9n71Cup%5=0nvS_56Ya?K6%H!d{T(akxoY%?k`XI>2E2 zZPoRg7)TpdoW(QpNj)&|7~fzp_T{BrlE9q1`Rrp!K`MfG<)iCqa~7lw5wSYsBK=Dc znkLFvc;p7EeAC{W_e=9A^~vDYxaZ(pV0h!E{L?_kC`E-Sjn%#F>p(Fgf;+WkjDcqG zn?KCSc&pPCb%+Zlk8XJMMHJJ%B6wbYa2bKL;&V$bkMHr=%}eU9(YxR2>mUO2G=}-8 zJ4^BCGlqcNbrOl{`>yhiedwh@V`-F}^6)b${+)GreB7rUL=VSLkNP>~qj*xinT^2R z*CyuA3V#Pd@;-k~*OhtBC?CR6p@}eZYTyKYIV|1LENvQ#Im7EHire6V@v9O^-5|A| zEfh4m?Vn-vf8F-*&HZ%I(|ynSty?Re5+C1`(GgH4%o+H6zn8Th>@~A_C zqa;gsb`17p2RA_E9Nk2(z*94C`9y#k{~WHm8AsPG1r}3iUh4Gf znhvR}Ul^SNBRY34^b(m1o@N}84?6K8k^9YUqfN${>~m(e?*b9bfk!~b`4-VjS=-CM zDdXU!8H<4BdHH7MI3QpS>U}n<%-D^Co`t9C^z@ah84MlBR^*JHz?<}>rvqHqhk(Fu zMlnMVPkK_vMn;>odnG8agqfY{Z#NDD6CI|VPLC*?M%2?jgDKsfp~kV(3KnGpV}#>G z$!@R_Pv1IPpZe6>fuD0`M?6y|Y?_SZWIT+S6KtDbH~}{r_6?*NAG_B+gQNT_OLQtY ze1(?VUGY5cZeJ2p7YF`Io?jHQ#Zf6)K&NWEp zsd%<~>)D7Z+CYd2mg{($o%0EGz*Mgf(?v)aBvh-K+k7HBSf#8Q!KUtPuNeYFs4ZA9 zX4mKC6XPWjeB-rCkjFYG3w<##(v>0wslVI_Ji${K7PByr5KlQZEgvL}!#cJw>OZ4wv1tP@utiJ7NL))CGiw5ay_ST&KxkbVeE}%_h>(cVR(^IcoR4*uJa3wfD{XoBCw3L zawC^Qi;f|G7+w_GN50w;*x??|aL5?FR?S5{N9e(->f8e8ETF-y|H`eouo6R`U6P!imC!hHP&rti5 z1AP4q?2Zvy_2>FrZi_vuRX#pGX=z(tU+}Y?V^zfX7(M(UTL&oRi<|s}8483HS!ZLR zl+2JUl?dS|{O}~s;=YZ3-~nd@pJ3z6_}(cbZ3x}XpO>DT(dh#q_~IM)_SUnVw`x2J z1H~Wsnzh)%1W=elfWoe}GV@!%__h0UOYMe9Ips~qhCd(V6kVx^ew*%#oUY9GnB@KF zsoVSOM{gdipS*j#-c6hPp`Em7S97Q4c3zt7ROiu?(N*~PHrJYgfRE^iS-d;Jj_E>m z=o)GJ*_WrvNe@N9mpc_vx9yX44`Ipvm{l^QZ6NWE^S+ zWK!Y9SlaJC{hSB#7Fa6(DNpALR)!ANP06Iv#c4YZ09=Y_8ABP%?ZIilDVzYt1ygm^ zem?8mhK~B0aYy(AuRgbd5_(_7IXSzCUASV{1#*+!4G99ZKW&rc^py7KbWd$mpJq&) z7h^*Y*;S5UsSFR%iEKndiok_0O;e2SZJQ})3vleIKVkxhb$G%@%G2#fcCbsK=O4Z7 zVrSV_nM-5r6A~PWqn-8kTTk+P0x8?CTesHjXQMaCp-jil&OvamvzV3CMN64bxW?mk zV$;5-sJad->|&e(xqI|z;ImMgong$aH2^*1-}HPO-TULgj3M_uV{HQnjbR}l(nMYP zQKqah%6Bh=a54apr9S4wh?KsC92%tTrRB{(NXzKM0u2s@MQ5J5grM1d$0HMAo)B*| zkFG+fC<%{*NaU$16?jFJvq27|WIPcRX+DLF(>%YfAgW$! 
zh)EE~!6}=xU#TVDsQDG_vlVJg3zkVoZM;NWlJ>O`1Qiws;h89kp-UV3NI7#jgp<+p zTf016^vT!aR~?alg)nXGE1!B>Z@u1kxM=4w<^-V(0FN=sD7Ec2hY@(*a6F#gD4_O- z@%59o#~7r)2y%^m+U95&$3zwS6oq?o;!y?v#mm#uSIX%4=|LFzagDb}>9ppc=nP-f z=egXlT4-+V!DJBtYPIp32;A)khT4bV)EJ7ze5ui!ub-T*0U*cBY5ENhj)4ICP7(E| zJkNQ1a4>Quf_rcM!LNQ}{g;2`{ls=L8WcTc57s}S~9r&t!?X%!k<3zb4%5z1U(%K=|TIg9;kvz`Qgzh>Xf6<4x_Y=JS`I#4r3%3 z{lUlZo4@v&-}5NXCu7ocLk76>q3_f^jp2R&@u#B*qomSNqxhU1P<~hE_?hB0Mr7R? z7-lHOrT>0wwc^JK*OiOt3c*L5_gqk^_gg=GHLc$$EP5)&wg%x8R;j7F@=XC3wV_dF>PrVLD8L#$gLE)KuI>y5bXufXlp~C?EzjYSk&6I zf7+%i;1jIqn;LaKq|xa&1FtTp3#w1(+gU#Wh4#*{uF05XXutzzPB??=#Lvx+q)hc+ zD#CZC^fJc287;cIbM)y067ZAnaL?Hp7y`U;6!$V28NUo<%5-eywb?V@f=P7*2)18k zc%!Kc2aIg-te-uL0r5}j7cZ&Tcc-9f`e0K`S#SunQ{|EJ?2R2#o#n|5KDBRucUBVdh{-^K|>PKja3XtED7;CRq)_u*(iw$;vl4bU?l^b~|^BL;q$vV`;*W+-v; z-C%hK7*!`t*#$62_b9-jhQqu?R0w52^>#ww8uuLLad zI0zm&i9p7fat0W283%$mjz;hmhOhudzu<>o0yAUlAOb_i0oLG6G&d2=Ebvj}<;#mi z>aVIy4#FE2@o+j$Pewr6UVc+JHSU2OJ*5}NNR5CpMn`WStjl=9=5XDSat#5S>V-v8 z2n+S*{iRLc;n82er|dvJ*ZZ-l=Q>;({}ur*$m8hTQ2 zz(hSYYTU_61Wo^x4t?_a&ExgyANt<)-~RodS^whgm)9TqkN=%@`&WOd&Qae0l}2{c zR(IiI$n*`q)O}#Q`D&Ls&&bYA2d8R^snMw@MhQGF=BP0Zqov{tnJJ zRm#`ept`#@K%KT7UQ;NaMmq|g;~Y3jr-4U<7FOcSIuVgEurkhYRHr!YRGFuaY#FsQ z(K+CKj}c4R4<9^PSNEg9iqlcN^Pe_zbPy?@=p;fJhii_05>UiSMJ-2TqYF zXS8F?1A@ACVz(B|2!_G%X?>IQ&Hy4SV&#$UubW|f(dT;{h zlIW|*Jw9+>MgbXN8}R1p=@qGH0hH-E^i|hfG>3vS12!4e*Ysj&<39fD=3J=&_xP~w zZs=c!Nft4zYq+nDHQmL5p||ao0;e($4Xjc}+451^ zh9}0^!aA8#uh4$_)^YW$Y-Bf4i$RBbZ#Cug&=1C4c`)XDuN?`D@YW~HQbzlxA+$BS zI!33b4E#-y1N`7Jjo9*B&?tzKkpO$MvMB<73%k468eevI*a_r-PQbp;H+EQZQDHW}5HIRf0?&-Tq+={Ba_=7?|z79ewG5 zwiA`x8uvna_kyvL36Fr76UqSnW(-HkFbW-B&v8Ox+`|O9-N6l^FxxmTWu6T)tIr@& zAoWBUt(4JrccJST(lUb-hT@fV|t&ZTTnT{X@95Xiu0@oPcqDHAjI7{&=WKR(69qz5$;KI1T8KmT%)Go%4 zFz0M4KLy564Dq2sJK)#9(}APg&ivG9%WS^Rm_$0XImrhaNXgTOO&~*#(MhrV4Xky zRQq(0LC5@`BbVU_fHvYH<23?4U2eT?4-VfmCgXxvW71d(O!_`F6zL`Uo%@@zIl~2k zJ9;JM_TM~OKYF*be)jRv`dplN=2YgXa-vOVs^AL?NcB%&F*G*9nwOM>e}fB|Le8~q zrYYrAE=}RKkOvRsiZ0O}-DSGc_sIgxILWJwW+4qPY8&6;r$CbS=%)k$0gPOeQxT`J zRVRdYo^kgM32oEuHIQ(dwm2L5#$)D((O*j1%58mfCIUtrT>8hH{wxx#{RnTO3tif2 zP0=~$BJF{3Xc+yXY~&;(P2E`)4g~NxMpur$gg*|?L39|Wp-vqg8~Y?n!70LTaG~wV zObkAo4T@VYiwkz|-vDQE61^yhh0|FVF`METH(F_TYfm)Xsfai+aOvv4ex7z&z6>=+niF)-|PzI#7WT#MTUe4WHCj1)jcH^x`VK zfKS;AQ=s7|tNYg`VD>0r`b4)Hc%B3xw@Ra)cs&Pm7UQ{zx)~%Mx;G?YHpGK?$f?2$ zu-$|ur|%YB9cD1QkM}bck0Q)HhdG6&kHRRF{CNxnPpHplhelRa&OCTVoofh10GbXR z4VEUuykVk71Gz6v4pv8%et3P91O`uA3|PuT@)$~E7SG87$P{F}sW#Ct7{|EVVv;h@ zzxpBvb@~Wi;E}p*CgUDuma6Gn_dW7u)M{YV!sf=L#|V4sq`r5CTm06PR~o5O4D-ll zT-#O5YS)yNhBN%`)|aLTiMbnn?gqBEpB#3r13~58QJlujxJzA89`FYTm~eI(OPR=& z=)S_2-%T}zUTL6)y&ik6PZ}?taJ+AdKm^Dsd-nXa-Zz>s@Cfws#l?E@;w*yc6^VOc z#yA?e*R3!^Th9Prq&@hwNV1Ee0{4EL#}&o+I@g&C5xeY+Oq-KEZ5rM8z+_KsKFi>_ zYBE2-TlCLC$a98!rbO!Js~f#0>hqI&ybfJ|?)?|*@BGadYbVN|G1w~erY_Zi*Nh#` zYNT_WXD$@Z&pw_;$X@RsuBSined}kQ+*|+n$zNZ8>RZ3CKL5d2x_z%XG)5%>cL7BQ z4o-P@p#1p2-yF6yn*Q9G@jKDpCN?+>y|(pW04$1Y-Pr7k=XrVfsgX~W>27NEamz4+ z#cx>JvAqvW3+*nVgm>w?G)2GMr~2T|Jo06f z{pG{`byvVPXQ4J)avvD<8KAo~2Mtn2DM7uy=kS@6jEKx@Fcg5{&7oe}YNjf3Pyx{P zAgeT)R$Nb%d;4I0*VVoC`yL;zAIZ6I^Bw4pTw*mmVjS9|L+K*Z{r9}{yZzs51Z&4f zk5_q27v%J*aWHdr$kJ#H8~&-0jnNRLW*l$;Z{vs=zn$n}7*ck-=$Rpvo^@}BPT7qC zjQr6tvai<1;EUs9Z-d*Zb8Y)shU@6bll9JL-l>!F>PkeIz(Tue6EDLbr-HM)7XvlA zYBCNmYo5Jj!C(Br5x$gdun2jwGVpHRy1`bATd9O>AZ+Fvz69 zsrqn&mjz<4>>U!=!6g9nX!FJSY4mC^8oPWM_~4VeeCwUJ)`49EGagPGK7@bSg9arc zI#7<`Bnu)*3U?M?m{+~vHN<-`=)Rq*j69J{l}oMx)Et5phk$L`BZ_?c*c^k=Rfjpc z(~lS8X8?5`a^1o@hD5s_4S>J^Rb+xIMMH3@B}fT@0|O5XQtENW^TCX>MnF4=?cR*Z 
zZ(b9F#7lBu25^h$86Nc-k#7takIUm*R-QY?JIr_j*EXU7%hu!MwY}0&gEPW8Bkzs?~T_(ZkEa{t?0^WT5&z}F6(YM=YhTwkvL+An^6ed;1t zB8>AaE?AV9#?o|Y*#%vd7m6dNRv3xkphxg<=W&{cd+Ymu^t;zT@*@w|Klc7Ft?&L1 z{{40P^=~b&X0;V7y0_7sqv$v1=J=Q|(*N**Zbo_j)?@q2XTQTi@Y3c266h-*o3`5q zSq5*)7KcG#6`(%zx5baoI>9NY=ZmYzTJ9f(W=CH0oH}?Pzm%qRpvN)UK6VR;(k$(% z%R#HN78+j$g!7%_aDr@K@Ew>K^Wro8ja+DZ#^FD(I0~ch%KFNT^;UiuQ-9<%wi<)b zF7UufbzbNQmu&}(?Q_1DHleS1;p)9-nQ`cJ0KMf#>RKreYPyz4*fN>o zr|8M?nT4#O86$acc#yi$6O3@mGU|4c+dDdJFf)XhdG%RRUP+w5p+fiawz z!kh@4a{yo3w`eker0(nDjFW-?I#m(F4Oj33{$Y{Oh2D>i(zhU#9lQ4?_GmrqJbkmg zfxnS<+9mrFSPtE*7lRW|C-jN1)0Z7D3NqMzY&NhpXi9&|HVXn@`E9FUbN}Vh0CgP) z3=RZoBVQy3T{(#1n>5T(+S&(juurogj~^Y`7)-_;Vi{IDy@{?d;G{xghys|ot_qNP z1R&p}i5Yi7j&Upo5b5K~n^zt)IPZY80eJ|6_*SW3o_lO!Qt&(&>2823&wx-29FZD$ z5|uW)y1OZU6b}>fTDQWVvS1qnjA1s;NMI8QBit~=q<#pKdSlFje25Dk5SMdrp1l!V z9GSFQhJ_P#nljB*2G*kt9!2TgOvh0AbzFWE{B&I*=Gh_`uh~l(82EHuVW|>`Q#Erd zqaeX*T-pM!wrX(0u!MLous(gV8+#P`#7kAa!jyi{4kOn&g0BeGC`veT%?OkB{?xDXxd|?FLAr=?HS*W)>q0RFLcs4mRs5nMPVXlMbRIymXDf|jlb#H}%h zE{{|9?c+Eop7X?&m?wPsIs(+lE%lr*Zd`rO^^m#>%?*4~xbwrUUUvOs=AUzvMO&)i zJu)6V{`zM9i7$R*{l0IXtviMneVsm1Ze(Q?RF61PlwQC3Pl`b}sZPteYTPLkIkof=cm1!w_k8`6_kVru{L(M3kG}Ddnk}n1i5jW;8Tgz9EOpVlqXrv3L2T7>v`%hy zqnXIaoA8&h50>13%d{EcE)u_Y@F1ad+W#U?W~{K zIa=?=*zbzos_3P@>Cp$x2-0;7hk9g&LFjYmIdL)tO%ZnrE9J=bP+#Odj}se<_B@T! z?4j{?jKOnYbQqQFn1~$hOrO-b$PEj`=(U*o=&jHydhg(Pc6XDBYX&i8Jso3%7qTci zG|i+R9T~%9jm8jmI#Sm9I3v z+ellYQTL65T;oCd*DG7?8xU|xdkg-oRifi$)!+$G^v+&za+H8UcGS}x+9E@ylFp-} zX7M$c;`pSVY^@Wxw(gel#*bFvKYizEU-Dw=xK4?&fQ?=Ytl9v>^^G+QqG%b5&;s8E zZ!d$q93m4Us+aV(V9J!p?>^$Z#~-EZ#gnpy91>GU>wO=NY0^< zX+R&ejvl};lywGCW`y(@_jKGnhr^6EE3mC>x0Vg#Dl?I3`Y2Ine{-E!3}8{6w1IgF z*Q9l9tkBd>=cCNkF=QTva8K0F_^17*<;M$^Juqlq>uo+EGEi0ktDL}|{;SkWIOxom z9LkTOAAwLFQ9Qrcs5nM*glwD%3t2cR`rs9NtD3YJX46(1NjXjg1CO}+#$W_c+C{p| z&RI>{YPX7A-<%6hQ-s;Lf)7yIkKdcvq+^T$@vj+1JL&al0yH2Xh?>4r3A1oe%(-k= zz=CssKZ+*|iR*c?t{V$DU^Y9OsTUaorYffHvl|l|)yY%+ap+`yrU&@*reHA#RtY3$3 zUNbx?7XVtP)DCW)+bf8~8T7lw(r^Ctn7K(rbWR<0rKJ5a@cU{1k9_;1^~b;bo%NV! 
z7nsUD?PS^=KYR}3(vna3>aqE#hEP<$y_a$Gj~IiukJsmZ_;c&;y7zMZvmgE9`n%u% z<#qS9uSeit70(uZs#@4uH~r|#zu>TdIJbv#K5{l9&k3gA2G{lo?rTBf6^a%~3e5xdK13hI(_QLPx-pK-ke;9r)B(1;g8+^Wl%g1lMmm}Y8 z<`Ffrt`c#*%IR*0nRW+<%7q{BckZY^V;mW)BL+Xt^0S6hE^SIL&vV!*6S!w_q%%4~ zvl*LHCZ#X5iSe5(=H_0$Ocp14r!t?2|INeo{W*VbcYl4)gUEiI>WSV)MjSOtDNlCE z3jUWx^9UIJ-@XIh(P@fOJ@`#Ia_Q)Ja?+#VC;tqr(=hAYZ)br>C0l~oR(Krr^ zDMc_ukL94NtfZ%V$MGM1LC+drv!=lhK9FrQhB3y|u0wmXQ4qd~4vf53FQ%<+x|*KA zCnNhlV-WLc>X1?Hk>NAMqzfE(ckr-@us8Y*Hk)ZoKk9Gy3@DsM5C@0uI9vyIeG4qr zWt@QAv_Nk6;3%*TP3H+Gfdz-g)PaDFUcaqV)|~`9E;`OO!ejroMwP8#6YyYdT<~H{ z+db;h%5*c(1hN8NxJ8dAPme=)LG49;gDt!iTJ|~ke9pm+2GBIX`59ghuLdV<4JWJp zZ~p>U=^b8`r8zr03m=9@Lbv<#5&#}X+AaSRVlQ}M3+Cf!*34=D8qy?Apjw_I}%|w$e##dvJYE=85cOLrf)kDfC@D} zO1)^{UI(W!B8s`FdX1?I#(QC8P6VdaW}T3Xg)ntRd+zfHZO8%dHdgQ^HqVsb#QUQR z;C}lIv3WAM?Bh4-0ub^3_ z-95mA+Mdezz5NRwUB~PB**a8;Ik?YVUar6IYtPrawj+;FR+)u%3R7@>xp^!4KY|dY z{$DuKK8aESe{ENb<2UKNfD=69j5vYEPuAc3yWUy<>}OxBzxP-E;yV6I|JgeI`nM{e z%HvarXGo`QKj%9o($sr@;j{GEAHI*N=yTAy*!j7fJLu|r@DB3r!aO|Cv|;K}Vxa4D z%lWQ1`xf9sJm{$gTqD{#3lCZqzG*EymGZHu-itlaW-jM?pZ*vG^s~k|Fmxy-Mr5~o zXRVhH_SdV&iJIE;^siiHHeXuT=vr3^eWpzN%8_=aVfTy(J_ZkMt^MU%@Y=G0r}Rma z^jQskl&I*v5jS-N6mdrivTE28c3 z|ER%@wmUr*y>fPXlHUTC@I+vp#cvsV+AMFSX}VGmbtuML&38vLRu+FW=Od+ zteQt71jbAneHl2f)iYJW7XQYQkl)gB6R|4?;`KP>rW20(}`T2lRXK zx^{LCilAHRmuou}sbyf3HnLI1R=5Z{#u5oLic%3uV?0M+;85dkfP~wlSTS9wAbVhHV7sW#VcJ6mg%@i2z>9rE)iZGGSw#8C@kt%+Z+^|s z-+FBN94-Mzf6mde`46CO;6=X=-dc}>>wo+wpRWJar$1PK;^i0D>)(8Dou9?a2;ShU zn^pu50CYf$zedu?TT^L+vE;gFcZ(O}BYWk0teg94)^Mk9-#=cP(DIuv8zuBHm~LpR z=kTG|CwsN&bv|DOuU9b?UOHp(7UgH|a_bmyoSG3#b9J!NewzkIXr@2M&?@ccyKK#Q z#(5dN!i86dzEUK$`q^RPb8v+xF?F4<-q>JXr#<{qbINpnyO&ZfMqsLh3S`N@eBZ*u zIFL61MHkRj4&ao5oB4ITn*Y{uBJv+&oYg7)e_vkA|XOqngKK2gisN?h# z=e^?%4Db^8CM)O@htFT_QgLG(6C4?1jdAcIQ_QC9$I%caV-}0qlyivEWm7DawoBja z4{ukv=-$BY5k0u^y}?P!v1@d}n|t%b7`o9EM%>C((7Y!54XeOYIbA;f3NoClOKnEGom^%c;E(I=(U{SC~ndR z+}h)z_B=dqrG4!e!N=5;bC>!DAujBsfb`qEI~r5203%8lNit-!;*duq^?qb!W|VqF zJH(#0FtZi&3`v*`}olLOR-<3>@LD+6Zn&iPp); zmEJlgeG(eoMEOAss=uA8)x4-mi7?rRSf&q@Y&hTiqh^e9;F8#=BTVo z;Ou~BC$R8(DOqEKL!X@%2vZPPSq_Whl1v*$7$%Zq+upPfD!dA zwMYQqX3~4q@;tA5e0pdDXs`* z;D1G+Avm9O^N2qbQ)IrOOmGoCC}nbKj9KcmWd6t7`CbE;Z(uVl-`&|sKNgPuo~zgE zkN^6MwRcHD8F$L09jEz!FsAW-ccxUw*1svDoxxB~gs-7#pTl_>1OM{$YF)^@q>5F{Hd-0O{ig4Q zXGn6ZifW6CQE=0j3`^;U7O4v&6uil`^ZYv5d3(LSXC4%A=|6B6+J1)bIewv=75Sc_ zPV2o_BKh3HoDU=D{a(!lMfyu|a+Is&Nf99V9h?=1#+C6ku&6$$DL*=~DfyHU2*^yBTH{Puc(y$l837Xb!CE6uO`u1<*hip-GIhNV zzD788`o=enmr6sSF^rSJbZsY*zxnWS2BPP~U*xBY&G19Z399JElnGw#Bu5VIyZ?AE z_DRqZnC;4^yLOqQ`+KCEu`}A|(PQTqu~E^hveWdP>4R~!!&A}K=`}bR2Tpb%1v(b} zUMB#O1-F3Q7-a!cA;I%WaPqqCD9hOe0v!vVBQt3#*YJ#kGsC!hk4v4|@0j__-afXQqY>NDC%S@$kbvM(I`Pmu?>k-!qhXi2M~@;>DOd)y8vF6y2x*MFSK&br z52}wog=v6fF2j3n9Zh$~01VQ{`xKq@4RMr187ssHV&|Fg7yyt3G=^#v0~k^^1Fg`_ zxChx0v=O*TXK7zQ%84k{_3A%JcT~xhNO(Kt(sy4J5GBvBlwhxA9_$1(!d+M}TNsGo zV;)R^0AmQj5klp&N;aR`nK4WA^Yo-}?iELbOM8@YTcf;lB9wmZC*76Dsi!?kMW4Jw zMxSyMQJ5b$_wErr?HfOjzl#JJ<@^yOk%*@j8c~Ig@W_j-*U3@J!86wjg+-X*-aWYT zDTNlG)5iT6Y2K_Aq@jz&g)z=DAJ$=5JzFJZp-TSXqCy;4QNRWooEhmqP7DElaW?Xcfx*|u|GSVgO|%^&D5mngx7^E>reO1_&AF4`xrsg%g3$Z- zeepNf_q_LVw!_{0VVMdP8A0ES9j!-fQX*~WaiE#QwH_(&*!~{n<#+xI-&i%SbJvyW zV$z-c_2m0MxBkJOeRuuYw?A5c@c;W4*X=KTah*pllFyL+6&^kW_# zocIhZj90TPDTV&Pt1|$P+RimchZP8Ka{a~LTkG|sD8#h!rZS{b{zq_wP^olaL`D|; z9uAn(@9lWDDCw_$_s!qv7)l&owhmeAfe{|U5BI~*TgOOWj2AiSXLO>+dA&R0Fh=Bu zPOjDu+&@@*_a8U9)Gl2}mK(LgU5q|i^;S#s=4jeLqH=ECO2!mGFB2efxJ7uC-M}?# z0ep{qFqrg1e&E&1=urAn=FrAsHoFp#(c6vcbB}&y=;`E1%O?mZPCT8nS6tPxD=6fB z^-*VY+v?F1<(2$ZKNJsepD_ei9!HCUYJD`em<>!K(lL_JN2yN_PAYt}ZUG&FAE1n! 
zrI{wLgO9YY3|^eIM0C>VOM9wx`hg)%+u$fn8ywN!bg|!kVTVPJ63#mbltbrVkD z3*2~sZ38P~JTGyf8v!?gOLbUaXGbQ(J=tO#E4gM1XIWmf8yw4O=u^U- z5PcK6ff+a87tuKf2*l)dc%cj4bIsUNtn`_wP-Q}&v~|AiB+wsmJk+QZ}X5YJl z6MY*i_nD6&@TR6dh#$zB zMyABlKW_}s+6F2>XYm&2VFWoqq&*pdNiQ4hZQ5>nCP=M;4ZH}{NIb-9>L?6TWk<86 zeJ8;+m)^8_oR>aoQR9^|ITS-ckuf+i9)^WfUT)JNF|cqn_K zP8F>&VrpgLse6P4qs@XN5mWAGY+w|jUty+ zc*zu%$AHkU8$F;&cwp$MY_K1vr4B>-CGY{c`_+3J(%qZjEbZ%Fwj&e(MhyHgIlT8M z-L_wS4}K`@d_@<|={DsW5Q;xM5C5aS-J&*TR4dzW?|yu8a3RST_cevfFWJH~)$sa8%I;2b={6fuAv}8{Q`y=~jdn8=_Wu zyHHj>a%jlEoP{`TXxDxfRX%~2;;~h8$h$7C8JQH#e+O~)rb^%n46+7Q{)r|A7iqLG zweh*;x6bnUI>5Ph`R38Oi=+4E0eEwdQYAC+l#dN?`GCVjZuxAq?|Q$d-L@HcI%a(v zZ9c(jjMXJt8Hb0Hy@jg>x#0{On9>hQtKt*qRULdh2nvKVn zPBCvi%pk#lFieeH8a5B&5sp5rpZX$53#inyP_0Pnb!lC%@Vg9xErP(z^H@k1DP$n{ zfu~u*W1`fvWh4Ve&>^G>2?Agshk?0P&(44%RGm`^Vw)bT(aW7M0q;u+9z&=H(RE%z z5U(xdl_Prl&Dfj{?Nk9!A_1b*}r2TrBX`>tPGl45GWD`lo0u-ne&a|;UonZ0(X1jy7Y@S4Q8M%t^nKed z+FhenJ}GXCJ-wDxW(hy5gMWgcD70D43?KfQvWBnp37#!(G!9pGCyWQRE~7WDc8+sB z^hkSEdH|hDg{4QnmrRT`88kL!;Jp>5cq{jd6TTrQ6&WmLbFYrX_E0uTxk}f4c!;bA zVO7!;ax(|HfuF6&_UV1}kK#-p#7X~r`03Nl|4z2Hx@#V87*1|^$*H+|RO2Y+Z! zzjh5&;B$-~0(TtVje#NK#;AzXlw}P{V0@gmeKK4m!&I87F;TF!I`my>jn2`t-B~Ys z;q>IBdSS=Lzp1zU!HIC}Cv@uh8qFeln3E(IEnfu17jQ(UWM~_2>lznKCKkb4+;3r#Y z{Mf{M*k3$GAIzdLb`I6B`NB5PW1gPUNay4bkVYx|{h z>m;S!2D9v4uIrl)yq~_RSasUL#IW0y%EG!bQ5iUmTnUMOcL%%zTKz&^@VKnCC=pH+I{o$Dxt?o^ku6e@a%49t^l6=)hR zgX}(f69+6tD$WZ3cNRGf{hIy>fwdb9;g8&#=S0shP9w-?jRIOeFn94|=%j7+IA)`G z9+WQ#xZkEby!7JqRGMuZ9Iv7!6a?kv$!Qsi7tC0T-Z?^j6s~P?z>GNKNWVOOE9dv) z^b8JeySVf8oNU59eon>PQ8 zhco*VA#eEB1Kgm|i zAkZ-A@02%1$fkj#Z-UEbSJKCMGq=tO;JEau zr1WRMm%i72XFqL4Z)Mz^lX*Xipe=Rc^g@rlVmKYLtmyiNPy|kJ^$~7#c|#iFObA+2 z4{QS6&h>K)BHJ9LYNz|DpN4I?1rHNAl2MOv6k8rp8_6AhOJr z4l!6vT*@%+-tTTv8sNgb?xjD`V57EN^B(gn`_G@97kJ)CQxfSMgWhr&Qm@`n`l*M; zIHW)6DN)88VQ7t`^X3Y~7&L~1&>FZ#4YOBzpVdzv+Uh1Ogb2HD5Zdw)03Mb$;Gsw< zqXn56uNCO}X+4&hlFJyZcq}dgZzJKf*Ys-+ZJnQ;Wz54w4)_3fR}f>y426)$Y+PSY z^y<6E;|Qg|Q8jl^b{YGVJ$8XAw z@+#b+aX&N6Mr(mD#oI=x-v}SrXJ_+_t!REPFv#@K%FjN09p7SJQ*C>Z$Q zsTz;IwjQl%Amt}@X-nSMbpmAwY@s2K?VgmUT1tS!_~yHj zsn5Vsq;H*`uK((L=j-Xq%g*&0YwM+q7Pdy&|9wSpTXx6S?Pnm{bpP@3pZl#H<9E&X z?df3JLde`uDc7_cXRt%7vdd?T#(J=`{>blmvi{tUT&_QL`IUA5SH849{MPgJ>^m>k zS)#;HGi60L>gbDN0kZ?HHn_+yr(mbX%-ximg|wXS;3hQNI;tN>wi|f+k^e5E=+3WV z=r3Ncm*>~(y^l`T2QM$mBO<+KUceK6nGIHqIW)*8-a@4s$9(pqBZzF}URYsWi;4pa z2xqaK?8=5ARLBXG&PO(KGh){_G|crd+VnC1rv0=-?sKDcI>y{{;QwiwLLGh7x_B9X z1TP8{uE=2*o25*l#Mf!N*-imWzVC*Qzw3jm^@Y2%6?|Ew3O>#@ z!Qq2Acw`vuErfIS@!T4Z1V`b8K+(P(%|NDqIGgbZ%tlw04>PvTVMl)$7bnsE*KZ~x zKzFNK9coYrkpA##jg;}jt^YUpHGbdi@89Ru11lTGhIq-#$XePe4BYn3kv5yz-55a` zD{V81uH)6NoDY0#0>p6&`qV+n)4Sjl@FvPk)(erqY zyN_cC)3HSXJXMWX0CFnV6_N2oG4cMvZnriRImzJA(`>rETc&x{)9+ zMwDyNjO6CA2ErQw3g6|o)NVW!1~GyJY1j&_$Kk*OGhp@mPU0?lNWW=Afxb&VFsalu zPsUQuPUW^mnSnE+nR7hS&Ek(ZC)ytdl#1phMc_zEs&`HW^ARq{&h^|bT%&S~VOE(p zicSA^Lu$GV!BV7x5h7d1V3Qb(g`if8Vz4cIm`E0Yuz+DVZ5%}r9LAeubkXQ_#(&FW z+|cYM&eW{h#RQxet7qffG<8pjb7*HfLAnfva|p6klGpO$)?g4mSrsbtCq-=$mb*t{)t)cRv5_`cvO?v;L*qFRefL%`dI%?|itPy@)e;ex9-vqmBm` zZ7Z}c-gC8K*1} z>qX@Jnu8kUSqE8pZG?86dP-Y%!Rz{xVhSvJo%W*v3IjcGUGFYm&?K2tvYZ^7Em-r?F0zkk<0SqGKAg8xUmvwEDpphM_F z({ZLYWi#oC(kA1!CZ{R$j2TZFkPHdkCy+uP`i<@2G<1zX#;5`J;cy$U$UJr9GW)>C zXnF?#TyuVy2y)G-u~vvK!f)W6>CpSNTh<=Q4*1PkW4}K9=!3#1Q|y>WE%E!%vHKLH ze4VW7TWiPmkS%&`GT-_V^uaG#?H&Xf6UM?b`;l=^hmcqIIaiYj5YG4;eLa*A8FL`0g?rsGFMs+hs01%?H&vJe!!wLE zz^U(^f;=P!Rvlhr2^mI2K#Im`OzJekO?wP3#^5jqe)Enk&<-lMr=juYl9_J$M2UQu%j14_7e-)!FLB=li*4Ix) zXbz5ooAiYVEjn>v1UU7~Cy|Habl7%yt-iyV@?9|+Wh2sO$nB~m6R>r3wiyrtB_m?H z;*%FK()nY^cjK_^?LUohto6}{FWSF;w!9>gD0AlX?EJL!KzG96B@FI+*+o7|AA(Rr 
z;E_$HchTBJOgLQPy4lpUA^S_ucR4Pp6tdABNkYmPnz?DFA;D)f7=!5vGK^C55q!wb?@r1|nY)E2 z%Ujl=^r1wq6A`3%$GbMABp`|hHksOF5UG$Dp!5TxwF~Y}YD$@&j9BW?HwLJ)o^xK` z#AC{H7|CWfgi3}0J{Z)Ci@CI~j8>aQxOj}nAPPxe)Lpz{$V9tQLMCVMRL$Zfa%O-| z&v<%g6bNHz+>&9?H(SMm86rf5j5dZb?ru!`wgyA?!`Oc4!plurCk6pC;gd4ONS7Da zoseUDU=j2(4b31EJU^N)PCe3MU5XI}LXgnf7@Ki9Du@#Vk0zY8?=%+U8V3cfnwrZ) zesvB-3UnrMdM`q+fz5N%$YWxEQ6RkzCcj({9-Zn>9qR77cfWnl1KW4+^o;F*1ve}%WY~?4DK90tH3?ewmmzzDBP58aL7?&++z$p7yVPXbSU-J*@CfrMpLETw4DZ1&A82TH|ndyfFD7W zBk)Zp$Tj`f1}9v~G%^u5nym`#_^`N2d-~dD^54d49OBgS{o|AIAAK|D7n|d|Pdnr3 zHa$&wS-Zj(!08Rfmk~6qkQUSKjPj&@O}3j2Rzubd7Xc-gBkJwxAcLKtbzyD@uP zxH1k)dMIaR{$AvWK+^-Zc-`1wob17|*_5p@NjNZxl#H2=Vw~KPGX|9*>u_7jkcsMz z^rwM9S0|@#ye2)Hq75q4J^9a> z$TTzv+8R_ebz7ZCF;53kwr?}|seX#I;O%A`7~>daj-;hq4{5jhEU@59N}uk7Z~Eym zJYtUlGsd4o7j!4@W_r^P=s;I6QucOdEY;Z=+Qk(|D|n7G0KRT;lly4qSPt*u^*~qt zbb-U@V5G<#mIk}F@DAMLXt2Vmd)gAT9UaDw(0%C>=SDx9^}L^Q0iOcTX3bh>=#*zG zi*#~M+1AJKJX_K;H)fVJKoysfqx6++vK_NyP8^RcrW}0JdvGBr!{dTfx<9p!>o~sb zM~7+C_RKVamkwU*Jgo+P=^wr~F3!;?z0wG{^RJHQW}H&GR*Iizf>$}AhFPk6A2~aL zHF}$4Cfjj(dRjQU*;@Jrmvu_g4|L*et{O)`7T$o4+n5IN&d<(Lsc1V5=DKtRg9tDL z0vsVj(0Yd<3Sp|3o6acL)KUH{BjdsIEKIP`K}h0xHLV`Rsji-sGWmhoDSvHaJO>AZ zhc3P5HLwR^0)pYxUaBh57$^)spD}hrsotiIbiMU0#_{vpXV*kROn!8?sD5N?7>kz+WsF!jf#3xwKaCz%>^ zEvy4=M7N+@$LHFPB}4H>qfomkM)1pcqp?0N6|Z!BBgFItV>v{Y{hA`Pn6Y=!*u>3K zloKeYO7J@lW(Jnm`&{tpGd6a%AH#?b(G)P+bk>G?PQMW# zeXq~~czG7O#hKsk*MZ1$*wW-sOaEl;{I0QHjK6UPeW$$O1-<5Ji0e80Y&iOu~FItqsfFa1)Cen&^zQgN*ND^4$Tlme#|bfd~hy9 zBe1L6E^>5>IXw85ff9v+MZYRn40Ntf9h3q97dk{vC(S)Hebvv z`O-9Wytg74kei593z4HW8jzcVnv%$`sueLc}zJ49PJZwf0EjgR|h?k}My=~Qw zcT)O-SMsOd^j39y@F;)=KT3iXo>pyK(%qMWSmP3TVw{~{6fn^iMJkP+x>9KvTB;;x z>M0Tk_SSA9=n%lJTOGSocF|ccE8rnfG!vQAxGy893TKGH$Z;$fzp&0kTEv?|lpzpl zHeDS@VrOfuOBZYkf_X3|$AB_Jrrwlae|{LdFQ!9jI7L6on7J^og+K5}u6dJ~i~S3pd^(hy%>>p9>^A^mW+PgO#Fu#e9S1E(Ocl8H33OpV{wKy$ceMv75qh^0AQ z*Z-NOPx(COzFErX$6O|-vJSMu13mCy{zS&cXD`m|@owC9I$#v|Ug{;gp74fB ze51c~jO<7!^hGXb_K>lT{)-;);u6O1A=hMI4~`D1W66`ignpYTWqKkyJblfbDHExm z4Co6Z#u4K@l7$8q;cvek;A*?6XF1n}ewDSLN7F*^5MIqS*(A~S%m_qB=IPE23O`9b zGi2JOi>K2*ot?^d2-h|Afl1R~zqq(q=tcUL6HQ;_d+wt@zJfcpppqN-J!hDNos%&o z^JuK>`T1$;O90f2XE-xlSdcrriAJK!nK=vaKd z658gw%mjL@8b@I0cK7z{rAph}#uJdD5^;$DrGI@gS`55S5ju!t6m0rXuQ?rXE~7g{ zw%0MElnXhf14VoxJcjUmh#-s5amdsd_MP7YoK8zWV<}PugR^Fn`Kn zjC!BtzjQ9u-9PYF=Q>FBNog=>P2V@7HFl$2M%?Rg=Wqn?t10}2Fi6WwZ9=eU922|DwJ1~b^+ZKcHk2w5ZtrVi>W*)w-Bk(cgk?0C}4z9k#TgXO~T8N6YRntrr@O! 
zGJ3*{UUi6$I#dzHAoSYCK)iI%IYsDLv;q*g;S$5C-vku~&87^LGCFHhG-3ud#b8Vd z)94S}aHy&X7WGWymC8eu=(gZBLZ9B$;6(rll>G`iwiG)3^3K}$N}@&*Tah3bd0ty} z{M3abf}8oau+SsrICJ{<<&p3B05^HA9dzGwp{ssP!OAs+35{|q=R#?OzHn)ry)%$w zKxTxo$WkrJ;!SJvTejcf5u6KWCU|E|;Q>4z)UJLNq{5Vco=ZKAu0sbjVu0-p8VTG<}C-96sX!B{56*1iQd zCe&?&|H3ZqTjzfSzi_}vTNbC|oN$JDsdVWKvI8d~RYs7$*@+(6itHvgaLSL_+HmO&JLRA0ya6gX9Zit!E;e+-m9kMQ4{~e5^K7!3e*z!r3 zfrD)mlcR$?pMElQmYkWyjGsD$7Zex|cHkM$lG7#sKfrtM#_)#5Xxkx@z}xZlJY=}; zOC3a~!~vp5ecy}oLubIntS0(3^kN*l9dXj|X_X2UvtO>^iB=H-N9bgy2Ib1VhV zxYN-lsNE!(G-qlCSfOBWfgexri}&6;HcS6b7D4mk&~twizdd!lpi@)roZ zfiAYW&7*ZR);4BftzFCOz}Goz!37=Cp-*}%fAo@7UU0lhQKxi)6x|r-=y+O~vg33y z?$l@MYwDH&W5PUC#H_4E@dgOU#K=DB0QDLh@Sb;uYiZOxr=JWHL^^p+l$~V>7!2b+ z266^WOKH9-=)Q}3GlZcy@OdhNw0f}l)ZyiXIVl>*G-jbr?M+XIvoe>BU0z@2J45aT z3YMtxfC+m#kUA=;1TM(By12^M7{(As^h*)wM;(y5TcMBqMTPz9`+HKW2g*m@DYaTo19VyV{jEC;1kObCfSY7N-b^dLO2~a z#vPz_)KXXm0q&aG%F`yd^j+3)8XW7-RXj(lCpyD9ULu3wVZ$p;F{N*Ws`SopLgc_d zHmf0DG7DfMRJmM-MKssR*{SOnR_@2IZQPCZ$2bl8xWF~qEDe4BE3<htH~;*;T&3=FS(vx)#@1(_j6Zet!|~tW z`oZ|;Z%)T2kGtcCs|Vw9^I+VE=2gUTj8_Af;Wcdpkl2%b7Szk3EO$I(+?%FHcZ zV<^)$E|4x4iK|Dy4Rsk3qfEOu$JT^FRKBqlehrNH-ZeU^Fr+5$w$NP;25*(o2R{R` zSx3PV&`CG+WQ@#qp6{TTdO>~oq=8gE^RGNOUx*c0@;w-sG}3Cz3OeR@#<4T>>p;%z z7_D9>DDW^KO;x9S1DEJRvd8IhorS(#ER)0Ybua=OJ!zj4a?c>$Nbkl_;Cpj^&>BNC zK8%rP!Jw>^ap!2o=w8I|-zMr*x)FKdoBhnAd5-Mp^Nd8nVGLh?rvc;;;(4k6X_OAl z;R=a3&DB$>iyU_ieP}_ax-4x4_lzMPW85DCn`m2s%4g`xQpqBialX5I5Sx_%HMBxM zPQ4*8P6-FmQdGuRe|>L&8N#q9ctJ47@ZxV}CN6@P=-V#kPXFrchlcvCkGYV0Q%6<5 zw`Vcw<*qO5eV6Jqkm8)}4`vO1{_r_M+%n^0gwdukFrP2kmr`-r9OA~V0G<`BJO?fFg6 z)W>VagcmWI;N=nGO^y^LN)ebq0&)Jl;Y}J-oyxbL`M+u1DEaGa20jWt0wEiqoH0e* zrwm02zYJ@~J3~u*;&Hmi(qE9S_D8TR#d^QYgQ`7T$p^~B+4`nRJV`~Z7@(b zb7`HJH*E(_-ZmkPUaF#L^d6jben9xFDN|#;4;SNC$LaX#hpTQ!S_4uHU_nt1-os0b zOCWRK&Bf`vul`#K57>KcqOD4bQDq>?M+{0n(Wj0|D{|kpDDgbs$32_hu2ax;5OOV` zq#aHR9wJQnY`fD*C=M6T6mQbikr8Q9#>IviHWIe7Uo$W;uz-`6qbH;M3XV(iUvxry<=6Gg>Y4C>Yxw&d=k_zC9Ul z-hMp({M&yz{{7V-j{n)Klkq?KiJS3vHm}F89iNQNL_nvJk<0ria0)88p>S66-Cpky zEt4^E5!^%;UWB&MRPIzJE7xflPm*jjfeXtnDdyco#0-A!G=R{5#)e~9N3^(~JXq(E zept|?v)FqrWrLz=grkHTA=#`XhAjqAR_4{qqp=esVSWUse=GPkDHH!0!xYSvR;eF7 z5x2%5O>HddLvL{E@8QdX)Mu{G)RST1{I@a=500jhjIp5qknSq_;wm!QrG=ZEfmt6T zQAary?{hNXEBNE+!x`KezG~(u?vtSaFwAXUBm2R#vQ0Us?_?jqumeX7x-20>-E3!I zn*2OhhKQq5$!8MPnyvDm|#wubOW2LOug&i;DbEgdXyrZkXv&t^*Taxb zc|W+;M|vBb=QD9oZmz0}=-CDmfKK~XHE>=#1e^PeejVc2H22X&odK&(6MTd7=+(>O zwe~mtBM-=^@3L2i;d2~7e(vA^c3^y z7z8$56vif~#^t6$E|`-^f^?lCjOv5{%&wiKktARtf4+xN0*~vEPLV?>kCq1x7y8*$ zXRcpXshXe{k80Q0)((_5qQYEa1_~S=rWMlmOl8-&2EMbiiI+#8>vwmTItDsaUZ?h4Lvua zch24&BcJb=IgQO<#vwQh-Yya$-F^Oke0252`1$wW9skzVcgDZJ_Ji?1|LA!9{a;)k z|L)K4jKA>555|W(QC30kAH5vo!{CA3%{eOY9y&z^!rsM+t}^g2PHFmXpZArtHRaK{ z(q%*Lg={Zasq@UDXHp!tJhgZbAlqrVBs-!uj$apf! 
zQwPr>NWT+-=6UqT82V+0oFq`a8BuX1KB(8$zF3 zNuUo`7w6;r)GMw9w=tlhIlh$D71*4foYkgzmj1zOr^mqKm7O^SyyVV8C~(<1kdOZAIaj6?lGfnhG95tm%n)(15+k86wf=L|rC?bs|a1 zNC{8xOF0H~LE==3vAp#EIUX417RebS?B2aOMXhD98XnKZ!wNK$xWQczpx#uQy2J2c zqU|uJ^aNykZhGG_G@cH{MTW-OE#dN}q(9^xB}b$wI0N^c>Ua=xP}}OCzF;A&iNWhjA4=T!$?$ zW6Y)T@1h9S-+eJYID0pK{)_L8|JKgQ_#dr)XZ(lXxETMNKeayogFm%3{yV>XF#gm} z?8opOjJp6KT9TDg_x|nk*ePF4B?5oBQ67RMa7`R9Kgnp|x{mDRIXFZYPNQ*nJ%E=Q z+|(hRZQQpRfMBp`$dtbipTUDu)%ca+mx32%Q*<3K&0nzJ%$7_@XDL&LxwdYNG9w%# zHjVH;WcG>WW;~1{sORJ+!&)3D4gq66@lMBd+|w zCn?4%&YTK3H-^yA4P$NRKAA;RZ7S1c&*mS57wHO<6Um`rxUN_VBJ_{(;a~`;=o-O< zeKJI@2-x@GD%AV@AKLQSzM*q-V5K zeGSKHcew;`rkfK<#|SG&k9S;}6A}Hw;q4HEKGR|PmOiSFUFr&<;PU0`SAlJ%6Z8y0 z3TV&I&jS~@IcVX{(4Ud5(abZYPug7H3hwszg9CwD_##1)rHb?bC(@hj8l(mWyu$F4 z3-*g+hz6cR*O0V&8J*BXaBVAi0AHrhcnqz{KDvJOYadmAS=nayQ;t8msLc&-o574c zpaa^8{+seor&lQwV;?BfcToqyJl)kqSw8zVwLGB10AMT`D#iqHJLzsw2EdduCNOTT zJfhco%Dv^JBAiD&J3~Ie=?Fs)A+`>54U#Bn9?8~bqURuD?s5k@RfGzxJWhs_VAKc&p%}Z=%Hdf%IT4vvD21aByh(9OGGB z25vYISro1?T_c~s)LEeE3lAJTXaJW4N}AMcX1%O89+)Sd@@)D7hlWr@FMIJYDIVUj ze!~@E!zWatK2+_N7NOyizQH}8h|%OoSevX}nG!w_yxj$-SBcnTuEu7fk&o_=$G^UR zKK`q(9>&+6!#81K5z)1o>8X!E;J;4vlS&K^ad75eyBJs|?MP)aIXU#pWptWOAn^M{ zKbP0gfn!t;+U*v>XB1wZg){igr>L~iU1+nnzdc^P+U@!BXls1%YI_`ohVBO)#X*}^s)0__cTyDyrrHw(J2q+KXg`i$S`&zLngi{U*``W{}&n6E)5AF^AszZ|7Tk1;4RbofD6He^F(1|m25OVQ8S_uEy}Xil%f7lU8y zr@wUMS)xt`2E1!l4I~<5(8=y<;9+R&XYTuyTkj)3uQ4~wMF$IdMD6FN;kV<97{ZyB zM=bIysDme+o>BXBuF)R7I?pHf7@TF+1KjjFN19`2c#=+T7KkoQ$2r6LUwe=`J9H5} zHJ!ToNlTfl(+zXrCue0aP7ED%esLDOa6D(wz@S}S*n-=&1aI_I+SMJkHSh{-aK?_% zLHeQXt?rdDQ~VD2uk#il*@jKJI`}d~!l9Af5Pfra3_G(k_*s^rLlw1;p)-7>-qJOF zR;NB$_DnaEt>t_-L%&y77hUaP6-bv^=8qj^r$qTrf$uEAx2!DPjFC9zY#QFXGZr7d z0ka0Kr>2n#^~fk4RumbDk=fP2)tRH;srSX;T4(dX85He z(l930U0%chno1plnZ;`?MOqVKo7yWAJl5OMoa?-~MyC1R=rU~(X0s{xyR($Y0P@qZ z{ORzV4WD16HT5;Q~Ba9P=tJejl!o8Ostzri3llyz_{&OaK9Ive-x## z6}|i%V?VCX$2YcD$3J(lHhyvSaeR<6_R7AA!a2XeE5WblwVgV`PE$!zGHC95PB2yZ zq(7Y_lJA`_pq#%bGO#V!BaP4B=+P*mmnJ>S5cyXA2rl4Io0OQ8`q9DmIE+DmC1OuB zg~x4UdKu$=a$-W<qAKlmXOnV+b zk+lX4xkj-+uiBx^8zT>@e9;0w(IIF>nNpzA+A@o-fAN0K>OR<^%mpPjjEQ3wo}?h> z?vGe8ycL*DJ|v4fDTjZ-ryf}hSpk6zI$esO0+`5)A(wKtFM(Ig$`--VToxxQz);WO z1hPrTi3s;2hn@!yrA=0M_xDo&ZFuD}I(BpHAKG_32B%KL<2X4!Yx-Akh1X^2=)VPv z&J12~&LFtXd6+s&*7UA@kR3TaIgeqv3`~JD_p3Zwh3I;bf=_)t(LRSh(4lmOzRaM= zZ~=Mp&UE{CFszZY;DABrgp;f}S7OeokPdGaATU*4gIoAjzS+5a?uCZqId&wxk9V4K zc0cWMROpn>(8s&E1}6L#;-&n0%d<;{92z)AAAwg}-Mnabx4dO_h6&Z`m@7>l8TymHU0OJU7xmDK`i5W3IbicTp*D2Ve|>%R=BQiKs-F&m%V`6ibMIyT zm)W&-EnIRqqHAN{nq>{l6fhdqNgWiiN;_SL zc|}Myh^dRQLYsQJ?QT5A0Ms;IPE5ZQ1ozEoU<*uAfGlP#AX5 zno((asP$tIGi}WyFg`AYBkIL``dWpaGBrejfyWmWHqN4)F#uxn(v8vFgwUkQIHSH) zzWU!}>MrY{L{jcF5t)egaDOKZx83xK`iq0q2hKbe*vy>Y3k|j#y+lOH2qL3PlUWjp zRO#oul6p#H^(qd5w3{dfz9=Uzd@x328IgMDNx9sI_#0hY@0!#bW}<8?0jbhV89RR& zkMiDU<9)IZ-k?hZvGhCd|&h}VJTgBZZA z7(>?{?r+9UbivQA-i$xJb2EN@?|J;0?dS3J82^WO>2^k4i2zZ~Pvx!DXHYYJNMnU- zMguYEBEa$kq35KENFuBSTVx6`I zG2nL>7vuiybUcQ~AH%QDk==)Uu7vM8Kc>-S`j+24)9yw3@DG3P8{-EtEU?r)%<*aH z2tjqIiYq)@B{8JI)Ws77}S1>8xW zX(KgsE`ux- z>mB-r{x&883m9frF^<^t`y2!g zj52uz3nMrUZ_1?~KH*H8b`+%#yK*PYElMOG;Fm=LpW!2N>R^~!mp{g)SJM~rlII11 zol_OurJ5RA#*`5`J-z78jB`(m>3HSQ)?xgc95O)C#wj$Nq?BV%z=~(UO>Z0>>{J)g zwY5Px1v$?44OHo;Y%X4;@7W+Ja_w}bRQ~8Uhb)L!Xj>*Jh@z`!^v$TU4>B*oapYjn zo(3tW&*Z+ti@_lS?>qaT{=Ec097fIM#Aq za>A7kH~^?o1NAf2!b1*I?|W&1eU7DurQ{iG9;Ibq7jv_QMZ^lKrtb2u%XcW5Jfm@& ze^42O6%zv-(*0H!CML|=o3^7B`BL5#hDjin2*g`T+a;`tNnx}qbDc;P?PQn|n`)_J zT`}&6d3tgjcmiX7cPUR`WlWdRA4Vdy6`%;5u^x(mg6^`kzza_lLA|Q9DaEF4&x?M+ zU|dDIZ98paEZ*(hL_r2Y^d99JX6M0v<@G_B#%2a_R&rM)0}pncIy<@OBq|Z!Wjtl= 
z*l@#IT!x1*f#D*sg3-ns#`KN(H988u;E$rB7|;Rp!3jr%kfj$@HlmS({llF5!&5-}$~YUpuzE3megA3vwU5@v&)%MoKXtS|HZD%a&&9wUM4;Bwe)M03 z)fqD;oiSDu@hyj(jC-b7+rc99poBF{VWA^_&gK3$3!!z>@o^B8nc1WEzNuxy6447y z1%GjTIX;VVfB*hE#{FvCg&qvI)5}phFXHliynKq_ot}*YeR#SWAH|se#hWk2Um5Sm zzqN5X{<}ZBIsWF}#rV5Fvorq6*Eh$neQjs_k(V3eVD)M|oga^zI01Kg{`&n%4EcEs zd7`lx^Pb`Br#Nq6yr#;ZV?3_j8h69-$xLlPpYpN}DdzuhWRs@X@fT3ms&{MD)tb z4B{h><5;yYsC?$%| z=z?muDs%ns#Im$0Q*aRd`R3JOZ1BUKggLeXO6?0H|c-itIP$t_Q;@*M39C} zIH?YDj2AmZm$vC@xE0~5&pf2u2plpSQ~z-&Iqq<3UkV~nLs~lJ@znQlA0Ca&nlnIW z(`VqAZ3?QaX4)%GEeEqNgWcL3GkTrRl9Xmyf)BI44S&!Lw%OKi^{B(aX``pSWw4QI zXk&E{9AZiP3y>kS!H+YF7tF!)bX4l-1n=%~5UVRgJ2YpL%x0%^;R&40{)C?yR!_>1 za6k;zh~B$8D?G>Xk};hQ$>{RK+M4vw+$MV3JOD7vnf464caO-p%sCkE-@dCI=__Qz zt@F5UFGh+4n;@wJDfsYr7s3zNZOH$_pgcykHK1PRx zj;ZJCoaqTMQN)RCPEJlcZe>zVCy~|Q2DA!h;EmGb(RqOag(xDoImlHINk}mT;=&V! z1Xo&Q#0Wknm3p-c5!{#I?RGy+a3a%PD4iBUJk>o*5|rKs1^igNR>gk#)oVF|xC zVam%e0U>AL2_$B)4J(*WA~co~SzFr_W0(vqXfeIT)E$PpzvbNI_d>XwmJs%Z5fK9W zXq(|nnb|s(QBOBQD@rkSMv)WJD#~1k2SRM?TUic$;xH0se1jI?LtjyVNbaTRDDZEF zG4~VYau&*e!Qmv8dE`_IR}eRw})>Z+v}i{FR^F8UNBx z?2ivO9><3VoUSPR@apYFjCqv9&FAlG&}+mKxjmYc!x^Pa$qr)ZLvTp$Ola~O56g8Il;2EMCF zBI71}HVSO5_R=;SG$TBWLM|I|sXu>Uv;mfN#!YwUd1LGLjNgqCv<#FYCjVYYW8ZoE z&9_JC!$y=JT|~$aj=b?}ClS?6>WowD-SjE>(TA_1H|881hPz|xcrh@@Sgt18?;Hs8 zA;KqMo`J)h)F%3+tYrs$KRLIhEd3MbBz@MOz6G*+p%`jEeZ^Otd;RbG2Nq=v>xdLN zH=frIvfy>1@L}2AhhP0lqyOSAePui@FXk50o|zd(nQa$ST z40=cppP%0qcU?Z1>w=s*`s;K>9P-dzKRid*&!G<~YjP3Ym3!uGaq7_B1Vh0`E>|bg zqYOM4&C2&>H|=>I9vO4r;aYz>F*$VUaBOfL@FR24|INs()`4nSau>=r0~B3Ue#$xV zKHC<=zysJjneh6uP8laF;$0h|E~_!D2$IbOPggUg{y7O!c9Q1AvvB~@H>+M`U+4zW zJRC9NXm76-lZQEK+QzpRp(i}a%ykR)I)JM+;!^A8l*w|z&6U+nf%om(lhX7ca%jE* zPBP5({5-d`7I>kr;XVCe3+P_;k`qH1aM$55nda2>;$QpqU;T~FN)9lLT*_AIFB#RO ztiIOnU~Y)9sZxTis1aVCYA42Ekun=gI zkvHDYs8jaF`NK#dnAA6KzdHAr5Du(TPr^K19aU5WrbEK0a-GCNFs2H5PO%qXu2&BesaALAqcxaqQFhvIT;eL6vpE_o$5(8QvEkgFrZIO*@ z(?jJfglU6U0u^y9V|KgY)FZOOq~|dl95CZI_V<^@wQhe#GXXzk>UhPYw$i|kT0G7= zdhoVTnj&}>l2fz4A8$Q<YU{rO`2;*0a~XSS}#pWV6|e}3a){Dr$O#=rb>G;}uX!pC{9zXMNHGU?J!2Lh|{qa+)_v2OSI{)^M#)?#1l*rZR@5fqr`7XG> zj`4bVdo`}U`R#G@yMHh~OncA2_XlJ1JKr5&zdajA@86EKZ+(0G()!K#$mWF_ zBkQ8P?ktu%%owH+!P#Y&+K616v}zcUlTYN;ZDXL*IHw1Rdfp_0kMuNpBwu|KTk7QS zP!j^QT_C?!M<&N1O(MOrD`b$Q!tPnsf8= z@UZYo&C)aU>rrHg!@}U`2Sb&rtN7dhmwTOcP49@z1xGTL-bqk=MCM?X;ihBAB|4&+ zw6n^<;Gkox%X=+`KKiImRdn*p(Ao+rhPv>kj{=^D4spu<&8#N+a31uz`=12H?v$6h zIq(9Pccb5^d| zOW@?xypixCSDS{@Y!);N4PM7)@Z z0CGScBAC*X_8J!bX@fD%rFygkG*v~Y?8_Zkg6AsH)b$*NgqUYB?l+6cWoLm~27!@2 zi1&|>URFRb((O82=23XLM^Tc7I66E@P6oklYBO+YMcRm`s}FX^Gh^EeWh^7%Jk24M zxo*D`Z_@WJ!3o1qB+H?OI&-;C2@x!_nbGXXPJF?K_Tji_3aogFGecRJ2XLQ09X_s% zosz0-b)oOX=0JFEkxKx8EtF6plIwM0aBm*a3;j<2p>j$hol z9lx@5H~!4Z<@ne3@5aB8X!$R!oQ=P=i|S; zd!OH9{MDVC@n8A+=J@rOPvcL2Wqthd4|m4l&dPZCa<}_}uSPIeay|y<&%y6g=n;q` zWl{8zod*%X`T2M}jZrtdJd+5mr+xC`wX7yqku65R8?@+@IgB=Sj^R3! 
zY#C_=6JH+1$u(S9W2~>?c`|f`zx3j?6GbR6vciK{%3qR zzJ{&nCei*zB3kW`Cl1(oB7HnZ{^$m}t-1`}!=rG^xglGOns+_WD>va)N}kc$OJG4C z)L^Af^}s_XkOfvl4FVf8w?&?GgB=X@39aZAh7bI88iMt1;1ztr3%GqMKwr$DE$t>M z^t`|UZs8Pu4OghkrcC;eW^`DGMDiQX-N!qwUxq(TxMsLzH?M;ec#&yT25enc9S78; zLYV<`V8Bghzyr9a>~fR@2A0-FZ*nTrlYku@(=T@Eo5QkjmZIe=eYN?})PLqH89tj1 znLSb1hSIzN%4%&9yCL;%saYpKaySBqdN~(h;3zQUo{=%uKTGQ9$TzQ!icbc0>AZ-N z&8v~c!The#gT6O-d-Fl;=H|Fc0HN=Ddk1yw$hCp5 zhzK(!LDeI&!_ou{Aq}Dv>LNT+ln)UGpT;P*xA!Wf7!#5)rHH)a>FQrh{NW7T-CPx+ zU?iYY$@OSMKun3@c)U_z7_+ad6piFl-3X2O_jdNel(HFtiAByay;cTNW+8YCZZInD z>dDpU}0~v1b5ROEyH^>f`9()4N3K7zm660*>wcL?Q8Z;Ry~VF7S)tZ)4t5 zcsl-@W2xFr0y_Wz|MW>jK~&JC`D{o;SzaV^x(v?F<5fSt$o)lKq6D;gybDc@1*WWF zqQ}tx=`LsJ*|f1XQ!6E@&rs@z|g*VQ}b|Uu=cb~_8qW!OI+>T%TBm0T| zug0I)y&He|XLrV5{`%AScYbzt{I~!4o$+_R_B{TFfBJ**-}uG7@vnUNV*IsV{rdQo zKk{;XCBfZK|J2_2sh`*#FL$2CSKn-opZLaJX!1N>CL-T?o#;1$b)P<6hKAUn!xo|3 z3z0*d;R2Z(kMY#+-hMIeKmUB(eU|f!FUG_B@?bZt(3jIfNZ~ErDoxRZH4d73 z?Z-!NNukDMLAoEF(=$@q~IR%i)z9yYfZ8@PdOvGX~5NBwJnj6TX>4gOquKhb&Yc=Y(D^zmC|-9yPZHubj?&SC{167M(n^Gj%278 zS;RP1o|$@^j6?6_1GIOfxj1NxY+!ETd~o02r%&q0MfydM;Mw*+`c?nos1tjGW3nK~ zW3SH6PC7&d&PQZ21=9vunUVK0Z4Yk2&~^;z#82oJdKy9s=$-V22aU6HT%n$Q z0VjP|rzm{pS+wEY2)snvdepf{fs1|GkK;&2H`ccU<5hLM?RwF<`q=n;+TPx>etz05 zy71)mv;kT72nf8;Xs!jgnt_%v&&6uN&}{Wb-~7+2K!dwUoE>uw z;270Cf_Qa%RwQCrV@U*+ z@XYaxiCD}J@er#nQ6jINa;Z@#I6(|#V04HpVbZ}y*^8h?%3d^RJOy!#v+zPY?x$RY z&x9Qjy_A+Rgamxe5-b$i{$lG(eTH1PNDE~s3fHquaiv|tg&@Yxz2OQIG!s!rio!`m zeQAOaV&6B~kv+hSl*4k83R-}VV&DM4i*#k@PJ}*|?A)g=vys8qsAPd7bgP4vXS<{= zr}UMVPhiyf{(BAuhL@V3%8!7VI7n&EOb;hPG^S7L!r)=Z(g?rAAxuA)_q0tB@LIc~|{fT}Ff5VTH z6S9ny(ZqdoHab@bUc+mKA?oBU_`l2N(ny`6$Y$r=kpEm`u%wO2CpgqcvHOHWM#_+( zXjQgUCrotf9HaEt+yaUo9~fR_i0M0#ZTH6y4d#2p6QD)!(6J08maj9#(1p@6a&S1) z(FqiyC-slR>}4pOoSZh{fWf58WE<#2RItq}{-Dbv|JZ&6*|;jr9k<;KaSd!-Leh8yc8xKLb<4 zjDm^k(nOfcp%Ml?jg=qFv!I_}9=$9-$Q&1jz~Ag|=qrF@Z{RB+Ec%r|G)|>p!y0yT zR7Bx(9sGTVo7j=9@HTv&pB;Cufc-M`Pv+$C)#2Eslgn4z<2<_Jc;Vb}av-J-SMaIr zrE{+z_CP=fIA4`=3ChwjLdVlF<|2hV#~hH&i(VLYh5|7tlAD`3W8MyREf<0^v_V%r z35Z2B=WUc$2^(Xg^Hc`FC<($4iqr#EdObv*PDX^vOyvN|a2B3itI-al)|iIy1Q1pU z9|H%ylsTosd%Niv#Zw}QoeGgwFo0|J8pb?b{uX22C?d=w(qvS4uIfU#nHr17oEA#t zFg+$T*2e=R1p3Vg@xmBg*C;Rw0#g%QWnjS2NBAwU@X#@A1Rw6ixo<7eg=&9w=8gH4K!-jBy=SZmU&zx z>TnQX3Vbz@XEO`IyOiNfhjGvdTdV4Ia4Z$}O!F;_fAB%M$|As%$$XRo8)R7EA;$7N z2Gz@j8kH}`iXVa}OSmvCB|OKj(zn3zRD&qOk3tKM(@@j+CWkdr&4H~P=$L}zIZ7{` zh@DHFslT?&l`s?$J*|}coAj0mUA`z zdOSXl0snMH<)_CTN0)AY_I9Y_<3#FTjHmbSYs?=mb3Fq19NIlbF``$q5~1BnV4$dK zbRsMIvW$2g;KYm*CxKI+m2rB$^vHL(%4ZFB?$sesjZa_tyB(j;Xg7Z%N@MMFygm0_ zS6|vRbABh^_tTe^C>x5mI}_&1ZE#H4t5RB%V@W4SQ8ET6r)Pm9j${lN*`Px>qLjOx z-jrqN?BvK0YMLIRWGPQD<3*ojd^7bkklH*;gbI!lJVodc%Tn>Py~QcR$6I}gUReY# zc(U_7Qn#++WkwPlGxL@5sZJdx#5ouRidUwB>=a+=$Lw>Q+Vls6!0?e$kKVPbBtzI z5&-BD$ST*KfBEudVA<}bJT>s)HTuDGmgNfI(uD%9{G9ZcXQPvXrtn7e8U2<1Uv@}_ zUP>MGOzz>M>ErSibbb!;42ph1>YT(C-BMwG*A@qcIPM*w+iPe3PPvj1s@r}Ca z>>BQr?Y76!Gib$;q8AN?reyiY1Jl01S&Vv}#*_~Z*Z^(1kLGm_@=SLTHJq06;GUVJ z=owj#9UC~sfeP+qgXlxJ=M2s{AskzQBAYieF@}QYvRFD3{7((o$iViFY?)P6ChyJV z+lGSaaC(y-r|0Pa^P?`}#K=^goa8g`$_#Y4LwnJi97kDmb()(2XLt^-+0=A8H-gM+ z-5hf!A!cDjqkMu`-j&^+Lc&~ssG>eSxoAZcw^_!I5u$F!66I|Cz2h+H80GD=kwXxn z6TG>(=6!lksX7SeCD{dTF5wbkcOqF47kwOIF@7{tc+-n+ItYRZSn9(J>Yov0jAtq) zJTpxf#0FN3iGZ^X20jKgQ6+>l?Ug?1#}$t=Wncx-^joyp_HxaPPeu(9c})n70TB@w z_jd#>cBgGohWbD`nMlJcG;|U6&pjzN{Ast5d-{U; zFju|h;Ca5OYAuT3Y}lKCK-gZ9m+MVia0)&XbLN`Rc;CSVI4KenpeV34D-gnXt~;uD z%FJMdi$%&Y&zPV>ku9V5I&YaFhz_3|cCuSZ;RGBRHtf?W>!y{+%C=jg%9)*pAxz@2-$Z)r^lJ zF9(Gy0%d$vC2ii!Ukc#|hTh3(7bW4#FOTpSIV2hM3toc*dlPsT9|OzmWlPV<(R^W2 
zobbrFWvMh9MNHpYuM9p>t1sjN%=or=P8srDeizvWPHlExOPo4rV@#w)mlKZZJV&Ez zeNG+twPUoQ38hQvccwJlMW1n!&%-m?Xy!t%LMvIZMzuLC5hl-5SAuoDM!@*7eJ|ir zS^5BbMz@$pNJOeF5 z1oZHE%2po+$HvgjEkb*N3dd4ZE(2=gv1S${gMvCMu(mgMqGLD4X`FmEinEGG*dp|y z6PuX{BGH-7w2{%PMD06)3vZdIT-*g;=-eUEFrsw-!NFWciT0dib^;&IIcIPbz6d}1 zEu&p#o9Q`*FW4pB`Dhx8mzByO6|(F}4`Ije|K0*pS56eBXqq=bwTQI|AGBZKsX92*NHEEsP_%9Ki!NsOO#^ZR$k zMJ6NWa+#38P$b&w>pT+~N(V94hc6Gy$e}~(o8vQH|95_N6^4K}Jir5cNPUPHgPdzJ z4f>3UMDK*$de$!Cg6FiwF|h=sli31GQoZuz;y!S7AX$LZr`GjKJ=%A|Y`q$Kny*R+26)G>X(8TuGr6WH8q zG|gZ~kniIq--VtxKl)-^|KPJOd%L&hJfF9pzZ*}9&L2eSx%Tir3L{au`m|RkGyLHb zJ*55udcFp3{Yd}wvj&3`zNphKf#h{fXHj`g2(WbO-b}sbj@JnVam|6dW>hiIgBIZd9&zk zG^7k8(&bQj4n8=YIN+a(>+AvnWX>Hp3SV@B7mjf3627y;S1yH1w1>~cCXfnEEl zOIvWKFLVWm-Dfvr3Vw(pL(W(>z{&+WTK#mCz{0r<3LsrS8DMMe50Cca2)&HVn$u)Q zJz3Xjq#2FQsfvz#VfZXfdSxkHohyOCi?{YmC-vhkeFs;Kb@>t9wCD(zN0-B4f2AD# zY)E$*tlZWG*_qV|4~8yu&q*9`li%RFa~aZaD--AdJc!OW(aV8rG3w?De1^SuH%V{838osGY|$+Vybc@gSnOUZ@_$zWu! z$Rn6x7|(EouCL=XR{w?{(kFKFn&U;z81d&2k|E-yxLVIE2(C99q-=x3cg!EqDtX=nauT#nU!I-TyuRg?NojyP85E`3NA#;lnDe7%ti}k zI_rKJ?=%oN8oegUHRecp>RTSKQXw3KKm=rBWLTr^G9X~xOP{r^ubhi2tdy$VNJnp_ zw*#N_sYyL0#Z^l>>PYeoYg3R{F%2L8mE4qwQ+dwEN8s! zyvD$TeU|q%G$ykNiB9~q7StBo&bH47(>5aK}g7)xW>M+B38Qn2b3^83H69NYg@rZnfL}QH*uaH!gF(sUGzQY=a`e!y${mX(aWRuS5%%( zq<`VBqG{sFfmWOjm$c)2Ad~$boCPj0aK>a6=rEP(BL~Yo0fvkoQ8x##8RQsv{id?e zrg}5a!B_iFuJavDWjrl8gKx{3(f%xsyvct{2TY$&-oJ|jXC91z(mo^{xzHx<*o{&k z#|~AGrOhSombQp90gp0AV1yI!-`qu~gduCY0>AsOzw*`b`l}y}i*uQUv)CyU!#BoP zzxMUOZ7byjw1GuEoD+LkkP$kTUhFEQz`{XbN1$7#_9S}BP%~TP*;V=jmIw(3gGCic zk5`};V=@pd)ph*E*|@zlrW%3HJ)XAh2dvp2W1lr{o`xd9A3Wr&HI@>|O@3`37N4S`{M%dSFUGD009Lk>9XMchauO0h)7 z2ozb6%fL9KUe4kT64cGU$}@dUJ2qWNH{z*EW$_SaWJ1{>V*8%DrJTGkemUmB&}n=2 zIn0-OEBxsLoKiGW>Y|mxADp0!l?TSECbI^A7%W0%f`wVgowQUXKxylTbdVXAQvAp# z0tmqQ?%Hzvs*`P^Wb#Noa)MU)!uYmw#q1rUi~{J4;Lw=xHH{xQDM>WExuh)Ou;dvG zZLUDwiYQ2Jmk!3MLbpc$xu^f?qWIvC-zV}$Cv8%89aad;UPpX)urtm+dNuA+mJOQ5 z$Zf_dxJf=6X)6Mc{ZU zE1xA^g|nFrgwl6Z3XbNSgkoTp!<4LE>U!!u_)yw)_27f$0dMd$pLe@>ZwX~Rmpd^8 zH{tpB`*DOtHGyjyc*qaU<7jc$_m9uUQ4}8WL~}+6AL7Y?QhuApoufC?@pJ3WDC^La z5pOCw^jaF*r2{8bDy2*}n6RZT%4y15+5tzL0h5g|k_f6!dA=X)?~ZNRpTNkmHirii zJFg&LWs0OJbEUjs;}PSTCjY{pRV!(xFzrO2EnnaShL#6^@ICd)G?MNq1HL*VfsY}p z10Vggzh{@b$X{UK9C)5C(4gV5IzOqSo1_F5eIw_U$JDQ0%ZJrVh8%YB)c?+JFl?6N zG`#5D6I~{Ur_e2Z>e_n-p7Q|@DQoDUkuvUtbQ|amJMq!SIUR_#jpt?-o?rF zP6hIAFOkbcuctO)y5ZQ#>@MaDFk5MaZVtaI37>>uurZ+zqHbqJ03vnynnj0upMfhZ47-0DlXIeH`V z!jTi`>9h%7LVQZmVH}GQqfGK_E8=Nd^Vs##>vPgx@dkU$>s4+yx}h{F38^3m7i~&&K!navJv^S}j7}*JyR?BrR0^>o zY({t*E=UX#(k7NSPGm1<%F6hbF>$GCGZ{tim;#)3)idrPa;9tuCT|c zA0`b~l^3QO@FWW6cLZ_yr!S)PrsK_wy^nFuZ)tN|wZciG$0NOlk%Li?WCu)Jfo2gx4vs6@%C5*ApJg`3T=; z^F>(AUzw3TW15o6+?~ z1CiF~EHWb_FoPimQy=sR|H~4RQM14)>!$UCGi_;;vr)auiAn=!F;X^^QWr<*%h}j* zDmY2t0=t9JW|-l6I{H)prS*L0bVOeX(jr^>PKHIX?xBgk>l7Lp2Jmin#>y+eWd=~4 zaca;Oc-w5)q{9pi;--Vuw0(Zd3Eo9-nMfuwg)2sGx#9tT&|{1}T+-+2q#GRg6JOx% z(jmX`hud&kya%6tH&q{C*dQ4m!edh?PK>z@vWI;OXB{NZsE;V-H@lXnZNw&8rc=$T z-w(g3y!8hT-8FPd2cwrsa_Y#xiMt$V2hW_)PR=8bVNCq$dCr5ZfxaWKy<@h*+s*8z zgt=FO9k$Uhx6>Z_Q5=PZQ9gbDMd%iNq{h?}6qJX&3K>~M?m>O^w>N9*uSt5Tpw~ED zXeJozxWAial)^L(cV~;HBN~W!2B5nj=(@A{R#szZ%t%i;28{Q}i0YseCBZWu|_dF)M`T z#7yI7Jb9ti(iR5jwvH8wnYz(0GurJ^)p4LOz3WawSmPeWCf&^dbXi3rk%PTNLLnH& zA=sFK(Uj(~AxP^8Oc;Z=X||_X{gi|V(WE4^WKA~FbMR7*1PY!oLz=6?W|-YqCkF&= zWfCwHg=ta>hMwEIuGK+;+irvux=`921d4Df(Yej2)S}}qkp>#`sJr<^+QIl8Up$P{ zv%5YXcGB0M{pz@hVrJwmX(MEj2YzVtJl-IJM0FFV=I0=mq;sD^e2^V;BKEV%mi3F0L zoWL}vph_g)$wO36PML^Gae?@GV^`YMBPgSYi}(=+t$=QNjf+a4>?R)|UtNHK(Ye)Ki}})G-~vK!DBxxTk({ zs04ZXi;n`&+`l3UF+enU$l1sbKGPmuGc!qo7Di=^ix`MJQXaO+31e^KELbQ=`pk05 
z$?J0*cdmmEu7V6kcaHbB+MLlt;hhCZ`kV-_@A}9+_#lt;6&Sok2F&o<*QC-n0Txfg z5ZF2Aob6^O(;g$*O}e7rx{Nd>rMFEIq$}tj@+)0`ab~7>_&Vk445>B`6eq!9WhOja zofv>S+z|Z@chf+e^V@e0_C57RSnfzRsKkXx#6s!q+ZbFpCaX7b>hY)D2;m$4rc;$OZKl)Vks>qKW1H5Z0|Pf% zr%NAXJfmav1%0nBLW|V@{3!S_X^%6+Fo*8~Yy9D6O3~fhOl29aZ{p0PFLPP6`BCXh z+V(a|_6?(TeiY=uacl_Mk~Q#hLRO{3pJTMU9=3)GGo~{+XY^&v<1qlH^I?FkZN_lU zH19fb>Dw4T%)>Jb-=;191qYCj-c{gJM=qosA)R9`l!EBlJq8Maj74)8I1r-ht=$m0 z$Pj@}Pe-bLHPO;Tlx$U2kYfwnjYS#WGKZqeQBpU>P-E*|$BREq4@sDaz;R&C&ZF={ z5W;T4A7Y9mAe&N0aO=hAvN7pzT4s8=A{NZfyN2MV$kH}J$ABiRHRDlYhyvydAv(Dw z9jHRf9bqpVo zJDeFxbh9IqsH~ePMP8*VJL5NSJ_eriS3BeE!^80q!Hz7%$ftg7m(oQYisx_QyH+ar z^EdH0Bla&N1T;vQ8o+!vo85M=jrjdec}wS}Lm-OJJ=YfZn^F$1xCSTX(;AwT3(mn* z1r;!~(F=$^zW{Bv9hq44ph_V`DKB>%TA%WLx}Pq)?~B6vUz^die647B-CPBSI*W%k zv}niuRG^w!#tCDJDZh9o7w#fkpS;`|SCLgpCJ1SI9-YbKc5t`zy-&yJKJTI?4yFlp z=7z)wY^@FZt5=~ep6h-XcP38F@p?Sb`5uwaMxi+w5Jpm&>GZ}h#!%Ftz2LylXuVEn z`eCDw>JIb{PRNqyMMadJ6*J+e@;Gd9B=94CU})e;L^-pHQKpd@FA0)~T3f8QR;QWs z=|Y~jBre7z_bJ$}>WL0fSBKbY#h9T<2T`qqF>bcg*0Sv%Cu2_7qYDi!sAn3fxt1P2 z;Bh189N&HFj%#P9r<11&vF6fR@Y!q{S&32dzIOo>dU76|E1`{kqbJ8^xdH$_=m)w$ zUHXP!IN|UEZqAN_4g)_}Z#cU_kZ0^cV2%YE%naVt8+3vCniW!4J`EATlfE|Fp}v&k zh{#Y(r$f5|l&uJ!$N>AH{Oz4rEnfv^hHYY4WfP*4^?|d7-{@UVmOdJn?=VkfSau0s z4Iio(PYFce0Bm-UV)L-B(;r@Eqfam-iuR(4mS1ECnr_Y?$A)mu zce>9B6dl_Ro?IhrlmbQ&p_Zt*pbkn2qb>KL4$g@54;m6A2f9##vL%zbW;VoT4QN6UEDbqcF#K?N zmHr2oPV7rNXrpY`v8N%C3?FO#C*?4oNg0j*{~6gj73Kf=H^1dR*!w%r?O(5mP=@{)_k|V2E9zt^lLw$4$r@piPQ*ZTIo}(+= zqkK6rvSkbmo>zw6;dqk0Q?t71jr`#T^~*lcaXb5IJNl^Eqrin=`Y=hzktS*o7-dv!ftPUTVv`I;Gh$u*>pM?3@hnB!{a%CfK6s37wX`_o3h|LIxu``*S6@$ z{B{41@KO2n(Yz-#nZvuvF?2hIZ$;$6J3gal=|e*Y_*39OHybK>_wHQ-8M<7Gy|ec7 zHzQ$sF~8Cm&Ya+^!w-=GG+oZaNb{lV#D{k10Cu5)L}(`?Ak#53xzT3nCz$nbeLW5h zJCVL|R16j14Ne~WXz2Fv<*VEe%@%s9`-`NGhsd09c)C^~%c(Lfh(_oL=j!MVj44m2 z!fl!YGoi_s04Tzm>8I(D)|lnN_m&Aoxi>8pZ&4>qP_vhF895CC2(#6Im%2RRX$)LL z<4`<6O+$yN34L13y%?r2CyyjJPDl5{M7BbNJSi{25TVoo`X|5ZIhNs5FJWM$>uE-S z5h0hGfk=C{sl+UZt%6JMX-``wb%+fi*|Ip%Izr1(%<;7_Dr1rM2n}ya8w8y&G1M53 z0dTMD--&NvnvsC(&tunq^U9^ZD(#%t@ea*e6@7P2Wtin!6p}LDqt0{!O731)+~9i$PdSiV366No7&C z!T;wkx4X;TV_@rqvQo;4)(l&CAy-pvf7h|>b;1{1RRG;rei}J{p3`=U=DIGY?% zL&z5P=YbO9Nc~{1^haM7Pvo43XY~P4Kq{_s))5I$+(sE3$JuKdI=__pcHz{gO_pwSr|KRDAST|vRAZ`L`LTpd?(knidgLD5^5pT1ZV zCC2Gr;D#eQaTH?~o(9uijCzeu_{@9CO(tXT$iHku+bJz0hpFFquYTYeXl8i3ZE4`P z@fXKh-Jqv$I^m6BN$1F*IWcy3>-zEZtJ#duo?-_NTB29y9t6$?8)P6bkay0LXE;9` zsBQ=peB54&bT0~*DAAl8Q7IW`fI0fhY$W|d*BY*qu`4YC3!?$k(_$*pH5mf}1$2!S~da^m||mqO8zpiac^8QfJ6HnjE$n z97J-`V6z=@bU44S-@NHO4)RS$;rF>;1-&?jr|?7r&^+7O^Ymweu};p;Z~6}|oM>Sf z*u+Sg2z2E%b3Ewq?cIZ16Iezk=MSgC-BuhKbJV=sKx!Yn^d>%Vn6rWZIczh)K(}ej zX_q2!dv~`fZXVfk-7;lmZ0qXcy4UKd<#C&}Ak0dHsEUrlpioOdI%_h5SDa#)65ElR z9d`{ec~G8nOL=|axe!p}y#%pioP;aLYcv|9LT-&vkaQh}*4dV$pi+lAbr9^0Fm*CN zF=&Sta^K`Seday$eywSoyTU=tj8;t;#6<)Fb4gu2-g|S~Rf)u_?wD7EC4fiB6Dg-| z&c&Pr!@0Q$5jZmDIAGcswsfb~fLk)1!wU-fIr~sk}LJGQbAlwT+ys^tz9%{jB8ND>Y_$F{nWvsuH z6E#OrupMEx6Cj*<#$i_LRr+SW!FIfRn;}tTgnN3iQOqgFxgr#lwg{YZDE$}b!}E*r zSz7z-_3pTefIS8$6tJ=k4dI?ficn55P;tHeoLIb*^HK&LDWn?9bfBQ74o8h+jB=mP ze>JT6tSN0_pX$$-JfhrO{^pyANo2?nryj;8pUbmtw|(WfC+hH=I{SbAa#eM~jR|zd zeA^$0PMN}xXFV_` zz#qzt!?*9o{^xJ|f*Ea7Sl!+=_l^I`Y8>oGUh70tb_oii*lN_mC$dvotwIhS@Y8b4 zx*M*LsgylEK8*}sMM2GIhNJAQa`TSesNKp(tC^_ebNXzvX9%* zg)Y!9uuZ=le zufn$%0eLS-pl}BU&Zq3q z+-`UVC(`ILv)z?0I<&7I4}5(MZ5%Z?X;wS;Pv0M>H!@J>nXq4z4)A~uo%a;_qtj(* z3OBo6kDET7U!E17>f*p`8)G{i;u$)a-O(p}Bx_+97A?`f`Xzv%ckOMOw1R*`l&*?~ z7fcZ5T#MIhoK!R~U2KOPkNz0wJWXMMJC!1al!sT^K~%@`Aty{!&o18~LBye>X{1YS z7(z&H)FZ0Or;bB#$(~Nms5=Nr8yG`neO3)BW{UuU_0?rt0<@KKsa{%lw 
z4M&DLnq>&w1Wmgp+4Oa=dF&D{f}74pj^I)pgapxKK`@J~$BZt#wM`%4D%eW@!GsRe zxNmhrVhR`k(vsR0af62v=z7*LO{%Mqm^jx>hN?U%o1iJjoylZ==TCakkPCVgC+Ho- zQ4l;wP603cAj(G_RZLL~aMH~)Ql2wbK3gb)I(h2JU)Ry*o;T-Rr~NN>63M@bM;_7i&2Qcsj$voYFIq#Ppsh3;}d@(=yjG_u}woiE6 z-PW&c{tuq#xP9si0Gx-}pBbHeY1~t$b@ZC=c_uCNTK*tp2`}8Z?w22MTZgAduI1jB zqO-WvBR}xQ942u3mA~{MYCXSGJ4r87&weWY<(c33=HzH+d@6GnxL5HT`UdXJ+v~CY zgYS>6@MU+ltZb1nM#R|qbkb#bwqk%82Jm&LCeB8Oj#8fk57e-vjfe0qm<%(S=p>?L z(DmC27R$)?QjTh6lYs!A9A99L#r~HRo14}tJu#%$ei$|Ni|rKR>Mu7KZRgw^NrvOucEM`b&SnIq4O7g%1eHfo}AZz?L)0@y0v! zl9aj4Yd8{~fk%!HzMeWDdOET$bKIaVbm|5_fm@rE{ygI~vSbuLy?l94{x|u~oHBZN zbK5+xz@F!fy>koS;m@L3YfI6vC7NR5=lc)K}m0p8F+ukkqlN0Dw5sG5M&lO6LJaIff9EQy4rW zsV>Sy2RzrbOo(rS88-poWJbO1NME2aJ;P;0j#bi&f1ln6=3XiB$*Fyyz4#WK^vgHsm; zNYMHchbT_seR1UlJfR^bPH9Th6gELXJ3IsdrAO-P9@sIsZ7cUVX71x1ivI3tb)4t^A4G6|u(dMo0{4?) z5AcRZxEv^CM>zb2e#^h|T2CYT{!!YFN-$9 z<1N{C)e@xVVb3atcbv1@P$j(j{?*=i%PD6#g6kLIqpcL#_~f&({_bS#G!mec=WO#T z&V}Kd*|C}K?IgD}!$28+!uM8swEy|5Gm?K^j-(D5ps;?8Bx7}(cF}P!>YxPT`!4 zMHy4NutITq%Nu$#1WTu_4!JMdib9li!ha=gc(@aDE3eZqfrkvCFM~}+)el;)P2D>@ zh|I=lrf>9|NqgXQf9j{SlT+pn9B^WWwuoEiQk!_XDfr-y{HRA}qbm_2*PUFLKl2Gh z#$5*a%%F4V19)iXI{m{Co5;#gbYrH#+ekZj#FO*@orw1M$@RlmQr2573tl>s?%@!~ zVqV__&fpDwWEUszz#&ZFv*;SK8oJ9Sba`F+;d&?W20li&#w+dXBm0m?>qyZ<_)K;i&vEy5=&?ET;f+ItL9z3@OOIL{HM|l~J_QR{CRgl|FQZQTk{(HC{YFzbJhJ zT4~Gha}42z9l4A1CBT9cx*X54XZXdcDZ@sxtD0j=yjE8^P7j?04*gu}d3J6FKGSi6 zFT@a2#kRYnLE&*=Uk z9ZVY#OAYEgf*NxvbGWwfp4Qf8iW{?I0I*r-ef<73-wEP=8P%+G6Jm_}aRQX9(F@pR z^uQuY0l$qCcvK<<3Mbv1_{eu~O2<>~^-_Z$jEu?E)v^8j)*VatuCEsOYxEvt8)3ol z+XOiV8&rjdnYi1(A7_&0g&GCXmFmWnQPR^P-=WM*}JG2)Q zQPLN*rho~Y&1N_R+Ty*UWx7)QyJkqnL>|8x_wF?SzA!#MxMdvD+5LE^%Njdhe3z%&_HSd*$)txIX zH;>krYmX+_y@{fP|M2K);9bx4{W$e|-}%8f2>z|)5K%9kay-LO#9nq{kb2OKk;ji! z&vAUaFafZ~d{pD_8PBX?aKIM67=q`?MR7UEa z6Y128&pFenvd=)b{_XD1b>f{Hq^;28=y117S7psiY-RA~b944QKC`KmYyrcsej9vs z_$~U^>yH*JFT-87}>yvSXBQ6Kk(Jr>WmDzNjvZ$5HO|==BbCt zupy2a9F$fmudnc1I6`mHKK4?_bm)Solw~L#GKT0@e(RM$05vHC57kwv7hcIxvy*ry z%;=f&PH+k44&?8V0XOv;B_6Z^YfFy6UQZXsX=RC6qufR`($j4iRR{afqyR5 zJlq9BxUss^eGJ;xQQSu(1GvgJ?$KDxOa#oaO$D73(0S!%aSj7ak)eZOm&| z-f|%ftgHzy4-{jNwIr?=DA;?MlIUg;i|cb8;`CrTR+CQzA;g^r5N4GwU#8~PW4ODF zAx3?B=V0va9Tv%GgK6qDg+Um$8l`w;C47|P+1cD?u`g^0e!Ao*u=1#*iz;mOxKal~ zQ_eH01>a->jsT}Zf4aF$;4=ePip|n10%x5yf#D5yJdB`(h!woRq)n4$8sSFRE&}6q z&dw1aY!Mv9wHLumJz2L-8S%bNkkKDbMTknE!(95$Dd6-p-Nw;KxyyL7mfK)ceOb$i zLDlb}`SviAH7vWC={s+!LYrkuopaLVMa4-Rp74t0jrwXkN1K^=XEudqe1Ca;Ndt8n zYi$-JaHQ%mxvesJ_>4Pr&v8xTT(-f49|Az(U8K*SMxp#p`u6eO_P7oYnRplAIw=pw z;2VDwMJMn09sJa2I#U_O$Mnt2v@dBFFp>67#4AS9W^PEcbtY?;4`#Zr6fYHjX1K~KltHT`}|!TZqD*DHBb3d_%mu} z$hR2i^o0&+nm9bIoCDpLLq_1MvdpJUAii^OfEks`7%X#Pw!=F|i9R-K@FHJP)HW*d zG8xRSC!VpsTb~#n!#Vg_JM@)tX>Gw>lw0XTRv2?BeMY|yeXjRi-3z1U;AGNI*Ufn0 z8IkJLJ&Ya2Ss9CQV%HfxSsdTt(Ec6EV5Lq^;8F+Y+GIZ2Ir8bANLTdA*e>S=^>w0= zH{e)7yjEM7y)tfJ>X`y`;f(F7~PeWHRd5r!IR_ z;Dxzn+fv5OMwtQt__@DNv>ScY&0!7>rvafeV`PJp;`2W@RF_KWy6Ff`K47E_X@RK^ zrObNn!JVKHrGmUJ`RgqH=&`PJ2<);Jj6MW+NHjXC^0Sbo=`20fkj+4B1}fkY;o6W4 z?{aSU1OJ;3U*{PP%yi%kk?JSAB<(K<%){j!L%*`gchOI58eY*QJn4;s#^bHLVn6ME zN3RN$!FPRi-kn&lFT)Q3f0;q3vywi*rAz|{hE28pT^2;-9?C?SCs2)y2mmK;XS7cS zI1zOQV~E*0xab_ToZu!00`R#af;@%*(&JkjI~CT;i!^$16@Y_?tr$BIPY6*3nT9$s z>^{VlUe1|I+RcB7lG6bU$os1T${z&hR_I|e1wfJJvCL~$HAcng^`_D}9hxbbna(bd zF@6X(C+GwQOcZ1``U_l3*rt)dAd~USy(xblqjhq6URXE@XJ;mea8mMI{-)Px5}s&* zQJxG0k^0vO86kjf@T!-Qbd@{)#Ekh)@$tSfrnRb+6y?HjQ_PeLupaWC2`)G3$GKE_ zaBU4_sx5=7*U4k8a}zG9anE6l9xfTirY}dvFqd z__R%?dmXO0zh3+%7>StjEUKVsXC4P0z;{PGt^{^dhC z9cimJIloJ%e99ugLw>cq?=6~wu64ql_7RmVRKRk*e>w7-a+?R^nF`!!^(vcqaETPo 
z?QX%EcJ(`La_o-3@^X9{Ww$CS4V>#ax4(EdR{rpN<34ogW*pm_mH#U3v?ChC-}CKl zllMxi@bnTUzh!$mQ3|Y)Y5HlxG~wfna{{YV>d={)Og=?MGdsi~W&Ak*0t-%mv{9nC zMBMP3^XgiLj?(*aMsW7=seC%ey=TCo4fGEX?GGhSXE5H5NL?NLri@dMNXsTvbD}9n zq6#zR((mvl+DRodLiE`A*+u#yizd?(>IGkSiN<6B1OyEJf~rjwbF#=E95;O%JS<^? zOGXzB$*gT@&(ANb4>%agqM>@yvFa{EZ2DXN#%qqihZ21eUdL@C+mpWJ zXgVhPEVIVJksX0}UJY4_6HXZ&#G#Twet1k!7+ZiQwzf4(AD=bIO}TD7l=>RAhgU8VTp7xs z3-#X;LvslX0m)zq8aZ#uaw_ZK1$K4FLecw%Ip$_>b6$znM;;#OTGFkY4m)iqbk2CTz)5p5EIOI7$e^zj$F?c0AAkOqirZ90&uU zm>oh94tCT{k@9+Zn7mnxzyqf|FgHU-STGR935kgQK#_nZ9l;oLQc)sfhPkEsS&iLI@gIM4#R`bQgRXdq3h3z-Rh1w}dn+{kDt@U*uIv z6f-wk4?`)7*|cw25W-SwuJdY_o;c;l$h)WS1Xn-GsQE-Yo+Ky6q(tR0i%m_?T($w@ zZG<*kHsc7t7)!m9dB8tmKD$H}H*s9h32tQ$ylWuH3oX?#Cx zf)bREwCl>)-B=y3j&{e>-q!fh*6R2qk>|x`Xcc(y?GrasPWYkzG! z_3K;9eEApZKzPdH(;Cbki)Yjt9&F{Ib2=AI82EE84Z0UD5%Ow>&_iEbpY4&J(6-g( zKf;qbW=h`0z?n8%T-)IA1fh+5&6n7Kv9(MXb16Rxt2*YNBcCdAGiB$s9DRV|*GSH= z?hoGVjklC)c$R_RjO1*ZnRLXSjN7}J*je7O56LJIhP9o_$kz@FtA?P*o9(eIP17bHl#-O@3 zZPSTUuHhqkI;a0nJywiAfO0<1C1V4B9jn2nE;T!x`c1tDjrvBx@d4reE4BtJd z(PSf1bAf!}>r3dyUX+ z?$tTL6nU0cZQQDp65=^AEH>wnRz{%ZEc^a}BY*o*Uawxhnx0<%)T2!IP}g+&EahCI z(4Q-vIeU735ux7f`{xl3VKFPln8O)TPcgdryG=i&C8eDXb{R;{)N=@H8QXSYyt|nx z8Y!&`LdwE{df*AnaE%2CLYH@>Ka|*XjzE__1$O<{Ws$;s82$i1vjaR!9&PXCj>--g+Yn8>n<7MgQ7mf9!}w8~?G~qK@w+#i zuIE>}=n`e_bSMUQ^M+#XAyY7)2j9L*p7 zXgq)ShaKnKjGW3&;A2rAhY{SA5S}zv-Y7HlVx%U&PeWOylWX*XtPaJbFX+md-QSBd z8^hG>N5GOQrR*6xNKi*n74%rX+p>XC9TX@!Fj`%^pK@Dq0FRC=XWE^($@6sv;34=S z_uGkr=!EOQc5k*bBb;+O%A`KErY;H{+>EqSAEyj(IO(HVcNAy))zopETLzf{q04m2 zIK7nz&`$Pb{+g8tLtD{eQys$%eKlx0(RodIj-jMa;FGTK>e9Y4I(o!-{s*rQ=Wd$0 zHX}o+5&C6Dy!wRR#XDI-tH_RngT{G*_q5P5@Y8)Q@+|#=_tZUMisO}NU8-9I>Xosa z4p|c!gfv?mS$~b@^O$lt1Ey(&@wPcUwq7+SO5F~2Wf~(q6x|h=M76G0e#!@km^&#) zo;d*>^VYBJ=#JdygvxMuE1SNU>tIM?>Rm%2lXpbZ`qrV*7*`GrJ)>OrDhLe_%-$t$ z-W-m@!~NL1z#9138237vG~ThyK{RCL7w*b$N(62W8JdGw;Mw7T;0qjZDg#B2O7Wi_ zd(HY)9V7h zDKz2Y&6`*CzB*a2C-2{uindYr{sybmuur7mX)>cGgFVf^u}e&j+hgM-~@ zG5Gs0esXM<_va^CMkVEp0tgIzZ0GEBC@m^ zp4oVRGFE^8+he7C5u+4Y#SeZghg{}rm&pVAFf&9c5}=lJe27eFqmz%wX9_T&3`FG~ z2tig2QJ$0n7aVlSR!-NYEyKFu2^524aESQQZg{DVMJ~+Y786kCq)+J7UJU@c=Ysh+ zbPD6j$STD6PK?4!F{JwPoEIONz{Q9)@{S>=SMagElABIY4fJH}?Bq00h7$Qs;ogPs z%>$XnJ$2_9DQmc=n=Y=d+O`PLj)41nf{vT=ejSa#ZRucTEBr2tXEPd3PK|adG-KSx zCk!ii&Z6hQqVMowNRd37&x6=_cNr|@cFvi7%+(q-IPb<rSxIVbb8tP@z}b!kxZLv7@K?2Z9mNHC)O2dNnNC ziP#*g{CBHkBAM=ID5sP3$60aK}rq{)wL?9#7#+!<` zcoQ4>{VE>krX8ztQ0jysq)O+F6YTfB?Y!@$*V=K2{3(lxFsiy4EA26a#v{^XnG1tX z1srM0GZcy80`K#6ugl_sXaab0dRd}VjEy?9lfDsjuy1WkbFW6g;~f#$I2$m#^vEci z6t7@;2E*BtS^Ho4@XdJnaxQVGcbYHoBm;4AVH|%(3)gw}G7PvKrMRDZ8DN6ZR9lq5 zX#|$1N8xomD-BDv5z-pgTo(awTqxD{0d0bdDlFb*`Yn&x35jU~li1U_Gfl$?oEc7> zvNJ2dK?>kG&Bj-T`QY4`cfskiVFW3)&wn!@IXFOlF-`=zW0w(9AfS~Y7r250U!-d} z8V<^Y^9ZT<0laW9jY0%9eUk+`+}#=bhhAN~nER0j?t+l}I0{!E9*(o0`pUT2Pyf>1 zQ*h8(_NfEX!2jhRpmOy8C>21?E50tj{TGjkPl|KG+=u$fK;>c|?SGhK@tx_K#zWtG zhHvoVRHDvhN&ioHzdXR+Gxw1Q%7POHXki`xnKJkz{7Y8y8&zCy?ch;IK0ySB-mrJ^ zdT;!LpL#vs1+EuSJR7%HW9tvTJvL&jz-=u$TH`s&oqTuvGjc=btTRV5nHv2dgZR>L zn@COsobL9X`oPx}CFxK%R|!m=aUNbU)(GCdfQr!oBmCqKFL8qL8s$mm;IPqfWQ&2P zt7N^rm5(u(#WGnomA1a>l)!|)=mh#fWXTvV)0>4iEu)R^`f+g{J;7+CU3hn$lL8J= ztBj_H7*`P(og?c7N7^8(jG*ydzrAMl?H4D}1J^;y42tFkEOAf+7o(R)3NPNLi%s-e z(mN>BrSvgv$KjI77q#2h1Mcg1{6>e)(}jIW$y@lUs($1%|6Bn$Cqd)BO@@Cv7m>{ zFVYS?uw(4&#`;zuW?#3vR|sCexs9`vAj$9y+hNu{UM4|xR_Tx6kzC10Z5Tq%Ne6+O zt;U10J9zr^*c@|=W(xOS9EDUdE>Ex1Cu=OXYTzh?(=);lM$0o8LF!Q*ssXDBAG~Kck&%$% zz~IY%5t`0o^jrvwprYlw>)UZ~usaTqqyeYmu?7}g7~|sHWI3s+B5s_=i^m{#r8^9~ z>PA4(0%c;>W@uJsGfpTv=S-;!?l3z?L3*aqb8riOFmaSBX!%X)Nd+;i-K!vQfx+ZG 
z9yx{}9Op6o?ls*SSSFktF-lLpG2rpu8SvXU9bEEeYp6XCcZ!-(@_| z5>1!hvk1^Z=uFh9V}q6}oX9Xujxw#r(}oE>PHTgk1*p)hjqNywPl?`-4>rc9uXo2u zV0Z|=8p*?F+JzWGsHPq?pARRamp1f!8eGc2r(z58K2;tlz(cz++!t_p9UZ`8}V)9`jrO_dP{>DgD-lA6-3=lcAVK{>S$y1J86KA1>3s z($S}wZH#~Nb03ZWZ)DQuHfvEDE8qG47~lK2FT+syFp!_MUXZNsPa}4k$GB zp|j;yWToTU!Ff}&d6rXHgB5wP(MqGTK)`XJ#2uHB3!opR&M8}*{F%`yZc``YT%(=m zr`$zO&^K*#^QX#BV97m!2Kns$)EtbY-I+x?iyU6Zc~u_&xvoz|Z>~uLd)__zW~pCF z+XenOr6N=1On5^%x(|QOtXFW_enx3?NFpOL5~pXEbp|*ajGMX|zyvSk=`nJJyJ`T# z-?DsZqH{a!`i5oTg7LsJcZpgtHaR9!jC<=z-SFZR$&nYjl#a2SZ1?DBti`ay|zr~6X8#L!4AxI?AqcOm|LZa?n57ZF{oq{Z6T}1Kl*|b#38aN z8XDG~aGvgL6&!d%Yiq~psfWZM>v_v~;D(ZEdN6fKh4aSighVNb1n%NpO07951_oGNwvZ%jdx?B`bm|hN zTc1BYUJ6aB$_fJ;UIbSQWdN6sX_)<<=~u%m1VWm8Dz3-WWgAGZsND%((smg4HVS(> zXq4#02byzcL~#z{uMB(yu@%hAYwzYKH!kMWng2wNX7W;w_=v(bd}79|eqE$LKZ;C$ ze|KY?gnu8>=3^d|aq?EJ54|V2TMh?rmT3F{Ce$Ow9|MRf+7U%N7&tD#{>%-#b{5G!@hiM%D%pdbmQ_nmL zPns89ef+Z@jsN@4ycx$a=E2lhW!V4GH^&Rxy@sZGL@rDQG9;ue8+C}fq!-DJv?GT> zS{#pRqe{E9qi`6Iq@P1` z_o9MJzq{xUb#e@$m2R56u$4mODD6vSKU*#xyagck=+jR=tA1y%ys?r~V0@pGz_GTB zmxJ1jOYj@eOE38L47p|a!OUS`$o+P7XVlbDhb86MHhec>vV>oqm!q@ z$2vz2*}l#<$`fQDRh54}{OGHx3txve!8e0$8)i1^@aR?Gk=a1I)R(#iO8CDi{glt8 zJin1zQF`hYcCk(geb+3(glz_T;fJPb1Hk^_eh57oK5kWo+bikjMloTMT*pYMqn-z* zn=h4^lRJ#Nn}lVCquJKmQSh%4)oyJvj$wk_hgkI!L|&=2oeU2LL*!>-9idaM@9++V z0nSUxr4leV0?!lzb&2LNde?G?prYRsEroY7%>>`aJRobe)E^UF@8Tdf3Xst{NnAn{(t|YvDLQ}Mj7O}7wK!a zhYhVJOjdPlMk%MS`6)dpV&wps6vsiHnW1T1L^TfM-0;)LQHCzClDE!piGu(S(!iSd z;Ap=?TQzJHXA*U=ZLa?&f*m~fIS z^Hgx#q@QbWV-8K%UN2@yZ*JMJW;zlaC(m0Aq;G4HpQ-p68`0%-mcbx0HuNwJRsal; zc{D7;VduygPK@)GKK9L_<-Is?aj*eYS~iP;Rx`+Gy`_WxL?$Eqe#j8Ke|KJkrY-bV zzt_4teAi!nG^`?muk(?%U#KI8tjEl92Jg`&htU&W8Y7!XA8>L+sk=M-)zfrIjcM?Y zR-BI-?VOCh{YE%u#>cIJmtFDtTLCNHsm_l6l9^seMxMoowtig??q_zS@At@e8{AaS zru2R6lf6U~C~DKeM(8=TU!DtW`Yh@XLUNt+F!ePZM@NI>;yic@o%eSSYOA!#DLIYv zcaUp`k%3^mJY*Q;@YSoz4MX)9XPmvX7tO8L5C_k-i_x#uqrn-cqC0Np&3 z( z!gQUnxG>-d&7-L>_DF|Pb!Sg@W!ZVy53+!q=LD*& zMN+0I2Q4`iA=?jrarnu9v7f9^$YIM;NTMZ@VzWt$O^R%`y1TjxK+ba}0Dr%43*Y1;94+JV8eoA>aZ;kllb z=Wlf8KL7BAQpg!#hqURVf9HSlB5&D;NALdG|L}XuzdQ(EK0hpPtWWuW{%m>u{%qN1TN#7EOW-cc zi-@XE1?b{}ESDX>>0-@N6Ef(e0m26Kb{9mxP|czjt8A}hN}J|wtx0jvhccst;Anrh zI8eu)pSHi@NCRub$(Y%3l_`C(V^uEp*>-ageSkBj`_RY{M8)#il&~kGR0a_xH9m{ETAhtt*&n$(P8`>_qYG9tRsRSFV3We*7ce$C~D5syFpVywyC8M6o&Dib8K*iW}YVd3=_7VBO zAG?TJ<%m2QOyoNJ>~l|im^LO5Oagj*R6318_0bdnJT-2?7JozU;F$FY)&M;xoAyZI z*QO(0mU1X&8lZQwpbQ{?E2lH~C{5b9i6ciV$B)nEcDqOqurZ?WaX)~l z9L8B|-y3jtFKpTu0b6j{3@bE}<>;4n3D$jxknd*|FokD=`Ps-bdd5(V2aJ=Ctc3H? zcdIOUs}OvOEUg%IM9x)c`}lTm`R;d4maF|_oB8{5qUtU}BVX%rdEV%rE9hkYZxF7FOf)ii_}^vY?85glwYK9QHfyLapZFr*4-qe)Q$Gg!(x*OF zuYB(9`!RfNzuc#Z-k@vrg@^f_^&~_~{Q7D6?!WnZ`Ded-xO|Mxy}rCy*8ccs%ky9V za@hvxAx>WcIgb~{-80i&!9;oJn?fEMW!@DJT^k$v@~G@qd|{Zywj7W18KU^SPD z53Pd4R;_{YbVYXD|^JqO_lrwa2-S1PzE&)1%Dtuy zdrm`el4EyVFibRl7=cC^YpcGwj$WXxHkGloiV>ceL+@jxBMT0R9F);rE6}n&Ova06 z%HW-5NYZ6HvKO2&h1B&yR?|l>?Dtpg?euOs4oB-SP5rKC?8@96c>{&$1D&BSWKDJ! 
z*^#otZKmW+u7VOy*fjJ`!eL@iMC*)Hk5JtiQm z*K*{xU&t6lS+*c3%YrwKT$J5a)C3vkQJS9nODRK0^dJHY7_5)uUKs(QTGe@F6?>zh z$eEigL=#-%}!>m#i~< zF!xTV_O}B@XRP1G!J^OnJD?;iB``|E;a!DRLKDRvBtQ`aptskBK4a!wR6o#$7W`#A ztJTh?PmU9*?;RY}K{C`%%83c`X zKu$~Z=<_dQ)Oo&Xh2n=j=A$zGp6m-!{bn3$@6L}gLfL29iGXeI0W@i8uIbO0*Xll> zjnBx-{V&(Ft^6LD^+jP4jmy+k8Q7k;Z|l^<`&PqpnTp0!zEcx>%x4b=+snWIhue({o-uy)1HSmYnOzTR_c*wNZV< z<6O^C(ziO&@Hs>_I1@0-;LX1ZX5X+K{{VPC!{0jTv|;M*GIoIlJ-jOXk-;>m3%&ME z19<(e9xuvHRH~dTXN`&~6}zO{7~R9SqMw{Ax=oLM@e*5li^??l;8$SmVvA5Gj4SZrx z>?&;~a9XcptkV_greQ|J3D6s*ZGr^^EsN&E)#FQ7<@3(%5LyKE?)Wef*vx?#9B1tW zA`bZa8ew_oetW`aultH}bFM#JoHsCXLKq?9VNcVG5hzK0{CM7HsZ;zOb`wRQ zKzPSsJ*mnb#~M#QH*SEr>m;p4d{Lr|Ycu1g{Dj<~M*bTQhAt7TgJme7zeSR>c47{% z8O9IL>ci(d7J)?51c1$4LkmDY+TV^doy-nEcuJp45KrOx?ag%>Oi&i^@&a2^A(%jm zqxUF8Mr_WJyev=xh!kHIM?XZ(oJDSEhE)KGKAH!lmogD;pm{Itygk`pUPoS!>G6Jd zYq|K{}aGTrqjXRSmiz?r0U{`JB{3iyNv z1A4uogRP_QTZThhfUru_f05~X`@^sKpSJp`pLw?(j@_#_`M~$&kaj3WUM_V#_abn4 z3>_T&bH+)}fB6r-S^g;UT8lk=_2>U-`RXtKX1TwZgEUl=fGeHc;ouWw_u!!1@2OZ$ zRm5NYO?6GV*S0i3EQ_I!g7fHJK$Nof;V-#|4b#opO7yB#z14kJfHC)4jM=efY}S=v z`bs}!-KM$fq;iGb6z+eHPB@EVV?6<3cuc1r1CXE4lYRlT>noQC4Q2s(RWW@{>6%`~ zK11VF7h^xc0jrR06QyUIq_qKpV)s=?^AE1o@l=w6O}7U{e=QhmWfuK_yfWk5BO$%h~zaa(;dh8Ce7H zRE(hC`~n(wsN{n?4tifzWgzkU6q9|ijbs!HL}i||3^L{P~O zsI2x3g0yAa=F4G>Y7)#b9&5bHj@(a2RApATHpK<7aCy5d{lAj~1bqHRs#`Ye*!KogR&4TfM(ZN;}1o^fx)VS6W`D}wl%dRN;6>VqBSZry*TJA!q`o=x-=Zj`=Zk=d zXb_{71j_s2Z*LHZttI$)2q9VV>cOZomYfmJmWmCyZ8N(a~N2NuZGHZ*QVp0Ri`*iJ}QwI$e=#7(6GftXczTbRGo{e=F-3;1LNM z-xy?+I-iC5^nvz(T@;)#HIojIcfCQ|R+6$(KVV2Eq51Xp=JJ%!??Us{$?o#;oBfvT zAHu_DiAR)Swwnb6Gp4@Nmdrh-p1;|gzN5pTS$0+T%i&U_TtmaX0PT!xK)&?l?{WOY zPvhd~M;==0$nyO-{~!L&TZEJ$&nxs8|Lkb+p1$yhOvo9?KBb+<^uf75YHnVt+C z$%e+k&epJI(&>Y9Cc_2=p@jaHfBCEDJ^=pQ^P{g%!lwyb>m-9w@D#PRB6yR(f8blL zaW6IU=1g2?>s24wT|FhD{LC*f?(q1ippPET(cWnM3v@Lh*h}y)bL={(CPI2=L-2X@ z)8DkwH=GBDYUhmX5N$hYha*D|K9eiVl}u@Fs%V@ndV|$zyFAJ7tsJHgx%IP>Xuc=J(v;7QBDpJ~eakRy%S>$GM zpXah$T+DwuHIWY)pAl?q7M|ccw6h`W!mKT#^IhbaHm&#SG3fdI;OMw?td75}jzk1_ z+J(lEiM2=)=0+XWjxBY2Q(;fVody+j`x7+f*NbQFP^c zAeK+^@ypyp1TU&lnlfN>$+nAKC-Q;VT?~?c(Xp1vjDhp*x!PeG<+T`a1U-(z81hM- zy9F(Mgks*a4nWys=6W!eVl}c(8$hTIC^Q0rmPrC$o}+K~0oxH7e){xk`4IY|13j_M zyn3B%u5HOO*NB+4W9LCwnLZhWF`HFZK>!p32ue_W&Tqy;2^j(fQjVl7cn9+U8s#NJ zQ!X-AjHz)XOHmo16zHZPlsp7SzMQ6oaS419EO{v>QGOA=dBDBhjYQmW?%}lsa&ve` zKqoo@^F`str2;Th0#jlF0?LRJ6h%`6l7K^MT;)}|n>jiC%{>VKDSvo}CuuhPkMX2k zGS4SVd*N?e8xC=D+*x<&W0WuD$L*{qtq*>bB*@ zuMz+p?RiFmp|R)aRQ`}naSVX4QzGFtyRo3D8I6D~|Il7Xn$K;M^k5D;nx^XDwA1x6 zu@$;X?}bF2uIoEs90$n$**1GvPZrz5wbF=b0aycj=n>n-dk-H{N$i11!KT`L2rs~e z{;qb<%jYV9rqEWw5xc0E)4vlHVT%%}Aiw42D3_<#2@E+*O35+cg{lR|U&kC9V&ilS z{`G-N`Q7~Rs2#cjz^xR7zR+e`{5}~Qz28qTfsgDYjI3T)LDJU9J35iil%RbtgOur8 zv*fIDbblfLr_dKnlwD)|j>pD^UoF$pt6V7vW4CbC6f7C2*hrED+-!~=kv&Je7DrV1 z=6-Vd@cw;lGP)4^;_wj3Hm4dPI1-eMGv!nTH1yR0F?;}>;7mE4NWcH?VN?9fQL@^d z=#KsfM;pWkBXFdFZrY^x4J;xX^YZxN`-ri!lo&KI44r(2AVJb8VB_S-5?yq*xEp$I zZ?2Z>tBat=R0i3b3c<&ZAC~XGe_vgb+*A(}XzFWK0fJ-n7><$we52@5KRb!`MK9uy zB46LDKEW2#-0Zf&BcXE!*H(^(=$3IXDG>`HGEBAoS&4oV(q4qdn1O8+`Z&P4o9i&8 znSB~20A=cn>)Yk#fk9IY+2fQ`DT)mG=&cf%tbejo##*C^BKzAsn@8h`D9x+b&mMU% z8(TXWj1zm}#G@<#NHz;}IAdqS6IxgIN5BL_$c?u06M-0Ib)sxj+g_B2(o+P#kJ2#} zi_$`SeplKNxe`^*kYpvmf?_ek?eKY~ z7rttA2T*0iUHg!K1kdp7T0jwc9un}}Twmq8M7U9M?Npu#)&K~jGYHB?`HfAVWQAu{ z*o?JNYWfrCQ6!6>*76+(f;WtC7EHZb_5#clh~jbXWW$k@YC)s&m~j9rGF4VWn}Dfj z1cW!ExSSo3fv;160seGx9G*p><77B?>nrFNB-Mr>2tM{WUgH6d!h zqw9}d2$3YfDn;VZBqZ@lD$U2|~YIo=t2g2L)XbcPP#kMX;G)FtGF;KVQaC56 zy$SS9Nk+%Qhw5@@@_8KvY|QVU$Sic|-#ARK!4+jxHpP~s(rMFp0?RL_#i<%f6yONj 
zn(0P3SJ&5VOE$>P@VMDva8qC?!!;-i?bvcl?HM=cEqI1&?7q*+4-TSTAD*kw$_zWn zhaB+W+^nZi5hK6R#nHhD9?a>(R{Whgf4pl888}|!i!58!1^i^f?^Ku=U zY_X1(G^jiGNb88Ti+95xG}3AEO_#+)C&X#nv=KeyRN<#8CEu~4i9>V~dIV0iLC~gj zZ#>p!sNSkZamsc#sm?w(*b3dDNfm%y!Oux_mDOV>8LMeU{3*Uwx9MtZFc|YH_UZbW zCQ4wa?J#RTw1|>2k#Li(h{ zqR5snL$eEweGc3(*B-#hb)CbHky989XC$5fb5CAMjNhMMEgQM^Fg)Tkl>m(sz;a}q zCk5@?b=tvO3gUP~8597}cAuIQ8Q=#YDpAU&(?rWh=x3;$se@}S!$0jiTCoe1Lcgd} z-vWc4yBzQ!M{@Hyg?7zmlu6bER66{~tTGSZoNM4zyBbr#1+To&rGlZa0=RqgS(p)? zJT*93ot~Gt>xGv9HeeYV$)y07e?ZIBP`A=VFU-?fRj;(g(RhhmRe`!SG53(kYXM%` z1USal659(MZ7*Z{oFMDzw~m*a-+Q-Q|Mu~68@W6)L?D~%l>7@^p?me349ly`FxdGk z1ECkjNVj~Sm*pwZvwo&V|x=XY66bj|N+ zv|XkNm50w6=Y4GQ-~W&QVEN~7*O$j{zhB<`*v>1E9Z=hohg8;aHHyBvWQF5ZDV3Hift~?Ek{`X-X zkqO2$(+%%ePZ!Kk6C}Jz&>P#`Qf5v|;kT(EP6^MH<2lilNbnQT16)~5%k}*{HWFuN z+;qx09q19Tavtk(8u;0OIP`aYOZv1r+qj%%!Dg&VZiXD2WP8@OSgmXiGqoUY0KOvU~mSWC)u3G5>2VcQCrFXQxiSye`qGY<3jCxP7g3gC) z>7UP7Yo!-W^i(3{k=4dW-_m*IEs-Rv9cL~ifUp+8`&YmIwocQ@bSg5Y`N)#qchP6C z1a>-BodY(V%Q?e1JHH$>gc^PK;bwxV@R)zHF3X}xyrgGgOY|>1PrGFQ@%`Cy7d?`| zNJIr*bU=Usi(h@kHzlb}d$FGKEgD`sv_50Cc3~C;3f3~*3>GG~svI5aV$b{zW7#x& ziY)>_ESpG48C{u{&?cOm+8PX?1035QqMt@O23mwc|1M^$^I&(jQn>pkz*I$QOmuDisiPX)RS)k_> zH%G6E{(M(O&p*y&wuJ5E*v$QR9k0$>R2IONJGMmk$%02HhC**FpM zf@|I~IM;NXk+EwSsAw4o%77RV+Py-4UXvAT|5bGS3kQj2)mS2WBcdyg$ zS{!7!1fEu4UA@&wJ}r3VJs`@-TXiLT+?(cHK;{uB<9Jv^clHez0eK1E#2C#Mwm8h@T`= zrE@k;N9h>b_Ql!7`48A8Tf`$-xqzV0wI|MU(t^8;J?%b+=9bH2m-wR^V9F0>%$X8w zJbVZV)==n2#lae)>K}rFn|`q8>+8$G&G2BHJO0v{@GqEQd*Q?Vr&i(GciP4)RjY%e z{o)+`vym@d~*O#M{<5`OlJ>?fNky-+N2U2Gm2eoMa$ugH9;tVDupI0k z>8@oEMBm@rrV!;xq;3VZg9kW&%HwU|UBaI>ICX>~+G%Z0s&Wz?qp=mID;fh@xC{Ik z5xOw&>f*9akinvd;hT%UwaqQ}>vghD_sUwqm`q`|0uk;P5p0y3!aIyYG=@oz;{=8n z??o`(8=vuuxH&jkw5$VNf`S`W1KZp%F@5XLSpk}05F*AdSfmJPo6%8ZJ#2)2duByy z`s|5tAy@lffAe;E`}Ikk)6AU$^D&Hz^YhT}X;utO77+jpAUBH3-!y^41=#tZz~Guu zuz;)bIH1n&1>(>w%b<|j5FIN|HfyF}xoP~Lw*+G zk+H~DW~FcCS7mzh_9_Nz`5v5IWeh*R|FZm9`0%qZ>o*zuU5Mf^r{v8kjbqVQL*`uP zcP^VQ16&ekfUfiE|2g?yK7rThQm>r0!UKG0v)YRu_2bn$|66_j!@qnU*vs2@FZY$k zDcfHD=xF()|K%Sq|MyYs@Gt*n+5X92EuX$S4G(1G7N#YD&zMblS%uD7aayuz`apH_ zUx9QxU^nFEf4oBEqv8hqIsDmnsZ<`>#mu^(Dm6`^Gl;A27AAT`(hi>eK<9NMMId891<$6#`}bjH6G08RaJd zWopj`7oaHbTG+(_t_j_U}&3!%DgVUogg?ml>ZyY_LeQETg%#^E zkR^d=!Vr7(H{0QuRrekfcsXaFRms>P8{~U0 z(zi6nD8u}g<#WBY0+eV1n-~H`PTLmw+4l}eIe2RjfJJmHFKY@!;G8Mr2q*xsw0C>0 zH2staY->4rcf9N#?j=x~Y=UEDAOH#2S$#h|HOJ)H$Z5AzDT1Mia<*zwh$M;KuI~fol1E`6@Gj{WcoVv$pdP1HOI{6y6k}(hvNjHlR4GdUc z^WYg#X|Buow8`<!DNgr3(ej7$+vPfKT!#Mt969_|fd4;jyjs3Ze^M&YTAudx z6I%S+0U2#*pq{$w7MC-Y{!XtxJygW z$$A8ChmjSqwf@6mInyuma3vvf>@GM!z{sj+*?q16aN6`GC)cSPv1rH4$6{E7jw}bnavubu_*z_(a~|U zG^@znqY4AJR8oL_J_=hRo7i8dN*flY1{X2{!Ck95wdpvBVAxGE{;80t*u?3@X)680 z8e4y7SB^kN`PZ-_-^d8##vV+;2xhJ4nfYTf$FEhzhi%)vqUvWHoACkN+LpiBs7f3e zvRU_e)p>FD1{CUdHXKWy>tI;;hW5te5W#dlbgDaa9|JUv?M>>)F|;qQGDTykv1F4xihJ}W=^G}psDNz+Uj&>0m6KCJiA z*U(|72z-rx4P)(hzfJ=r*D^%7%dt^>0&9kq?>LdYt&QdFaf~8kk}+~H+OW4>c0l>B zV-UBu3^yZvQ2@gCh}7=44WJa9a{v8JGQ~t?jP!SZ@4sDs{~!J1D4$aHF!ectmD$Zg zuGu!Ro@njh3_WNZ!&daV0nJ~Pg$ND=D|2?C? 
ziDiOj=gF&*G_#HTSNh5k1LZpLIBf?^aYB`Gz*xYws;H=#0iKAt!NZKfbj=V=PUJxT zvJq1#oU8*sTu>W~qmk{J>+G``B1^P4SdSWT6!>c#etNaan2e0veJ-d4bZA426EK$q zD1sr=Hvrpe|31z-{O>~7TnCH7(bC$Cu;Uj%&zc`j7pUt)CJ*$6`()k*w5q;xw(R`7DrvFTo;jiimz$ zwxB?T1MS9!Pu2U3M{>s&1pU?!S?Jv01inUdURu2>N_*_8|eO3LqIJ;OrzW>l* z&gUu#^lw$^G%hj~Fx^Ei=D=@muJYYD@v*q{u|Ajxs#F{WQ7W?NgPo=c4iYHh1^aK6 zII`Q_kIzVOoB;;2CQvXQRbP59p;DzbgUT^n1K;hp1nZwE6Af*7B=^ym!A^ z-bem-x#mgv&l3>Un4&Yv<$?`CD0>0iwpjsv6%8~*5Ow0YGK6`DwgSO`L8~{u@87-~ zzKnBNecn+Ob4(#HdymCE|NY-t{^YlJmOuHEpDfS+?kCIscORCW*u+}ew;0uR_(!+# zPUf&5(5HLsk4@4Cl`T^plaY^;^0{>;#O2uu+CWRo>KPY@O$GwXmHlv-WUG>czj(v> z172I4=m^lGFC00WA=lhqeFnO8Ri?YMGb^RpeKXy(9KFUrL2Mm+bX!#dmaDRZ@TZcYd4%k*rt}-uryU)QVzs z)T;NjmWapS)ZBR`E!Gvs;7Brtf@?pY#Rh4OWs%a5r?_TwZhmr2t@W z?`RfB2G98=Ygl&n4&ozs6G)KRR8*Rr1a75fUgo}^Y0EiWBYVy%^p9i z-RX_;n$?YdA48jk6bwrgV}A4aV0YO}25=uGmrXlH_VX7}ypq7Dp8M_)n`T{kb3{FQ zFbz3oa68#nkDCocs(85v^BknJv$xy%B75B#2jKuJi|o3vEF(JoaNZ{xiK5(RQH4Om z)B>hsvDKqDz^x~(8QxMTJt2w~XZt8(?Incuv z9KXwW(^UEFI^a3;Tub-(?(TUaT?|jQ9(hH>t60sPBTwMO_$j^iIAq2o8b>2#HyYgXXuMXqEmNYa0+643{S zVdn)M^Mo|>UGzM|$h~17`t!?EK<^=d{2{vf*J<~!)?Y1unfI?=e_l>={X+oWKKd!Q zuXRrUG2`zEa=HIG4n_VEQ|v=;l9$Et=bZq6!$BKhPyHVN;ur+hrmM(SX1x11Q>!|<+yh%M1I+k#XC zWRb?f{yn7(@6Aoh`p9|K`lS8HEEm3PV@i?`n~?}~jZD;?Gx0*(TJ#0&0vERBEC;K@ zXI;!xggWmUfSkGaH{C5XbG)uYpX$Oq3fr77Iy~2*1IxP(C31cfy}^T4aMFCI3gXD8 z)QktYE<=>asE}Bjz<~>};ifs`)%>;bICFn4AJb=5HR5|Cfu4{^pqNhP^zGDN*)0{uu z)COmX7!SOnin8sgn8wD^7MhHs)e6B|d1@6N(IT zKiuh=Dxq^ja9yl6g&n_;&n;3GxHg!H&pJIluS`{Et`g+b4#SA*MR-Q0WP2$A?(w&` z%i3i!8H=eHBZfLZz9pnEy3sc#_FoJ-0^HkA)alWt`Mza~5dQG!T=sk!sR7^uRRkbY znrIb6k_E>>9wtCIINXo09Sdny<6WIZKTBJpBtT5ZEfkI&t6HC#wU1 zY1cUh%4Qv(>w$K@kcoQKtC9~RVkp*Zcv*ca(${8fHaFvipX6n%fY69#3+QGX-njz_ z`F)~!|GA#gdsvP|T0Ru4;el(-XIaeTT7Ym6;1#sYeE*ch0CGV-{1WhR?n+@j+B0;b z4R8w%>%26a_8K6hN8tLH@jJuaLu_vI`&F`@56^eY&%gS#{CR-t-+j7We)4>^eE%we zPDVF}%e;;g&pW?A=RWiIk70@fb?(#WL;iMb5lwZ{++)8FxaaNXV>8c@=}+jf30VG`(Y|!vvOO zi+=#q^x1yo82=P|m$8x8*h=TQL${y|h;Tl12bkD033vdC=?yl{Mn`sZDh?xksJOXL zz~$hmv5ldjm9mU+c9V$D$TDM!9GXSao&4RYAI2ZL%p>Yw;MYtvpVKokV*i}Or_VB< z$M)L;p(1~QURYY);Jhs?O9!Ou<~03+6?7r(o8D7l;8^GZdN@m}!G=0-oImuJkIEm_ zD?4jUYpPOp%UyJUJx}KTCKz$kw)N~h4$}+AM zk48Vz{){y;j?6mM7k;pBub@iXGX_Bpd^V2GEk}kc=cPi}fG0K##~sSkBe}~zL6W4v zN_#jJ`%BwfyE_@1t$2}JaDb#`zfTE_dM?3BCBxge@l00gZ}gxQU+|7BV3q9LTrx~z z({vsUbdAV3cgY>By0KU?guoxzSeyy1eW{WcX&e$N*)aI#dcUm>P4G!Jc=Kr|6cu?9*;?zihB2vwX&h z`q%875=7~Dql`y;PC&B&2XOb&yhBI`w$)*CbLR)(a1Z68XaKI;K*GhdL?%(aqoEp=#Z%dkSFvdQlCHFGGDSghvR5%pIRKSd)F|@s%y+*7JFf!gg&M`bc#CBKwfq7Q8Yh0rxk;2HW=e?^c|Y65||CZ zq~FNlK4VDeF_oSKR^){qG?>CMPdb5JhMl&R{go1Za2KA-e$Irog1b}+RZ@Q4_ym?M|&rG=)-#Qdy?X0=C79f=*hF(dxN-66VfpZQv3&P@x%~3+eEIp)+48f;kIPT5PnVzEpDq9P_Wkm&?msU7 zX1QGca_wo;8U;rxgVmHC0bqaug=JO`t+78cMOWp!G zrO9TJbSW})@9;Yzil9$G9R2`FyuxRJ2s>CEYfayQ{13Y(a(_WhG7z3{4D?J%RyIWz z93Z&}(8h_++%THV%Q^srlzgWA{pQV1?S@_pn-*?K0dDV)(^cFl&_}71jnihcAYe<4IjElIr^Ee z%jbE_aRP|+!I9%{Dj1O??YLD)X{*dk6%~f$} z_h7F(IP^uIRdWO{u;?(!=f=9z6leVOOJL|wDwS3D2oT807z7X9X%JiCjO}otw^&Bs zC1gy?|3Y5<3tYT7y~tQ*J&^^1s>2eL45@kv!|Yb0f2|1R9*LJRUq)`uU%*#4upxRE zo5kW+%fZQ8@`~^oHlsuV;GFsL!@aF#|7iCIps=`$BAZKKEI`}7Z$?V&1kl zK48Fz2r)xNQH^N&P`**EfL{Ug7>d_Kjfvu-j22<(`|xnPtJsAu8tP)I{J!=YA0~hR zL>Bic0hKoa24g~&HfwC@b4r=Yc(Yhz7S4TIF0L+HzD7?CM0@y=HuAp{RId zOvd@5B|p3=JU}52U=6SUADViUV*2e?lXT9RDf8gtIOuHW3qhtdtPMJ6e{8`r^v;7NCRp>P z&=29~W%JQh93UvxZdV+i#C`rD1Q<*#1H*}r;N{^i~I@_$@@SpL<` z+42XsXUm@^)Bjnr?!S)X|JykI{~Vk7oBaNt9&eVvxW8O}_Iwd1ezp80?|*Uge)*?Y z@0b5Q_y5zI56eHx`+xiJn|}7MpRSib{_?Q=EJ499!>{XCpO!#eWz^FS!)J2tp+upL zo_5;G_#+RFh}^8D_!64CjWA=U7gBoit{LV2R&Vl?eDNTkR9}i2xkmX=)d>&Tly>m1 
zGAekgLgd1H-fc>uOAyXhW`R)jzs@S3Y4h{4?4p!kXs)aoPN#oH=qukw%D>? zQwz3+(dh;b0d2a3F1(P9$&fi4UD#@brk2lhP4zQ&3A@}6D;Ob-HtIK_p@B;5jDtou z&{pYl3MbuMO<~mDGG=3%YQ^A#k#XXJsy-_tSi*ML+-mMuSr={klvFiX&ONYD=70Ka2|GntJyh2;>H3It-jh1hmIoD)n=!vlhD@LEPf}>nAOLeB)dqxY_*ADC0Tz_(zRDGF0I*Vh--Ll54% zyf};9%mPpWhiP8=#$N2WIC*=p>>ut_epeSCmx}~oXg6l|Jg0xs+oh}K$M#fIZ3Q&N z8heiXVYgr_Bo<%S971rNXyO0;4}ZK|U0U=q5h;VFn1sEZpBy~FYK(9gm;?VYTb66v zN3%&zC(;M>TN%)PfOkLHyG1900?gp7%o(S-v=bp>SePm{Z1nb`s2;4(aH5Heg=`$< z(P5lIX!{gH6M5V6z))N#N(VH`%Che+s>|&F$cu4OYV*4oOwg1-Hji;+WiF24d=&uQ z-Cb{FMmd>0x@GLzkjW}Ja|#5`2<|$+r8?jQB=bhIa<~Brf#kXZho~|UBBcX*7FeL4 zF^lk3MLq=>cXkE{fUokNa=-CWCPSKi9bB@_${Q_wWA@o%z!Q z0RQpx_3~G7{J#uuev^@&hwfj6-d_dS&Z3Lw0gT%OGoJ)Lv6+@%zl>Ab+p|)4PHb#& z7T_h9P#GQGi&NjpZ;tZ~NNq%xR&D2RzyTOMvQ;I(7*&f*y^s&5x3|9^Kv5B}f19o; z37Voa1?T*X=pJRG*O9SP*IuP^hj?spod zOtx*Mp;kH4A0VYmKD)Y@^*?OMxbd~~`Dx38vpTODKFG4QBUrbNM9_+E?I<6sSP)TH zi^q1^focQXYWX+}9=@u=g^%G8zH>OX#I{OS{dI-$lSdMh1nBt2VY`2(JSPFMK+ScZ zV)x|FzP`K?NJM|)9Gmn+CKimUB0iI$ zhuuWvUHxLS`3Gy^S4-|8QqXpgMDtB-Nw&R{-vufjV}1}lrz?^!(@XY|7Xv~M+Ue{H z6Z}nn#?h%u$!+9e-kUto;h7E+8!X!2Kk134H*xl+Y*ku*>v#XF<>=^b+8I95NmUEG zDq`8;pWsn)7X1-4g#YXh#*tMWD88g!_77iSl}ZbKsxAaKC(=^hU=Wm&auFtiw)1vc z9gswbm7iO?G9rUeqL}$D4HtCWmuq*EU2P{~-PsY*kHJwu5T3N4*Z2mo2ZrYWlFrG${F&qL$!#IjCA#Va{)ga?z zblnyd8l9%bAw50Kcjpl_J~*oJAlk)u{Ne-!KgUN09ltF*s|o~=@xtf+o&FqT zGLCX-<|Har8m;oDFE5d`>j6{rzlJmPnEr^FM<(`J17il#EHiX@qe<@^7+sM;YNRKF zg-0qcuG6+a6I(r9Yd2AG_amnV4yk*2LpR5R8gfn3)yHP|`Gd^yjBYj9#A*chm!%mo z0|O9ixi(&Q;byJ^NVjS9IiG*Z{g0tXS^hpUxk!KKas20>@0Y9PdAW$;pXTq|@M&#t zXZf6Vo-^)e`V>AqrN0}2NItXh31GIukT%gdcKW%rC@pqQHgXZ&yY}v0bqF{ZUkC{8 zN6C!3h_=JwlxD#z!0Yr!UVxC}m6f_*d*&h8j=+iS$Ye0M^S|LUhn^P8J8v=uMtDmh ze8k!KY2`CR$f%?50@bly8JN=RedI%y1+&lx0~}yQb#i6e&gc0Za6UYaoQ>DFvb^gA za7xPhgE@3VaJd#9Y6B4O?H>eSw<14|FAggXmk!n;MkhJ6mSFRDv#sz?g{0YLK7&17 zUqnBm-(lJWh~`M#@Wm_I)gUVBz8pq8t~J>(4`3)uYcGFpIs;W zr+WeuJlje@wVUzm?ps7WhsWS88dSH~H@?#u_s{v}0-Bi~P@!v22`(gHwi?%3>S*BZ zJm-S#(g6VpJf|%aGL>%Y&gdU| zWV5$77sWlrL?8eqt%gITi zf9IBGkfJdW5vF-LElBCfB0F>Az`VpOsfB3&ICo|Lv(t;tmz%2>VcBngd9B_!>eiFpp@v2e~*!T#>+2tJPhkV( z<3z-b&vjrNlleHokg}1-z4qMnTxX>;y784^b4vKcl*}<9Dy`ON7_LAJP%G`ncJfmu z88M}gNU+hKOyw~E$Pf&xff?HJN2dl7Q2LHSRf+)+z(#X{PlnL3a$XG<@I2A`o>KP) zfDJ%KMvOvxI5I#B3 z0-XX6pLJb8+VR#_g8}FrplK$bmmLMbcQ^cK@Dl(K5RfSy!!HL%n4in&yTia9SzG(O zfLOP$#rOp)#-TqNX&lZv5Kze!(Pj;TEKh~Qw#}ZlmN5z<^-X4*FMv!=`6X?Jmok^| zxmBy^?d+2thd~DchOKmf$X2lGM1Chv8OPSvPC@BGrWYFPI3hqiq~ic`1&GH$FEj#a z0q&ulC;==_g_Px*JEn_duNq?^+$=ifwBoEY1~L^4vmrJJ{4HkGK8L0J>%0mw(hpjI zcW|!!czzjMzN*6uap92+B>m)C+TLvY<;~M}(IbJc z4pJ$}?X3>t54n=n1PgR+f)EbxfzwOBVMF_68>e z)3SdU#SduMYyaWH)p8xM1(JlL=!>?c0++Yx(u&MBU!wn3S6XIB_72tkQ=q8)s2Y0a6!mM2YZG$OMj#6JhnetIQPnvTt*)U}5B*?SG z=HAYA-A6&fBJ*Ujcb_Tco9^KF?;g>mJRdtT;* z7@WP%GM$U_1VeQMv=63hTTT^XUN3U%TdS2mIc)*b8AF#!9Ko9uwvisaam%rUB zlLhR84_aue3`cJ@a<~44Ph^zQbnMz#&EwKL&g1ytaM?9I^x`Zy^8I9E*2woq} z(gWO{k6>}8-5J}lD^Gjdi&Kh_Td~^Pn*iLsz+^w!_g;b_2I3SuKnawr_@$TDOf(OP z?lKvAgZJKm3~&Jzfi^o4$c`M@!<3Zk6v!^}P(heeFvq!bh`fia#+dQ6Yko3b{Teg7 zPooX2Wk#D4gaj(%3}-=SxK-Nq*}3J&g4M%_sSwTHWU?^!d{I4F?O|6E2io*?I{^^+ z!#&?s`UwEhsJhlO)6>OjA>i;vZy5)^?(iU4-9c8SORz80^08Guk*L+ z#6xk{mR3%NjPQ?xf3-Hwi$e^%Ce0{D6P(Kqjww?BGU zU0aI`$t|R{4L+sqpIwA6umxVmPBT_Py#UZcW=DPBdGJp#qtkY|7O&B~7yA;hu}j#o znv$wwgf4hR-8MIOm!sq3ju{raPEaW!u%=0}>1gK4BV)J;f8b<~(vB)REppBd7#)N4 zsy43IuPI#BGrX*W&o%7GG^9WRznnVQ%0ks}+S)se9f>Fnj?#ri6G7+S9Jxrr1F9Gr z?jhQO0V`NJFA?LLH=-wzZ-`BlqrDQ(G-He&1En=ippR-1jas0bCrbS zyE^!Mr<5dWtfHp>gQLCWz~0EMtqQN(f?_y;?djpma(x+R{>3~r0199xa8TB=ig_o% zq%AKBYfjiWlnO`b%*#)i4_7GRyRY9ik~cm|LqULqtWktYIRrt<*&I(x_IZzz$(}^Y 
zQH=0fA72=0fDQ;ce8=zpY-ue@PI;~z-jne<;+5e#zS7|x=E}PtIQQ-*Qxw7HvKr23 zfFFgV@MP*UbWncdp;+lYH`mq0^Mi>f-RM%QwG^> zco{PlqNB=b%GHhwRhpZCCIFlJWq^47cpqJ+8@aA4kV79@LuA2DkL?dI=R1A*yVvAh zS}i(5E3n|?at)}ZPtEYiAoq6uKcL1yJ*Rw1YGj7S1PmDio&f@2>GJ|;`j#=_5t*(Dh}{`A1XKIt#EzBqzb z)@Tc7fjayjJ4oOWXV5Kf(OvTMWG~=Sq0+I?sQgJrbdocH8-j^B!gfHgPA9_Jj6UN7 zkRgXwD?*2}vjG&@3S?#5RvWW(`hvbbQW?+Eo(i9S=_Gs_#}~e^_gptmFPydo6|GL> zr(pi3rDWB_$N`?EZGjSZSwY7Ws05j&d;XHlC}Sl*dLXFmHR;QIH=XAk;Y9|S-`RaY zH?+}xI7glw$Au|^d?rv3Ky>apeKsp&hlt0dl7$*WNcVZv0w%VMaShlZi_&_Jw zk+GO(M#oeYROy}?RNO^=?6?_jnzX}*jhluDvx3vXpNy~fr61c*zdkuw-UVObBpaEz z`Khk1xcm`R}W(0XR0^3UUh-4 zKHMe|2#%}Z9-o|KjIp`s5hqD6XWY^I=p_2c?J>UX(HZ#ZUt)%zrh;4##|3~EICHl2 zzt!RBMeh%PkH3D`^1lEJZ@)xV0zRwrRY;#7KPRbt7ut6!KNXuMXn6~g{VgD&d*sGf z;{8Da3xN@kH0M9TN&G|DV-^0|l$~PmAvW#Au}2XCp!>TVluSD})JVQ8cQ+Ri#0a|` zN7%kMK$$&3$=HE8;us?aeot6+SxKqY8&%&@pQ3PqLz$zQU z?6WhgV$-f3IB+1HfiTipM8c@Yu~%u+x6H2w7^4!P$kI7YAk)Y-3WT=K?FO)S69jN9 z6)*tdoRjg$B$VQN)J)mv=qeg5lc}iz3&xgmxZa$nNMxz&DxSY{- zi_ODbd`lwd2;Qn&B`-peJyCWgJW9LDhQ~RP!GLQQzTkD*pRolr1Cp)-8h{W0J%pF$ zYGik40RqZn)Ujl*fz{~WD3~rQb4D{msYPq<)$jCSn zVb^5goEUkc37_!Ji`;>87d%y8WXEYQ*D^3`7|3dC>-DnF!9|9E9xzU{zAB&P-UK;5 zYc(ifgXge0O~1Ux$$QNlJ7XRoC_nNyV9^#`>zNS#4jn+7{*VQS*F`zGhOAd0h-U%= zZPn35j=rnh(Q0B`Ydo_UW)^q=kwJBv_HfzC{dgdAGJbleU$Q4(j(dQ5#k={* z&OLd|3vE-P9%s27@MUM7(T;c86+ku1_IvurU_eM8=*6lc=v2*`hBU3wAg1N{ z(FZzOC+Qu!v=6J;6eoUlecL|Z2nW5ohP*T&*bzXk^iK);IXt8rb>hL9otJy@+dXv8 z+JtToQ`O*r((as{5hsw(w1$7SH{!qk@U*yYaxg)B%kSx)K5_^iPb$-I=|6n-+StGs zyMv}Grm{lD?tyO{jB$qR^%vyC?ge-aw!?ET=WF1V1nnw3l}HH;1QE6$(#0;c{%Wd< zA_TV2A@dDnjBj)WHXKB*qdzJmQ$;kde{ps;_vN1hj5_@q8dUSn%~?k_DpgOh3zJ2= z$)q_%c)}M5ieQL<^}~nv(OE$sTZ$heaAQe7l1`e^fPM}Bvr|=$*zRE*Zs^>|y{}&< zhze~Mg}P&VcQd~6syb)<#WMDkzfFBTC1^nV-r-T(pbJe&VBJ%IIsI zy;ZPE5tlV25t0t?t}YUpygXts#BK$=oX|!&I5OE5@U9coZj{O6_7G^`mvQdE1ZY_- zBI3HZK`UiWWDN{*Mdw~(46}evX^f+ZR5Bh;Zc372P6jONH!nML=QFow{?7?tEotuW zwPZ^9`oR6WsVW$(^0j#)ibeV3L*l@0mfJ*an^7pjpy(}$g#ZTIvza3dI-hFfYEA5esUZIVtr>kE-Umik}5lJ zAGy}CMmD`D|8;Sy>vA7HX=B=Xaq795op@=>LO0KH@aR^V0K2icW;c$+b)10v)@3>Q z2`H84CL2vh9Ru1EOoWyibNbcJZL$H6OMV+bJTM>6&h(KWb(Kle{jT?jtvlCZCqXx! 
[GIT binary patch data omitted: unreadable base85-encoded binary content]
+        https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. +""" + + +def sample_image_embeddings(): + import os + import base64 + + try: + endpoint = os.environ["IMAGE_EMBEDDINGS_ENDPOINT"] + key = os.environ["IMAGE_EMBEDDINGS_KEY"] + except KeyError: + print("Missing environment variable 'IMAGE_EMBEDDINGS_ENDPOINT' or 'IMAGE_EMBEDDINGS_KEY'") + print("Set them before running this sample.") + exit() + + # [START image_embeddings] + from azure.ai.inference import ImageEmbeddingsClient + from azure.ai.inference.models import EmbeddingInput + from azure.core.credentials import AzureKeyCredential + + with open("sample1.png", "rb") as f: + image1:str = base64.b64encode(f.read()).decode('utf-8') + with open("sample2.png", "rb") as f: + image2:str = base64.b64encode(f.read()).decode('utf-8') + + client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + result = client.create(input=[ + EmbeddingInput(image=image1), + EmbeddingInput(image=image2) + ]) + + for item in result.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + # [END image_embeddings] + + +if __name__ == "__main__": + sample_image_embeddings() diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index d6d0d3499036..2b8afdf16ed3 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -109,7 +109,9 @@ def _validate_model_info_result(model_info: sdk.models.ModelInfo): assert model_info.model_provider_name is not None assert len(model_info.model_provider_name) > 0 assert model_info.model_type is not None - assert model_info.model_type == "completion" # This should be sdk.models.ModelType.CHAT_COMPLETION once the model is fixed + assert ( + model_info.model_type == "completion" + ) # This should be sdk.models.ModelType.CHAT_COMPLETION once the model is fixed @staticmethod def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contains: List[str]): diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 24ec38780c3f..0f74fd6846c8 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit:
bbd51a40f4dab41ba235a7317b1697ac65dc9e6d +commit: 6f4ed3b6732421c15ba3a55926527043059ce289 repo: Azure/azure-rest-api-specs additionalDirectories: From 82168b54a9df27e2b131a16f5827394ddfc004ce Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 14 May 2024 16:51:41 -0700 Subject: [PATCH 063/112] Update test recordings --- sdk/ai/azure-ai-inference/assets.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index 1433e07f47b8..00660db93c6b 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_f6e39baf60" + "Tag": "python/ai/azure-ai-inference_9b9508aeab" } From e788873fb88b6ec118640efecbfccdefb527ec51 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 14 May 2024 17:47:36 -0700 Subject: [PATCH 064/112] Fix some quality gates --- .vscode/cspell.json | 1 + sdk/ai/azure-ai-inference/README.md | 17 +++++++++-------- .../azure/ai/inference/__init__.py | 2 +- .../ai/inference/_operations/_operations.py | 2 +- .../azure/ai/inference/_patch.py | 15 ++++++++------- .../azure/ai/inference/aio/__init__.py | 2 +- .../ai/inference/aio/_operations/_operations.py | 2 +- sdk/ai/azure-ai-inference/samples/README.md | 1 + ...ings.py => sample_image_embeddings_async.py} | 2 +- ...le_chat_completions_with_client_generator.py | 17 +++++++++-------- 10 files changed, 33 insertions(+), 28 deletions(-) rename sdk/ai/azure-ai-inference/samples/async_samples/{sample_image_embeddings.py => sample_image_embeddings_async.py} (97%) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 9ac1d097069b..1f69d1bdaedd 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -1283,6 +1283,7 @@ { "filename": "sdk/ai/azure-ai-inference/**", "words": [ + "ubinary", "mros", "Nify", "ctxt", diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 3b747fe37d13..a30437a66386 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -288,20 +288,21 @@ Instead of creating a specific client directly (`ChatCompletionsClient`, `Embedd ```python -from azure.ai.inference import ClientGenerator +from azure.ai.inference import ClientGenerator, ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key)) -result = client.create( - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), - ] -) +if isinstance(client, ChatCompletionsClient): + result = client.create( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) -print(result.choices[0].message.content) + print(result.choices[0].message.content) ``` diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py index 898076e89409..f7dd49374aaa 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py @@ -6,7 +6,7 @@ #
Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._client import ChatCompletionsClient +from ._patch import ChatCompletionsClient from ._client import EmbeddingsClient from ._client import ImageEmbeddingsClient from ._version import VERSION diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index ee75606ca0ff..5d49e52c9a4d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -527,7 +527,7 @@ def create( """ @distributed_trace - def create( + def create( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 31b7ce6329f1..9f34748a79ff 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -29,7 +29,7 @@ from ._serialization import Serializer from ._operations._operations import build_chat_completions_create_request from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated -from ._client import EmbeddingsClient +from ._client import EmbeddingsClient, ImageEmbeddingsClient if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -47,20 +47,21 @@ class ClientGenerator: @staticmethod def from_endpoint( endpoint: str, credential: AzureKeyCredential, **kwargs: Any - ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient]: + ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]: client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() logger.info("model_info=%s", model_info) - if model_info.model_type == None or model_info.model_type == "": + if model_info.model_type in (None, ''): raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." ) - elif model_info.model_type == _models.ModelType.CHAT: + if model_info.model_type == _models.ModelType.CHAT: return client - elif model_info.model_type == _models.ModelType.EMBEDDINGS: + if model_info.model_type == _models.ModelType.EMBEDDINGS: return EmbeddingsClient(endpoint, credential, **kwargs) - else: - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + return ImageEmbeddingsClient(endpoint, credential, **kwargs) + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") class ChatCompletionsClient(ChatCompletionsClientGenerated): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index e9e1b0469645..bdbf6403d8eb 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._client import ChatCompletionsClient +from ._patch import ChatCompletionsClient from ._client import EmbeddingsClient from ._client import ImageEmbeddingsClient diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 27e40a349796..bf932ab94ea5 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -402,7 +402,7 @@ async def create( """ @distributed_trace_async - async def create( + async def create( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index aec54a1832db..03dae2b02ddc 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -36,6 +36,7 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | +|[sample_image_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py) | One image embeddings operation, on two input images, using an asynchronous client. 
| ## Prerequisites diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py similarity index 97% rename from sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings.py rename to sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index 34aaa03e5c03..9ca1f1e52ffa 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -31,7 +31,7 @@ async def sample_image_embeddings_async(): print("Set them before running this sample.") exit() - from azure.ai.inference import ImageEmbeddingsClient + from azure.ai.inference.aio import ImageEmbeddingsClient from azure.ai.inference.models import EmbeddingInput from azure.core.credentials import AzureKeyCredential diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py index b85d659cb80d..52d40590b580 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py @@ -32,20 +32,21 @@ def sample_chat_completions_with_client_generator(): exit() # [START chat_completions_with_client_generator] - from azure.ai.inference import ClientGenerator + from azure.ai.inference import ClientGenerator, ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key)) - result = client.create( - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), - ] - ) + if isinstance(client, ChatCompletionsClient): + result = client.create( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) - print(result.choices[0].message.content) + print(result.choices[0].message.content) # [END chat_completions_with_client_generator] From 2c18488dce8f1e8a1ad38d35912af87983e4216a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 15 May 2024 09:58:51 -0700 Subject: [PATCH 065/112] Fix broken link ('link verification check') --- sdk/ai/azure-ai-inference/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index a30437a66386..f103f02c7db7 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,6 +1,8 @@ # Azure model inference client library for Python -The client Library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and selfhosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes services calls using REST AP version `2024-05-01-preview` [specificed here](https://review.learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-api?branch=release-build-azure-ai-studio&tabs=azure-studio). 
For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). + + +The client library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and self-hosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-05-01-preview` [specified here](https://www.microsoft.com/). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). Use the model inference client library to: From 2ac893f5f5acf966074e6be9c0cdbebf87a76086 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 15 May 2024 16:53:56 -0700 Subject: [PATCH 066/112] Use 'response' instead of 'result' in samples, as this is what I see elsewhere. Also use 'or ""' when printing streaming update --- sdk/ai/azure-ai-inference/README.md | 22 +++++++-------- .../sample_chat_completions_async.py | 28 +++++++++---------- ...sample_chat_completions_streaming_async.py | 11 ++++---- .../async_samples/sample_embeddings_async.py | 20 ++++++------- .../sample_image_embeddings_async.py | 6 ++-- .../samples/sample_chat_completions.py | 4 +-- ...ample_chat_completions_from_input_bytes.py | 4 +-- ...sample_chat_completions_from_input_json.py | 4 +-- .../sample_chat_completions_streaming.py | 9 +++--- ..._chat_completions_with_client_generator.py | 4 +-- ...ple_chat_completions_with_entra_id_auth.py | 4 +-- .../sample_chat_completions_with_history.py | 10 +++---- .../sample_chat_completions_with_tools.py | 16 +++++------ .../samples/sample_embeddings.py | 4 +-- .../samples/sample_image_embeddings.py | 4 +-- 15 files changed, 74 insertions(+), 76 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index f103f02c7db7..10b74fa1dc4a 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -139,14 +139,14 @@ from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -result = client.create( +response = client.create( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ] ) -print(result.choices[0].message.content) +print(response.choices[0].message.content) ``` @@ -168,18 +168,18 @@ from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -result = client.create_streaming( +response = client.create_streaming( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), ] ) -for update in result: +for update in response: if update.choices[0].delta.content: print(update.choices[0].delta.content, end="") -result.close() +response.close() ``` @@ -200,9 +200,9 @@ from azure.core.credentials import AzureKeyCredential client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -result = client.create(input=["first phrase", "second phrase", "third phrase"]) +response = client.create(input=["first phrase", "second phrase", "third phrase"]) -for item in result.data: +for item in response.data: length =
len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " @@ -239,12 +239,12 @@ with open("sample2.png", "rb") as f: client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -result = client.create(input=[ +response = client.create(input=[ EmbeddingInput(image=image1), EmbeddingInput(image=image2) ]) -for item in result.data: +for item in response.data: length = len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " @@ -297,14 +297,14 @@ from azure.core.credentials import AzureKeyCredential client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key)) if isinstance(client, ChatCompletionsClient): - result = client.create( + response = client.create( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ] ) - print(result.choices[0].message.content) + print(response.choices[0].message.content) ``` diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index c4b7d1ccbdcf..2335846fc3a9 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -53,24 +53,24 @@ async def sample_chat_completions_async(): await asyncio.sleep(0.1) print("Waiting...") - # Get the result - result = future.result() + # Get the response + response = future.result() await client.close() # Print results the the console print("Chat Completions:") - print(f"choices[0].message.content: {result.choices[0].message.content}") - print(f"choices[0].message.role: {result.choices[0].message.role}") - print(f"choices[0].finish_reason: {result.choices[0].finish_reason}") - print(f"choices[0].index: {result.choices[0].index}") - print(f"id: {result.id}") - print(f"created: {result.created}") - print(f"model: {result.model}") - print(f"object: {result.object}") - print(f"usage.capacity_type: {result.usage.capacity_type}") - print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") - print(f"usage.completion_tokens: {result.usage.completion_tokens}") - print(f"usage.total_tokens: {result.usage.total_tokens}") + print(f"choices[0].message.content: {response.choices[0].message.content}") + print(f"choices[0].message.role: {response.choices[0].message.role}") + print(f"choices[0].finish_reason: {response.choices[0].finish_reason}") + print(f"choices[0].index: {response.choices[0].index}") + print(f"id: {response.id}") + print(f"created: {response.created}") + print(f"model: {response.model}") + print(f"object: {response.object}") + print(f"usage.capacity_type: {response.usage.capacity_type}") + print(f"usage.prompt_tokens: {response.usage.prompt_tokens}") + print(f"usage.completion_tokens: {response.usage.completion_tokens}") + print(f"usage.total_tokens: {response.usage.total_tokens}") async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index ed5f15a1f15e..296125aade9b 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -60,13 +60,12 @@ async def 
sample_chat_completions_streaming_async(): await asyncio.sleep(0.1) print("Waiting...") - # Get the result - result = future.result() + # Get the response + response = future.result() - # Iterate on the result to get chat completion updates, as they arrive from the service - async for update in result: - if update.choices[0].delta.content: - print(update.choices[0].delta.content, end="") + # Iterate on the response to get chat completion updates, as they arrive from the service + async for update in response: + print(update.choices[0].delta.content or "", end="") # Remember to always close the asynchronous client when you are done with it await client.close() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 782c7c23cd2e..6969f939ef90 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -44,23 +44,23 @@ async def sample_embeddings_async(): await asyncio.sleep(0.1) print("Waiting...") - # Get the result - result = future.result() + # Get the response + response = future.result() await client.close() # Print results the the console - print("Embeddings result:") - for item in result.data: + print("Embeddings response:") + for item in response.data: length = len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" ) - print(f"id: {result.id}") - print(f"model: {result.model}") - print(f"object: {result.object}") - print(f"usage.input_tokens: {result.usage.input_tokens}") - print(f"usage.prompt_tokens: {result.usage.prompt_tokens}") - print(f"usage.total_tokens: {result.usage.total_tokens}") + print(f"id: {response.id}") + print(f"model: {response.model}") + print(f"object: {response.object}") + print(f"usage.input_tokens: {response.usage.input_tokens}") + print(f"usage.prompt_tokens: {response.usage.prompt_tokens}") + print(f"usage.total_tokens: {response.usage.total_tokens}") async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index 9ca1f1e52ffa..ce76efde8a2c 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -57,11 +57,11 @@ async def sample_image_embeddings_async(): await asyncio.sleep(0.1) print("Waiting...") - # Get the result - result = future.result() + # Get the response + response = future.result() await client.close() - for item in result.data: + for item in response.data: length = len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index f239fdafeaed..dc43db94d4a7 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -37,14 +37,14 @@ def sample_chat_completions(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - result = client.create( + response = client.create( messages=[ SystemMessage(content="You are a helpful assistant."), 
UserMessage(content="How many feet are in a mile?"), ] ) - print(result.choices[0].message.content) + print(response.choices[0].message.content) # [END chat_completions] diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index 5eb173b14f8a..104ba60836c4 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -40,9 +40,9 @@ def sample_chat_completions_from_input_bytes(): # Make a chat completion call, by directly providing the # HTTP request body as IO[bytes], containing chat messages. - result = client.create(read_text_file("example_chat.json")) + response = client.create(read_text_file("example_chat.json")) - print(result.choices[0].message.content) + print(response.choices[0].message.content) def read_text_file(file_path: str) -> io.BytesIO: diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index 8b5f51d2a28d..212bbf4802b0 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -54,9 +54,9 @@ def sample_chat_completions_from_input_json(): # Make a chat completion call, by directly providing the # HTTP request body as IO[bytes], containing chat messages. - result = client.create(json_messages) + response = client.create(json_messages) - print(result.choices[0].message.content) + print(response.choices[0].message.content) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 9ed9ed275d41..1d83113dd6ef 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -37,18 +37,17 @@ def sample_chat_completions_streaming(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - result = client.create_streaming( + response = client.create_streaming( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), ] ) - for update in result: - if update.choices[0].delta.content: - print(update.choices[0].delta.content, end="") + for update in response: + print(update.choices[0].delta.content or "", end="") - result.close() + response.close() # [END chat_completions_streaming] diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py index 52d40590b580..2730a92563b8 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py @@ -39,14 +39,14 @@ def sample_chat_completions_with_client_generator(): client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key)) if isinstance(client, ChatCompletionsClient): - result = client.create( + response = client.create( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ] ) - 
print(result.choices[0].message.content) + print(response.choices[0].message.content) # [END chat_completions_with_client_generator] diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py index e48bd86d0b4b..d196ab0f8801 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py @@ -39,14 +39,14 @@ def sample_chat_completions_with_entra_id_auth(): client = ChatCompletionsClient(endpoint=endpoint, credential=default_azure_credential) - result = client.create( + response = client.create( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ] ) - print(result.choices[0].message.content) + print(response.choices[0].message.content) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py index a375d86f78c9..f4372922a874 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -44,14 +44,14 @@ def sample_chat_completions_with_history(): UserMessage(content="What year was construction of the international space station mostly done?"), ] - result = client.create(messages=messages) - print(result.choices[0].message.content) + response = client.create(messages=messages) + print(response.choices[0].message.content) - messages.append(AssistantMessage(content=result.choices[0].message.content)) + messages.append(AssistantMessage(content=response.choices[0].message.content)) messages.append(UserMessage(content="And what was the estimated cost to build it?")) - result = client.create(messages=messages) - print(result.choices[0].message.content) + response = client.create(messages=messages) + print(response.choices[0].message.content) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 6669df784b8e..f5e89b924f34 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -113,24 +113,24 @@ def get_flight_info(origin_city: str, destination_city: str): UserMessage(content="What are the next flights from Seattle to Miami and from Seattle to Orlando?"), ] - result = client.create( + response = client.create( messages=messages, tools=[flight_info], # tool_choice=ChatCompletionsNamedToolSelection(type="function") # Cohere model does not support ) # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model - while result.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: + while response.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: # Append the previous model response to the chat history - if result.choices[0].message.tool_calls is not None: + if response.choices[0].message.tool_calls is not None: # TODO: Remove the need to set content="" - messages.append(AssistantMessage(content="", tool_calls=result.choices[0].message.tool_calls)) + messages.append(AssistantMessage(content="", tool_calls=response.choices[0].message.tool_calls)) # Make new 
function call(s) as needed. If parallel function calling is supported by the model, # we may have more than one tool call request. - if result.choices[0].message.tool_calls is not None: - for tool_call in result.choices[0].message.tool_calls: + if response.choices[0].message.tool_calls is not None: + for tool_call in response.choices[0].message.tool_calls: if hasattr(tool_call, "function"): function_name = tool_call.function.name function_args = json.loads(tool_call.function.arguments.replace("'", '"')) @@ -148,12 +148,12 @@ def get_flight_info(origin_city: str, destination_city: str): ) # With the additional tools information on hand, get another response from the model - result = client.create( + response = client.create( messages=messages, tools=[flight_info], tool_choice=ChatCompletionsToolSelectionPreset.AUTO ) # Print the final response - print(result.choices[0].message.content) + print(response.choices[0].message.content) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index 66c112bb7692..740c62d0273e 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -36,9 +36,9 @@ def sample_embeddings(): client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - result = client.create(input=["first phrase", "second phrase", "third phrase"]) + response = client.create(input=["first phrase", "second phrase", "third phrase"]) - for item in result.data: + for item in response.data: length = len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py index 835d96b3d864..c7b514a6b5b7 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py @@ -43,12 +43,12 @@ def sample_image_embeddings(): client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - result = client.create(input=[ + response = client.create(input=[ EmbeddingInput(image=image1), EmbeddingInput(image=image2) ]) - for item in result.data: + for item in response.data: length = len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " From b56ca82254fd9a0009731f7ff9b2a694bb879931 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 16 May 2024 09:56:46 -0700 Subject: [PATCH 067/112] Implement load_client and load_async_client --- sdk/ai/azure-ai-inference/README.md | 22 +++--- .../azure/ai/inference/_patch.py | 64 +++++++++++----- sdk/ai/azure-ai-inference/samples/README.md | 3 +- .../async_samples/sample_load_client_async.py | 74 +++++++++++++++++++ ...ent_generator.py => sample_load_client.py} | 23 +++--- 5 files changed, 146 insertions(+), 40 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py rename sdk/ai/azure-ai-inference/samples/{sample_chat_completions_with_client_generator.py => sample_load_client.py} (69%) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 10b74fa1dc4a..15864c731852 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -107,7 +107,7 @@ TODO: Add overview and link to 
explain image embeddings. Embeddings operations target the URL route `images/embeddings` on the provided endpoint. -### Client generator +### Loading a client TODO @@ -120,7 +120,7 @@ In the following sections you will find simple examples of: * [Text Embeddings](#embeddings-example) * [Image Embeddings](#image-embeddings-example) * [Get model information](#get-model-information-example) -* [Create a client using the ClientGenerator](#create-a-client-using-the-clientgenerator) +* [Loading a client using `load_client` function](#loading-a-client-using-load_client-function) The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients). Only mandatory input settings are shown for simplicity. @@ -176,8 +176,7 @@ response = client.create_streaming( ) for update in response: - if update.choices[0].delta.content: - print(update.choices[0].delta.content, end="") + print(update.choices[0].delta.content or "", end="") response.close() ``` @@ -282,18 +282,23 @@ print(f"Model type: {model_info.model_type}") -### Create a client using the ClientGenerator +### Loading a client using `load_client` function -Instead of creating a specific client directly (`ChatCompletionsClient`, `EmbeddingsClient`) you can use the `ClientGenerator.from_endpoint` method to create the appropriate client associated with the provided endpoint URL. In this example we use it to create a `ChatCompletionsClient`: +Instead of creating a specific client directly (`ChatCompletionsClient`, `EmbeddingsClient` or `ImageEmbeddingsClient`) you can use the `load_client` function to create the appropriate synchronous client associated with the provided endpoint URL. In the example below, we use it to create a synchronous `ChatCompletionsClient`. Similarly, call the `load_async_client` function to get the appropriate asynchronous client. - +The `load_client` function makes a REST API call to the `/info` route on the given endpoint, which provides the model type. Based on the model type, the correct client is returned. In most cases you know the model type (chat completions, embeddings, image embeddings) so you can create the appropriate client directly and avoid doing this additional REST API call.
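For readers who want to picture that dispatch, here is a minimal illustrative sketch (not the shipped implementation; the helper name `pick_client` is hypothetical, and it assumes `ModelType` is importable from `azure.ai.inference.models`):

```python
# Minimal sketch of the model-type dispatch that load_client performs (illustrative only).
from azure.ai.inference import ChatCompletionsClient, EmbeddingsClient, ImageEmbeddingsClient
from azure.ai.inference.models import ModelType  # assumed export
from azure.core.credentials import AzureKeyCredential


def pick_client(endpoint: str, key: str):  # hypothetical helper, not part of the package
    # Any of the clients can query the /info route; use the chat client as a probe.
    probe = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
    model_type = probe.get_model_info().model_type  # this is the extra REST API call
    if model_type == ModelType.EMBEDDINGS:
        return EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
    if model_type == ModelType.IMAGE_EMBEDDINGS:
        return ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
    return probe  # a chat completions model
```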
+ + ```python -from azure.ai.inference import ClientGenerator, ChatCompletionsClient +from azure.ai.inference import load_client, ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential -client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key)) +client = load_client(endpoint=endpoint, credential=AzureKeyCredential(key)) + +# This should create a client of type `ChatCompletionsClient` +print(f"Created client of type `{type(client).__name__}`.") if isinstance(client, ChatCompletionsClient): response = client.create( diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 9f34748a79ff..a0ef6fc80f26 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -6,6 +6,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import asyncio import json import logging import sys @@ -30,6 +31,9 @@ from ._operations._operations import build_chat_completions_create_request from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient, ImageEmbeddingsClient +from .aio._client import ChatCompletionsClient as AsyncChatCompletionsClient +from .aio._client import EmbeddingsClient as AsyncEmbeddingsClient +from .aio._client import ImageEmbeddingsClient as AsyncImageEmbeddingsClient if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -43,25 +47,44 @@ logger = logging.getLogger(__name__) -class ClientGenerator: - @staticmethod - def from_endpoint( - endpoint: str, credential: AzureKeyCredential, **kwargs: Any - ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]: - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... - model_info = client.get_model_info() - logger.info("model_info=%s", model_info) - if model_info.model_type in (None, ''): - raise ValueError( - "The AI model information is missing a value for `model type`. Cannot create an appropriate client." - ) - if model_info.model_type == _models.ModelType.CHAT: - return client - if model_info.model_type == _models.ModelType.EMBEDDINGS: - return EmbeddingsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - return ImageEmbeddingsClient(endpoint, credential, **kwargs) - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") +def load_client( + endpoint: str, credential: AzureKeyCredential, **kwargs: Any +) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... + model_info = client.get_model_info() + logger.info("model_info=%s", model_info) + if model_info.model_type in (None, ''): + raise ValueError( + "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
+ ) + # TODO: Remove "completions" once Mistral Large fixes their model type + if model_info.model_type == _models.ModelType.CHAT or "completion": + return client + if model_info.model_type == _models.ModelType.EMBEDDINGS: + return EmbeddingsClient(endpoint, credential, **kwargs) + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + return ImageEmbeddingsClient(endpoint, credential, **kwargs) + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + + +def load_async_client( + endpoint: str, credential: AzureKeyCredential, **kwargs: Any +) -> Union[AsyncChatCompletionsClient, AsyncEmbeddingsClient, AsyncImageEmbeddingsClient]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... + model_info = client.get_model_info() + logger.info("model_info=%s", model_info) + if model_info.model_type in (None, ''): + raise ValueError( + "The AI model information is missing a value for `model type`. Cannot create an appropriate client." + ) + # TODO: Remove "completions" once Mistral Large fixes their model type + if model_info.model_type == _models.ModelType.CHAT or "completion": + return AsyncChatCompletionsClient(endpoint, credential, **kwargs) + if model_info.model_type == _models.ModelType.EMBEDDINGS: + return AsyncEmbeddingsClient(endpoint, credential, **kwargs) + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + return AsyncImageEmbeddingsClient(endpoint, credential, **kwargs) + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -406,7 +429,8 @@ def create_streaming( __all__: List[str] = [ - "ClientGenerator", + "load_client", + "load_async_client", "ChatCompletionsClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 03dae2b02ddc..bc7058f98caf 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -24,7 +24,7 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. | |[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | |[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how do use a tool (function) in chat completions, for an AI model that supports tools | -|[sample_chat_completions_with_client_generator.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py) | Shows how do use the `ClientGenerator.from_endpoint()` to create the appropriate client based on the provided endpoint URL. In this example, it creates a `ChatCompletionsClient`. 
| +|[sample_load_client.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | |[sample_image_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py) | One image embeddings operation, on two input images, using a synchronous client. | |[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. Similarly can be done with all other clients. | @@ -37,6 +37,7 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | |[sample_image_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py) | One image embeddings operation, on two input images, using an asynchronous client. | +|[sample_load_client_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py) | Shows how to use the function `load_async_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`. | ## Prerequisites diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py new file mode 100644 index 000000000000..2164aac5dbfb --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py @@ -0,0 +1,74 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to create an asynchronous client from a given + endpoint URL using the load_async_client() function. + In this sample, we get an asynchronous client and do a chat completions call. + +USAGE: + python sample_load_client_async.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
+""" +import asyncio + +async def sample_load_client_async(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference import load_async_client + from azure.ai.inference.aio import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.core.credentials import AzureKeyCredential + + client = load_async_client(endpoint=endpoint, credential=AzureKeyCredential(key)) + + # This should create a client of type `ChatCompletionsClient` + print(f"Created client of type `{type(client).__name__}`.") + + # TODO: Why does this return False? + #if isinstance(client, azure.ai.inference.aio.ChatCompletionsClient): + # Do a single chat completion operation. Start the operation and get a Future object. + future = asyncio.ensure_future( + client.create( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) + ) + + # Loop until the operation is done + while not future.done(): + await asyncio.sleep(0.1) + print("Waiting...") + + # Get the response + response = future.result() + await client.close() + + # Print results the the console + print(f"choices[0].message.content: {response.choices[0].message.content}") + + +async def main(): + await sample_load_client_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py similarity index 69% rename from sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py rename to sdk/ai/azure-ai-inference/samples/sample_load_client.py index 2730a92563b8..cd7b1fa6a22b 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_client_generator.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -4,12 +4,12 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to get a chat completions response from - the service using a synchronous client that was obtained from a - `ClientGenerator.from_endpoint` call. + This sample demonstrates how to create a client from a given endpoint URL using + the load_client() function imported from azure.ai.inference. + In this sample, we get a synchronous client and do a chat completions call. 
 USAGE:
-    python sample_chat_completions_with_client_generator
+    python sample_load_client.py
 
     Set these two environment variables before running the sample:
     1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
@@ -20,7 +20,7 @@
 """
 
-def sample_chat_completions_with_client_generator():
+def sample_load_client():
     import os
 
     try:
@@ -31,12 +31,15 @@ def sample_chat_completions_with_client_generator():
         print("Set them before running this sample.")
         exit()
 
-    # [START chat_completions_with_client_generator]
-    from azure.ai.inference import ClientGenerator, ChatCompletionsClient
+    # [START load_client]
+    from azure.ai.inference import load_client, ChatCompletionsClient
     from azure.ai.inference.models import SystemMessage, UserMessage
     from azure.core.credentials import AzureKeyCredential
 
-    client = ClientGenerator.from_endpoint(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = load_client(endpoint=endpoint, credential=AzureKeyCredential(key))
+
+    # This should create a client of type `ChatCompletionsClient`
+    print(f"Created client of type `{type(client).__name__}`.")
 
     if isinstance(client, ChatCompletionsClient):
         response = client.create(
@@ -47,8 +50,8 @@ def sample_load_client():
         )
 
         print(response.choices[0].message.content)
-    # [END chat_completions_with_client_generator]
+    # [END load_client]
 
 
 if __name__ == "__main__":
-    sample_chat_completions_with_client_generator()
+    sample_load_client()

From aafcc37c74aa67db183eba7aa9e99ae07e43ebc5 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 16 May 2024 10:30:39 -0700
Subject: [PATCH 068/112] Make three BaseStreamingChatCompletions variables
 (constants) private

---
 .../azure/ai/inference/models/_patch.py       | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
index 6e876d00253b..4d2b41e6b894 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py
@@ -26,14 +26,14 @@ class BaseStreamingChatCompletions:
     """
 
     # Enable detailed logs of SSE parsing. For development only, should be `False` by default.
- ENABLE_CLASS_LOGS = False + _ENABLE_CLASS_LOGS = False # The prefix of each line in the SSE stream that contains a JSON string # to deserialize into a ChatCompletionsUpdate object - SSE_DATA_EVENT_PREFIX = "data: " + _SSE_DATA_EVENT_PREFIX = "data: " # The line indicating the end of the SSE stream - SSE_DATA_EVENT_DONE = "data: [DONE]" + _SSE_DATA_EVENT_DONE = "data: [DONE]" def __init__(self): self._queue: "queue.Queue[_models.ChatCompletionsUpdate]" = queue.Queue() @@ -50,7 +50,7 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: line_list: List[str] = re.split(r"(?<=\n)", element.decode("utf-8")) for index, line in enumerate(line_list): - if self.ENABLE_CLASS_LOGS: + if self._ENABLE_CLASS_LOGS: logger.debug("[Original line] %s", repr(line)) if index == 0: @@ -61,17 +61,17 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: self._incomplete_json = line return False - if self.ENABLE_CLASS_LOGS: + if self._ENABLE_CLASS_LOGS: logger.debug("[Modified line] %s", repr(line)) if line == "\n": # Empty line, indicating flush output to client continue - if not line.startswith(self.SSE_DATA_EVENT_PREFIX): + if not line.startswith(self._SSE_DATA_EVENT_PREFIX): raise ValueError(f"SSE event not supported (line `{line}`)") - if line.startswith(self.SSE_DATA_EVENT_DONE): - if self.ENABLE_CLASS_LOGS: + if line.startswith(self._SSE_DATA_EVENT_DONE): + if self._ENABLE_CLASS_LOGS: logger.debug("[Done]") return True @@ -80,10 +80,10 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: # and add it to the queue. self._queue.put( # pylint: disable=W0212 # Access to a protected member _deserialize of a client class - _models.ChatCompletionsUpdate._deserialize(json.loads(line[len(self.SSE_DATA_EVENT_PREFIX) : -1]), []) + _models.ChatCompletionsUpdate._deserialize(json.loads(line[len(self._SSE_DATA_EVENT_PREFIX) : -1]), []) ) - if self.ENABLE_CLASS_LOGS: + if self._ENABLE_CLASS_LOGS: logger.debug("[Added to queue]") return False @@ -111,7 +111,7 @@ def __next__(self) -> _models.ChatCompletionsUpdate: return self._queue.get() def _read_next_block(self) -> bool: - if self.ENABLE_CLASS_LOGS: + if self._ENABLE_CLASS_LOGS: logger.debug("[Reading next block]") try: # Use 'cast' to make 'pyright' error go away @@ -153,7 +153,7 @@ async def __anext__(self) -> _models.ChatCompletionsUpdate: return self._queue.get() async def _read_next_block_async(self) -> bool: - if self.ENABLE_CLASS_LOGS: + if self._ENABLE_CLASS_LOGS: logger.debug("[Reading next block]") try: # Use 'cast' to make 'pyright' error go away From b5484eec8865ecbb9c88c267fcabe084924aa550 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 16 May 2024 17:48:24 -0700 Subject: [PATCH 069/112] sync and async versions of load_client --- .../azure/ai/inference/_patch.py | 33 +++---------------- .../azure/ai/inference/aio/_patch.py | 26 +++++++++++++++ ...sample_chat_completions_streaming_async.py | 9 +---- .../async_samples/sample_load_client_async.py | 11 +++---- .../samples/sample_load_client.py | 2 +- sdk/ai/azure-ai-inference/tests/README.md | 21 +++++++----- 6 files changed, 49 insertions(+), 53 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index a0ef6fc80f26..8d9aac95d440 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -6,7 +6,6 @@ Follow our quickstart for examples: 
https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio import json import logging import sys @@ -31,9 +30,6 @@ from ._operations._operations import build_chat_completions_create_request from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient, ImageEmbeddingsClient -from .aio._client import ChatCompletionsClient as AsyncChatCompletionsClient -from .aio._client import EmbeddingsClient as AsyncEmbeddingsClient -from .aio._client import ImageEmbeddingsClient as AsyncImageEmbeddingsClient if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -43,8 +39,7 @@ _Unset: Any = object() _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False - -logger = logging.getLogger(__name__) +_LOGGER = logging.getLogger(__name__) def load_client( @@ -52,14 +47,15 @@ def load_client( ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]: client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() - logger.info("model_info=%s", model_info) + client.close() + _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ''): raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." ) # TODO: Remove "completions" once Mistral Large fixes their model type if model_info.model_type == _models.ModelType.CHAT or "completion": - return client + return ChatCompletionsClient(endpoint, credential, **kwargs) if model_info.model_type == _models.ModelType.EMBEDDINGS: return EmbeddingsClient(endpoint, credential, **kwargs) if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: @@ -67,26 +63,6 @@ def load_client( raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") -def load_async_client( - endpoint: str, credential: AzureKeyCredential, **kwargs: Any -) -> Union[AsyncChatCompletionsClient, AsyncEmbeddingsClient, AsyncImageEmbeddingsClient]: - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... - model_info = client.get_model_info() - logger.info("model_info=%s", model_info) - if model_info.model_type in (None, ''): - raise ValueError( - "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
- ) - # TODO: Remove "completions" once Mistral Large fixes their model type - if model_info.model_type == _models.ModelType.CHAT or "completion": - return AsyncChatCompletionsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.EMBEDDINGS: - return AsyncEmbeddingsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - return AsyncImageEmbeddingsClient(endpoint, credential, **kwargs) - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") - - class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload @@ -430,7 +406,6 @@ def create_streaming( __all__: List[str] = [ "load_client", - "load_async_client", "ChatCompletionsClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index ef8be533d33f..d6d233d97c06 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -7,11 +7,13 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ import json +import logging import sys from io import IOBase from typing import Any, Dict, Union, IO, List, Optional, overload from azure.core.pipeline import PipelineResponse +from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from azure.core.exceptions import ( @@ -25,6 +27,7 @@ from .. import models as _models from .._model_base import SdkJSONEncoder from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated +from ._client import EmbeddingsClient, ImageEmbeddingsClient from .._operations._operations import build_chat_completions_create_request if sys.version_info >= (3, 9): @@ -33,6 +36,28 @@ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() +_LOGGER = logging.getLogger(__name__) + + +async def load_client( + endpoint: str, credential: AzureKeyCredential, **kwargs: Any +) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... + model_info = await client.get_model_info() + await client.close() + _LOGGER.info("model_info=%s", model_info) + if model_info.model_type in (None, ''): + raise ValueError( + "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
+ ) + # TODO: Remove "completions" once Mistral Large fixes their model type + if model_info.model_type == _models.ModelType.CHAT or "completion": + return ChatCompletionsClient(endpoint, credential, **kwargs) + if model_info.model_type == _models.ModelType.EMBEDDINGS: + return EmbeddingsClient(endpoint, credential, **kwargs) + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + return ImageEmbeddingsClient(endpoint, credential, **kwargs) + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -377,6 +402,7 @@ async def create_streaming( __all__: List[str] = [ + "load_client", "ChatCompletionsClient" ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 296125aade9b..85dd3714cb9e 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -25,7 +25,6 @@ async def sample_chat_completions_streaming_async(): from azure.ai.inference.aio import ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate from azure.core.credentials import AzureKeyCredential - from azure.core.pipeline.transport import AsyncioRequestsTransport # Read the values of your model endpoint and key from environment variables try: @@ -36,14 +35,8 @@ async def sample_chat_completions_streaming_async(): print("Set them before running this sample.") exit() - # TODO: Remove this. - # Example of how the app can change the HTTP buffer size. The default is 4096 bytes. Reducing it here to 64 bytes - # does not improve the latency of the streamed results. Is there caching happening on the service? or is the AI model - # itself producing output tokens at high-latency? - transport = AsyncioRequestsTransport(connection_data_block_size=64) - # Create chat completions client for synchronous operations - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), transport=transport) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single streaming chat completion operation. Start the operation and get a Future object. future = asyncio.ensure_future( diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py index 2164aac5dbfb..eca14da6c1a0 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py @@ -4,8 +4,8 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to create an asynchronous client from a given - endpoint URL using the load_async_client() function. + This sample demonstrates how to create an asynchronous client from a given endpoint URL using + the load_client() function, imported from azure.ai.inference.aio. In this sample, we get an asynchronous client and do a chat completions call. 
USAGE: @@ -31,18 +31,17 @@ async def sample_load_client_async(): print("Set them before running this sample.") exit() - from azure.ai.inference import load_async_client - from azure.ai.inference.aio import ChatCompletionsClient + from azure.ai.inference.aio import load_client, ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential - client = load_async_client(endpoint=endpoint, credential=AzureKeyCredential(key)) + client = await load_client(endpoint=endpoint, credential=AzureKeyCredential(key)) # This should create a client of type `ChatCompletionsClient` print(f"Created client of type `{type(client).__name__}`.") # TODO: Why does this return False? - #if isinstance(client, azure.ai.inference.aio.ChatCompletionsClient): + #if isinstance(client, ChatCompletionsClient): # Do a single chat completion operation. Start the operation and get a Future object. future = asyncio.ensure_future( client.create( diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index cd7b1fa6a22b..9b583ee0e4ca 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -5,7 +5,7 @@ """ DESCRIPTION: This sample demonstrates how to create a client from a given endpoint URL using - the load_client() function imported from azure.ai.inference. + the load_client() function, imported from azure.ai.inference. In this sample, we get a synchronous client and do a chat completions call. USAGE: diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index 0a69a8d23684..c3687487391a 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -12,20 +12,20 @@ The live tests were written against the AI models mentioned below. You will need ## Setup -* Clone or download this sample repository. -* Open a command prompt window in the folder `sdk\ai\azure-ai-inference`. -* If you want to run tests against the latest published client library, install it by running: +- Clone or download this sample repository. +- Open a command prompt window in the folder `sdk\ai\azure-ai-inference`. +- If you want to run tests against the latest published client library, install it by running: ```bash pip install azure-ai-inference ``` -* If you want to run tests against a locally built client library: - * First build the wheel: +- If you want to run tests against a locally built client library: + - First build the wheel: ```bash pip install wheel pip install -r dev_requirements.txt python setup.py bdist_wheel ``` - * Then install the resulting local wheel (update version `1.0.0b1` to the current one): + - Then install the resulting local wheel (update version `1.0.0b1` to the current one): ```bash pip install dist\azure_ai_inference-1.0.0b1-py3-none-any.whl --user --force-reinstall ``` @@ -35,7 +35,8 @@ The live tests were written against the AI models mentioned below. You will need The tests read endpoints and keys from environemt variables. See the [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/README.md#set-environment-variables) section in the samples README.md file for the full list of environment variables that need to be set for all tests to pass. In addition, the following environment values **must be** defined, although not used. 
Assign any value to them: -``` + +```bash set AI_TENANT_ID=not-used set AI_CLIENT_ID=not-used set AI_CLIENT_SECRET=not-used @@ -44,7 +45,8 @@ set AI_CLIENT_SECRET=not-used ## Configure test proxy Configure the test proxy to run live service tests without recordings: -``` + +```bash set AZURE_TEST_RUN_LIVE=true set AZURE_SKIP_LIVE_RECORDING=true set PROXY_URL=http://localhost:5000 @@ -54,7 +56,8 @@ set AZURE_TEST_USE_CLI_AUTH=true ## Run tests To run all tests, type: -``` + +```bash pytest ``` From 0a82fe2e7610ad209f86976af9a6c12fe32a2dfe Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 16 May 2024 21:27:21 -0700 Subject: [PATCH 070/112] Remove wait loop in async samples. Simplify tool sample. Other minor sample changes --- .../sample_chat_completions_async.py | 41 ++----- ...sample_chat_completions_streaming_async.py | 21 +--- .../async_samples/sample_embeddings_async.py | 26 ++--- .../sample_image_embeddings_async.py | 24 ++--- .../async_samples/sample_load_client_async.py | 23 ++-- .../samples/sample_chat_completions.py | 1 - ...ample_chat_completions_from_input_bytes.py | 1 - ...sample_chat_completions_from_input_json.py | 1 - .../sample_chat_completions_streaming.py | 1 - ...ple_chat_completions_with_entra_id_auth.py | 1 - .../sample_chat_completions_with_history.py | 1 - .../sample_chat_completions_with_tools.py | 100 +++++++----------- .../samples/sample_embeddings.py | 1 - .../samples/sample_get_model_info.py | 1 - .../samples/sample_image_embeddings.py | 1 - .../samples/sample_load_client.py | 1 - 16 files changed, 77 insertions(+), 168 deletions(-) diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 2335846fc3a9..59955aa05081 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -4,7 +4,7 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to get a chat completion response + This sample demonstrates how to get a chat completion response from the service using an asynchronous client. USAGE: @@ -19,7 +19,6 @@ """ import asyncio - async def sample_chat_completions_async(): import os from azure.ai.inference.aio import ChatCompletionsClient @@ -38,41 +37,19 @@ async def sample_chat_completions_async(): # Create a Model Client for synchronous operations client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - # Do a single chat completion operation. Start the operation and get a Future object. 
-    future = asyncio.ensure_future(
-        client.create(
-            messages=[
-                SystemMessage(content="You are a helpful assistant."),
-                UserMessage(content="How many feet are in a mile?"),
-            ]
-        )
+    # Do a single chat completion operation
+    response = await client.create(
+        messages=[
+            SystemMessage(content="You are a helpful assistant."),
+            UserMessage(content="How many feet are in a mile?"),
+        ]
     )
 
-    # Loop until the operation is done
-    while not future.done():
-        await asyncio.sleep(0.1)
-        print("Waiting...")
+    # Print response to the console
+    print(response.choices[0].message.content)
 
-    # Get the response
-    response = future.result()
     await client.close()
 
-    # Print results the the console
-    print("Chat Completions:")
-    print(f"choices[0].message.content: {response.choices[0].message.content}")
-    print(f"choices[0].message.role: {response.choices[0].message.role}")
-    print(f"choices[0].finish_reason: {response.choices[0].finish_reason}")
-    print(f"choices[0].index: {response.choices[0].index}")
-    print(f"id: {response.id}")
-    print(f"created: {response.created}")
-    print(f"model: {response.model}")
-    print(f"object: {response.object}")
-    print(f"usage.capacity_type: {response.usage.capacity_type}")
-    print(f"usage.prompt_tokens: {response.usage.prompt_tokens}")
-    print(f"usage.completion_tokens: {response.usage.completion_tokens}")
-    print(f"usage.total_tokens: {response.usage.total_tokens}")
-
-
 async def main():
     await sample_chat_completions_async()
 
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
index 85dd3714cb9e..2874812efc14 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py
@@ -19,6 +19,7 @@
 """
 import asyncio
 
+
 async def sample_chat_completions_streaming_async():
     import os
     from azure.ai.inference.aio import ChatCompletionsClient
@@ -38,13 +39,13 @@ async def sample_chat_completions_streaming_async():
     client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
     # Do a single streaming chat completion operation. Start the operation and get a Future object.
- future = asyncio.ensure_future( - client.create_streaming( - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ] - ) + response = await client.create_streaming( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), + ] ) - # Loop until you get the HTTP response headers from the service - while not future.done(): - await asyncio.sleep(0.1) - print("Waiting...") - - # Get the response - response = future.result() - # Iterate on the response to get chat completion updates, as they arrive from the service async for update in response: print(update.choices[0].delta.content or "", end="") diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 6969f939ef90..354f1fb53ebd 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -18,7 +18,6 @@ """ import asyncio - async def sample_embeddings_async(): import os from azure.ai.inference.aio import EmbeddingsClient @@ -37,31 +36,22 @@ async def sample_embeddings_async(): client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single embeddings operation. Start the operation and get a Future object. - future = asyncio.ensure_future(client.create(input=["first phrase", "second phrase", "third phrase"])) - - # Loop until the operation is done - while not future.done(): - await asyncio.sleep(0.1) - print("Waiting...") + response = await client.create( + input=[ + "first phrase", + "second phrase", + "third phrase" + ] + ) - # Get the response - response = future.result() - await client.close() - - # Print results the the console print("Embeddings response:") for item in response.data: length = len(item.embedding) print( f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" ) - print(f"id: {response.id}") - print(f"model: {response.model}") - print(f"object: {response.object}") - print(f"usage.input_tokens: {response.usage.input_tokens}") - print(f"usage.prompt_tokens: {response.usage.prompt_tokens}") - print(f"usage.total_tokens: {response.usage.total_tokens}") + await client.close() async def main(): await sample_embeddings_async() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index ce76efde8a2c..5ebe815b2610 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -43,24 +43,14 @@ async def sample_image_embeddings_async(): client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single image embeddings operation. Start the operation and get a Future object. 
-    future = asyncio.ensure_future(
-        client.create(
-            input=[
-                EmbeddingInput(image=image1),
-                EmbeddingInput(image=image2)
-            ]
-        )
+    response = await client.create(
+        input=[
+            EmbeddingInput(image=image1),
+            EmbeddingInput(image=image2)
+        ]
     )
 
-    # Loop until the operation is done
-    while not future.done():
-        await asyncio.sleep(0.1)
-        print("Waiting...")
-
-    # Get the response
-    response = future.result()
-    await client.close()
-
+    print("Embeddings response:")
     for item in response.data:
         length = len(item.embedding)
         print(
             f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
@@ -68,6 +58,8 @@ async def sample_image_embeddings_async():
             f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
         )
 
+    await client.close()
+
 
 async def main():
     await sample_image_embeddings_async()
 
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py
index eca14da6c1a0..332724c3ca4d 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py
@@ -43,27 +43,18 @@ async def sample_load_client_async():
     # TODO: Why does this return False?
     #if isinstance(client, ChatCompletionsClient):
     # Do a single chat completion operation. Start the operation and get a Future object.
-    future = asyncio.ensure_future(
-        client.create(
-            messages=[
-                SystemMessage(content="You are a helpful assistant."),
-                UserMessage(content="How many feet are in a mile?"),
-            ]
-        )
+    response = await client.create(
+        messages=[
+            SystemMessage(content="You are a helpful assistant."),
+            UserMessage(content="How many feet are in a mile?"),
+        ]
     )
 
-    # Loop until the operation is done
-    while not future.done():
-        await asyncio.sleep(0.1)
-        print("Waiting...")
+    # Print response to the console
+    print(response.choices[0].message.content)
 
-    # Get the response
-    response = future.result()
     await client.close()
 
-    # Print results the the console
-    print(f"choices[0].message.content: {response.choices[0].message.content}")
-
 
 async def main():
     await sample_load_client_async()
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index dc43db94d4a7..b2e979615834 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -18,7 +18,6 @@
     2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
 """
 
-
 def sample_chat_completions():
     import os
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py
index 104ba60836c4..240afbddffc7 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py
@@ -20,7 +20,6 @@
 """
 import io
 
-
 def sample_chat_completions_from_input_bytes():
     import os
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py
index 212bbf4802b0..4d6e83f5f2e3 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py
@@ -19,7 +19,6 @@
     2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
""" - def sample_chat_completions_from_input_json(): import os from typing import MutableMapping, Any diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 1d83113dd6ef..125a867ca37b 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -18,7 +18,6 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions_streaming(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py index d196ab0f8801..dd7b94f9d385 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py @@ -18,7 +18,6 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions_with_entra_id_auth(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py index f4372922a874..e59ee6c0f34f 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -19,7 +19,6 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions_with_history(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index f5e89b924f34..d2f30ff87c46 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -3,9 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ """ -NOTE: - This sample is still work in progress... - DESCRIPTION: This sample demonstrates how to do chat completions using a synchronous client, with the assistance of tools. In this sample, we use a mock function tool to retrieve @@ -23,19 +20,10 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_chat_completions_with_tools(): import os import json - # Enable unredacted logging, including full request and response payloads (delete me!) - import sys - import logging - - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(stream=sys.stdout)) - try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] key = os.environ["CHAT_COMPLETIONS_KEY"] @@ -47,10 +35,7 @@ def sample_chat_completions_with_tools(): from azure.ai.inference import ChatCompletionsClient from azure.ai.inference.models import ( AssistantMessage, - ChatCompletionsFunctionToolCall, ChatCompletionsFunctionToolDefinition, - ChatCompletionsNamedToolSelection, - ChatCompletionsToolSelectionPreset, CompletionsFinishReason, FunctionDefinition, SystemMessage, @@ -59,9 +44,6 @@ def sample_chat_completions_with_tools(): ) from azure.core.credentials import AzureKeyCredential - # Create a chat completion client. Make sure you selected a model that supports tools. 
-    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
-
     # Define a function that retrieves flight information
     def get_flight_info(origin_city: str, destination_city: str):
         """
@@ -69,21 +51,16 @@ def get_flight_info(origin_city: str, destination_city: str):
         flight between two cities.
 
         Parameters:
-        origin_city (str): The name of the city where the flight originates
-        destination_city (str): The destination city
+        origin_city (str): The name of the city where the flight originates.
+        destination_city (str): The destination city.
 
         Returns:
-        str: The airline name, fight number, date and time of the next flight between the cities
+        str: The airline name, flight number, date and time of the next flight between the cities.
         """
         if origin_city == "Seattle" and destination_city == "Miami":
             return "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."
-            # return '{"info": "Delta airlines flight number 123 from Seattle to Miami, departing May 7th, 2024 at 10:00 AM."}'
-        elif origin_city == "Seattle" and destination_city == "Orlando":
-            return "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."
-            # return '{"info": "American Airlines flight number 456 from Seattle to Orlando, departing May 8th, 2024 at 2:45 PM."}'
         else:
-            return "I don't have that information."
-            # return '{"into": "I don\'t have that information."}'
+            return "Sorry, I don't have that information."
 
     # Define a 'tool' that the model can use to retrieves flight information
     flight_info = ChatCompletionsFunctionToolDefinition(
@@ -107,53 +84,56 @@ def get_flight_info(origin_city: str, destination_city: str):
         )
     )
 
+    # Create a chat completion client. Make sure you selected a model that supports tools.
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+
     # Make a chat completions call asking for flight information, while providing a tool to handle the request
     messages = [
         SystemMessage(content="You an assistant that helps users find flight information."),
-        UserMessage(content="What are the next flights from Seattle to Miami and from Seattle to Orlando?"),
+        UserMessage(content="What is the next flight from Seattle to Miami?"),
    ]
 
     response = client.create(
         messages=messages,
         tools=[flight_info],
-        # tool_choice=ChatCompletionsNamedToolSelection(type="function") # Cohere model does not support
     )
 
-    # As long as the model keeps requesting tool calls, make tool calls and provide the tool outputs to the model
-    while response.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS:
+    # The model should be asking for tool calls
+    if response.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS:
 
         # Append the previous model response to the chat history
-        if response.choices[0].message.tool_calls is not None:
-            # TODO: Remove the need to set content=""
-            messages.append(AssistantMessage(content="", tool_calls=response.choices[0].message.tool_calls))
-
-        # Make new function call(s) as needed. If parallel function calling is supported by the model,
-        # we may have more than one tool call request.
-        if response.choices[0].message.tool_calls is not None:
-            for tool_call in response.choices[0].message.tool_calls:
-                if hasattr(tool_call, "function"):
-                    function_name = tool_call.function.name
-                    function_args = json.loads(tool_call.function.arguments.replace("'", '"'))
-                    tool_call_id = tool_call.id
-                    print(f"Calling function `{function_name}` with arguments {function_args}")
-                    callable_func = locals()[function_name]
-                    function_response = callable_func(**function_args)
-                    print(f"Function response is: {function_response}")
-
-                    # Provide the tool response to the model, by appending it to the chat history
-                    messages.append(
-                        ToolMessage(
-                            tool_call_id=tool_call_id, content=function_response
-                        )  # json.dumps(function_response)
-                    )
-
-        # With the additional tools information on hand, get another response from the model
-        response = client.create(
-            messages=messages, tools=[flight_info], tool_choice=ChatCompletionsToolSelectionPreset.AUTO
+        messages.append(
+            AssistantMessage(
+                content="",
+                tool_calls=response.choices[0].message.tool_calls
+            )
         )
 
-    # Print the final response
-    print(response.choices[0].message.content)
+        # The tool call should be a function call
+        tool_call = response.choices[0].message.tool_calls[0]
+        if hasattr(tool_call, "function"):
+
+            function_args = json.loads(tool_call.function.arguments.replace("'", '"'))
+            print(f"Calling function `{tool_call.function.name}` with arguments {function_args}")
+            callable_func = locals()[tool_call.function.name]
+
+            function_response = callable_func(**function_args)
+            print(f"Function response = {function_response}")
+
+            # Provide the tool response to the model, by appending it to the chat history
+            messages.append(
+                ToolMessage(
+                    tool_call_id=tool_call.id, content=function_response
+                )  # json.dumps(function_response)
+            )
+
+            # With the additional tools information on hand, get another response from the model
+            response = client.create(
+                messages=messages,
+                tools=[flight_info]
+            )
+
+            print(f"Model response = {response.choices[0].message.content}")
 
 
 if __name__ == "__main__":
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index 740c62d0273e..ec7cb024b08e 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -18,7 +18,6 @@
     2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret.
 """
 
-
 def sample_embeddings():
     import os
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py
index f7aca4e2be47..2d6e27a4ce52 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py
@@ -19,7 +19,6 @@
     2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
 """
 
-
 def sample_get_model_info():
     import os
 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py
index c7b514a6b5b7..4cf42207e169 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py
@@ -18,7 +18,6 @@
     2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret.
""" - def sample_image_embeddings(): import os import base64 diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index 9b583ee0e4ca..585c950b6572 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -19,7 +19,6 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - def sample_load_client(): import os From bf93525b4eec9de79545bb91b073d7827b92ba0f Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 16 May 2024 22:51:03 -0700 Subject: [PATCH 071/112] Re-emit with new operator names --- sdk/ai/azure-ai-inference/README.md | 17 ++++----- .../ai/inference/_operations/_operations.py | 36 +++++++++---------- .../azure/ai/inference/_patch.py | 16 ++++----- .../inference/aio/_operations/_operations.py | 36 +++++++++---------- .../azure/ai/inference/aio/_patch.py | 26 +++++++------- .../sample_chat_completions_async.py | 4 ++- ...sample_chat_completions_streaming_async.py | 3 +- .../async_samples/sample_embeddings_async.py | 10 ++---- .../sample_image_embeddings_async.py | 12 +++---- .../async_samples/sample_load_client_async.py | 5 +-- .../samples/sample_chat_completions.py | 3 +- ...ample_chat_completions_from_input_bytes.py | 3 +- ...sample_chat_completions_from_input_json.py | 3 +- .../sample_chat_completions_streaming.py | 3 +- ...ple_chat_completions_with_entra_id_auth.py | 3 +- .../sample_chat_completions_with_history.py | 5 +-- .../sample_chat_completions_with_tools.py | 19 +++------- .../samples/sample_embeddings.py | 3 +- .../samples/sample_get_model_info.py | 1 + .../samples/sample_image_embeddings.py | 10 +++--- .../samples/sample_load_client.py | 3 +- .../test_model_inference_async_client.py | 10 +++--- .../tests/test_model_inference_client.py | 14 ++++---- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 24 files changed, 119 insertions(+), 128 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 15864c731852..2141c2262648 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -139,7 +139,7 @@ from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.create( +response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), @@ -168,7 +168,7 @@ from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.create_streaming( +response = client.streaming_complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), @@ -199,7 +199,7 @@ from azure.core.credentials import AzureKeyCredential client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.create(input=["first phrase", "second phrase", "third phrase"]) +response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) for item in response.data: length = len(item.embedding) @@ -232,16 +232,13 @@ from azure.ai.inference.models import EmbeddingInput from azure.core.credentials import AzureKeyCredential with open("sample1.png", "rb") as f: - image1:str = 
base64.b64encode(f.read()).decode('utf-8') + image1: str = base64.b64encode(f.read()).decode("utf-8") with open("sample2.png", "rb") as f: - image2:str = base64.b64encode(f.read()).decode('utf-8') + image2: str = base64.b64encode(f.read()).decode("utf-8") client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.create(input=[ - EmbeddingInput(image=image1), - EmbeddingInput(image=image2) -]) +response = client.embedding(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) for item in response.data: length = len(item.embedding) @@ -301,7 +298,7 @@ client = load_client(endpoint=endpoint, credential=AzureKeyCredential(key)) print(f"Created client of type `{type(client).__name__}`.") if isinstance(client, ChatCompletionsClient): - response = client.create( + response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 5d49e52c9a4d..bf963ed016e2 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -42,7 +42,7 @@ _SERIALIZER.client_side_validation = False -def build_chat_completions_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_chat_completions_complete_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -85,7 +85,7 @@ def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_embeddings_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_embeddings_embedding_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -128,7 +128,7 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_image_embeddings_create_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_image_embeddings_embedding_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -174,7 +174,7 @@ def build_image_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload - def create( + def complete( self, body: JSON, *, @@ -302,7 +302,7 @@ def create( """ @overload - def create( + def complete( self, *, messages: List[_models.ChatRequestMessage], @@ -452,7 +452,7 @@ def create( """ @overload - def create( + def complete( self, body: IO[bytes], *, @@ -527,7 +527,7 @@ def create( """ @distributed_trace - def create( # pylint: disable=too-many-locals + def complete( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -766,7 +766,7 @@ def create( # 
pylint: disable=too-many-locals else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_chat_completions_create_request( + _request = build_chat_completions_complete_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -875,7 +875,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - def create( + def embedding( self, body: JSON, *, @@ -967,7 +967,7 @@ def create( """ @overload - def create( + def embedding( self, *, input: List[str], @@ -1060,7 +1060,7 @@ def create( """ @overload - def create( + def embedding( self, body: IO[bytes], *, @@ -1124,7 +1124,7 @@ def create( """ @distributed_trace - def create( + def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1274,7 +1274,7 @@ def create( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_embeddings_create_request( + _request = build_embeddings_embedding_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1383,7 +1383,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - def create( + def embedding( self, body: JSON, *, @@ -1478,7 +1478,7 @@ def create( """ @overload - def create( + def embedding( self, *, input: List[_models.EmbeddingInput], @@ -1571,7 +1571,7 @@ def create( """ @overload - def create( + def embedding( self, body: IO[bytes], *, @@ -1635,7 +1635,7 @@ def create( """ @distributed_trace - def create( + def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1788,7 +1788,7 @@ def create( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_image_embeddings_create_request( + _request = build_image_embeddings_embedding_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 8d9aac95d440..275e4c72799f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -27,7 +27,7 @@ from . import models as _models from ._model_base import SdkJSONEncoder from ._serialization import Serializer -from ._operations._operations import build_chat_completions_create_request +from ._operations._operations import build_chat_completions_complete_request from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient, ImageEmbeddingsClient @@ -49,12 +49,12 @@ def load_client( model_info = client.get_model_info() client.close() _LOGGER.info("model_info=%s", model_info) - if model_info.model_type in (None, ''): + if model_info.model_type in (None, ""): raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
) # TODO: Remove "completions" once Mistral Large fixes their model type - if model_info.model_type == _models.ModelType.CHAT or "completion": + if model_info.model_type in (_models.ModelType.CHAT, "completion"): return ChatCompletionsClient(endpoint, credential, **kwargs) if model_info.model_type == _models.ModelType.EMBEDDINGS: return EmbeddingsClient(endpoint, credential, **kwargs) @@ -66,7 +66,7 @@ def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload - def create_streaming( + def streaming_complete( self, body: JSON, *, @@ -97,7 +97,7 @@ def create_streaming( """ @overload - def create_streaming( + def streaming_complete( self, *, messages: List[_models.ChatRequestMessage], @@ -203,7 +203,7 @@ def create_streaming( """ @overload - def create_streaming( + def streaming_complete( self, body: IO[bytes], *, @@ -234,7 +234,7 @@ def create_streaming( """ @distributed_trace - def create_streaming( + def streaming_complete( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -376,7 +376,7 @@ def create_streaming( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_chat_completions_create_request( + _request = build_chat_completions_complete_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index bf932ab94ea5..6b108d4921a3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -27,11 +27,11 @@ from ... import models as _models from ..._model_base import SdkJSONEncoder, _deserialize from ..._operations._operations import ( - build_chat_completions_create_request, + build_chat_completions_complete_request, build_chat_completions_get_model_info_request, - build_embeddings_create_request, + build_embeddings_embedding_request, build_embeddings_get_model_info_request, - build_image_embeddings_create_request, + build_image_embeddings_embedding_request, build_image_embeddings_get_model_info_request, ) from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageEmbeddingsClientMixinABC @@ -49,7 +49,7 @@ class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload - async def create( + async def complete( self, body: JSON, *, @@ -177,7 +177,7 @@ async def create( """ @overload - async def create( + async def complete( self, *, messages: List[_models.ChatRequestMessage], @@ -327,7 +327,7 @@ async def create( """ @overload - async def create( + async def complete( self, body: IO[bytes], *, @@ -402,7 +402,7 @@ async def create( """ @distributed_trace_async - async def create( # pylint: disable=too-many-locals + async def complete( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -641,7 +641,7 @@ async def create( # pylint: disable=too-many-locals else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_chat_completions_create_request( + _request = build_chat_completions_complete_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -750,7 +750,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): 
@overload - async def create( + async def embedding( self, body: JSON, *, @@ -842,7 +842,7 @@ async def create( """ @overload - async def create( + async def embedding( self, *, input: List[str], @@ -935,7 +935,7 @@ async def create( """ @overload - async def create( + async def embedding( self, body: IO[bytes], *, @@ -999,7 +999,7 @@ async def create( """ @distributed_trace_async - async def create( + async def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1149,7 +1149,7 @@ async def create( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_embeddings_create_request( + _request = build_embeddings_embedding_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -1258,7 +1258,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - async def create( + async def embedding( self, body: JSON, *, @@ -1353,7 +1353,7 @@ async def create( """ @overload - async def create( + async def embedding( self, *, input: List[_models.EmbeddingInput], @@ -1446,7 +1446,7 @@ async def create( """ @overload - async def create( + async def embedding( self, body: IO[bytes], *, @@ -1510,7 +1510,7 @@ async def create( """ @distributed_trace_async - async def create( + async def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1663,7 +1663,7 @@ async def create( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_image_embeddings_create_request( + _request = build_image_embeddings_embedding_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index d6d233d97c06..ce062e661c5b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -28,7 +28,7 @@ from .._model_base import SdkJSONEncoder from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient, ImageEmbeddingsClient -from .._operations._operations import build_chat_completions_create_request +from .._operations._operations import build_chat_completions_complete_request if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -46,12 +46,12 @@ async def load_client( model_info = await client.get_model_info() await client.close() _LOGGER.info("model_info=%s", model_info) - if model_info.model_type in (None, ''): + if model_info.model_type in (None, ""): raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
) # TODO: Remove "completions" once Mistral Large fixes their model type - if model_info.model_type == _models.ModelType.CHAT or "completion": + if model_info.model_type in (_models.ModelType.CHAT, "completion"): return ChatCompletionsClient(endpoint, credential, **kwargs) if model_info.model_type == _models.ModelType.EMBEDDINGS: return EmbeddingsClient(endpoint, credential, **kwargs) @@ -63,13 +63,13 @@ async def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload - async def create_streaming( + async def streaming_complete( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -94,7 +94,7 @@ async def create_streaming( """ @overload - async def create_streaming( + async def streaming_complete( self, *, messages: List[_models.ChatRequestMessage], @@ -113,7 +113,7 @@ async def create_streaming( Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] ] = None, seed: Optional[int] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -200,13 +200,13 @@ async def create_streaming( """ @overload - async def create_streaming( + async def streaming_complete( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. @@ -231,7 +231,7 @@ async def create_streaming( """ @distributed_trace_async - async def create_streaming( + async def streaming_complete( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -250,7 +250,7 @@ async def create_streaming( Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] ] = None, seed: Optional[int] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.AsyncStreamingChatCompletions: # pylint: disable=line-too-long """Gets streaming chat completions for the provided chat messages. 
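For context, here is a hedged usage sketch of the renamed async streaming API. The key variable name comes from the sample docstrings in this patch; the endpoint variable name and the `async for` consumption pattern are assumptions, since the streaming sample's consumption loop is not shown in this hunk.

```python
# Hedged sketch, assuming CHAT_COMPLETIONS_ENDPOINT and CHAT_COMPLETIONS_KEY are set.
import asyncio
import os

from azure.ai.inference.aio import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential


async def main():
    endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
    key = os.environ["CHAT_COMPLETIONS_KEY"]
    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

    response = await client.streaming_complete(
        messages=[
            SystemMessage(content="You are a helpful assistant."),
            UserMessage(content="Give me 5 good reasons why I should exercise every day."),
        ]
    )
    # Assumption: AsyncStreamingChatCompletions is asynchronously iterable,
    # yielding incremental completion updates.
    async for update in response:
        print(update)

    await client.close()


asyncio.run(main())
```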
@@ -373,7 +373,7 @@ async def create_streaming( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_chat_completions_create_request( + _request = build_chat_completions_complete_request( model_deployment=model_deployment, content_type=content_type, api_version=self._config.api_version, @@ -403,7 +403,7 @@ async def create_streaming( __all__: List[str] = [ "load_client", - "ChatCompletionsClient" + "ChatCompletionsClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 59955aa05081..881ee89cc716 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -19,6 +19,7 @@ """ import asyncio + async def sample_chat_completions_async(): import os from azure.ai.inference.aio import ChatCompletionsClient @@ -38,7 +39,7 @@ async def sample_chat_completions_async(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single chat completion operation - response = await client.create( + response = await client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), @@ -50,6 +51,7 @@ async def sample_chat_completions_async(): await client.close() + async def main(): await sample_chat_completions_async() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 2874812efc14..91a54f86fb69 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -19,6 +19,7 @@ """ import asyncio + async def sample_chat_completions_streaming_async(): import os from azure.ai.inference.aio import ChatCompletionsClient @@ -38,7 +39,7 @@ async def sample_chat_completions_streaming_async(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single streaming chat completion operation. Start the operation and get a Future object. - response = await client.create_streaming( + response = await client.streaming_complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 354f1fb53ebd..79d70b7eb241 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -18,6 +18,7 @@ """ import asyncio + async def sample_embeddings_async(): import os from azure.ai.inference.aio import EmbeddingsClient @@ -36,13 +37,7 @@ async def sample_embeddings_async(): client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single embeddings operation. Start the operation and get a Future object. 
- response = await client.create( - input=[ - "first phrase", - "second phrase", - "third phrase" - ] - ) + response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) print("Embeddings response:") for item in response.data: @@ -53,6 +48,7 @@ async def sample_embeddings_async(): await client.close() + async def main(): await sample_embeddings_async() diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index 5ebe815b2610..0a0b70c136a4 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -19,6 +19,7 @@ """ import asyncio + async def sample_image_embeddings_async(): import os import base64 @@ -36,19 +37,14 @@ async def sample_image_embeddings_async(): from azure.core.credentials import AzureKeyCredential with open("sample1.png", "rb") as f: - image1:str = base64.b64encode(f.read()).decode('utf-8') + image1: str = base64.b64encode(f.read()).decode("utf-8") with open("sample2.png", "rb") as f: - image2:str = base64.b64encode(f.read()).decode('utf-8') + image2: str = base64.b64encode(f.read()).decode("utf-8") client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single image embeddings operation. Start the operation and get a Future object. - response = await client.create( - input=[ - EmbeddingInput(image=image1), - EmbeddingInput(image=image2) - ] - ) + response = await client.embedding(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) print("Embeddings response:") for item in response.data: diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py index 332724c3ca4d..b9ec509af37b 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py @@ -20,6 +20,7 @@ """ import asyncio + async def sample_load_client_async(): import os @@ -41,9 +42,9 @@ async def sample_load_client_async(): print(f"Created client of type `{type(client).__name__}`.") # TODO: Why does this return False? - #if isinstance(client, ChatCompletionsClient): + # if isinstance(client, ChatCompletionsClient): # Do a single chat completion operation. Start the operation and get a Future object. - response = await client.create( + response = await client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index b2e979615834..bd69deae3888 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -18,6 +18,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
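The async image-embeddings sample above (and its sync counterpart later in this patch) repeats the same read-and-base64-encode pattern inline for each image. A small hedged helper, equivalent to that inline code; the file name is illustrative, taken from the samples:

```python
import base64


def encode_image(path: str) -> str:
    # Read an image file and return its contents base64-encoded as UTF-8 text,
    # the format EmbeddingInput(image=...) expects in these samples.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")


image1 = encode_image("sample1.png")  # illustrative file name from the samples
```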
""" + def sample_chat_completions(): import os @@ -36,7 +37,7 @@ def sample_chat_completions(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - response = client.create( + response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index 240afbddffc7..bafe6345ee06 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -20,6 +20,7 @@ """ import io + def sample_chat_completions_from_input_bytes(): import os @@ -39,7 +40,7 @@ def sample_chat_completions_from_input_bytes(): # Make a chat completion call, by directly providing the # HTTP request body as IO[bytes], containing chat messages. - response = client.create(read_text_file("example_chat.json")) + response = client.complete(read_text_file("example_chat.json")) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index 4d6e83f5f2e3..bdb322d29892 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -19,6 +19,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_from_input_json(): import os from typing import MutableMapping, Any @@ -53,7 +54,7 @@ def sample_chat_completions_from_input_json(): # Make a chat completion call, by directly providing the # HTTP request body as IO[bytes], containing chat messages. - response = client.create(json_messages) + response = client.complete(json_messages) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 125a867ca37b..ad6971c864df 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -18,6 +18,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_streaming(): import os @@ -36,7 +37,7 @@ def sample_chat_completions_streaming(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - response = client.create_streaming( + response = client.streaming_complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py index dd7b94f9d385..b5b6c24d224a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py @@ -18,6 +18,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" + def sample_chat_completions_with_entra_id_auth(): import os @@ -38,7 +39,7 @@ def sample_chat_completions_with_entra_id_auth(): client = ChatCompletionsClient(endpoint=endpoint, credential=default_azure_credential) - response = client.create( + response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py index e59ee6c0f34f..7d3e8d7a74cf 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -19,6 +19,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_chat_completions_with_history(): import os @@ -43,13 +44,13 @@ def sample_chat_completions_with_history(): UserMessage(content="What year was construction of the international space station mostly done?"), ] - response = client.create(messages=messages) + response = client.complete(messages=messages) print(response.choices[0].message.content) messages.append(AssistantMessage(content=response.choices[0].message.content)) messages.append(UserMessage(content="And what was the estimated cost to build it?")) - response = client.create(messages=messages) + response = client.complete(messages=messages) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index d2f30ff87c46..92757bc3fd33 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -20,6 +20,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" + def sample_chat_completions_with_tools(): import os import json @@ -93,7 +94,7 @@ def get_flight_info(origin_city: str, destination_city: str): UserMessage(content="What is the next flights from Seattle to Miami?"), ] - response = client.create( + response = client.complete( messages=messages, tools=[flight_info], ) @@ -102,12 +103,7 @@ def get_flight_info(origin_city: str, destination_city: str): if response.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: # Append the previous model response to the chat history - messages.append( - AssistantMessage( - content="", - tool_calls=response.choices[0].message.tool_calls - ) - ) + messages.append(AssistantMessage(content="", tool_calls=response.choices[0].message.tool_calls)) # The tools call should be a function call tool_call = response.choices[0].message.tool_calls[0] @@ -122,16 +118,11 @@ def get_flight_info(origin_city: str, destination_city: str): # Provide the tool response to the model, by appending it to the chat history messages.append( - ToolMessage( - tool_call_id=tool_call.id, content=function_response - ) # json.dumps(function_response) + ToolMessage(tool_call_id=tool_call.id, content=function_response) ) # With the additional tools information on hand, get another response from the model - response = client.create( - messages=messages, - tools=[flight_info] - ) + response = client.complete(messages=messages, tools=[flight_info]) print(f"Model response = {response.choices[0].message.content}") diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py index ec7cb024b08e..bd0c8770d86d 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py @@ -18,6 +18,7 @@ 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_embeddings(): import os @@ -35,7 +36,7 @@ def sample_embeddings(): client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - response = client.create(input=["first phrase", "second phrase", "third phrase"]) + response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py index 2d6e27a4ce52..f7aca4e2be47 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py +++ b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py @@ -19,6 +19,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_get_model_info(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py index 4cf42207e169..3ddc5a128daa 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py @@ -18,6 +18,7 @@ 2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. 
""" + def sample_image_embeddings(): import os import base64 @@ -36,16 +37,13 @@ def sample_image_embeddings(): from azure.core.credentials import AzureKeyCredential with open("sample1.png", "rb") as f: - image1:str = base64.b64encode(f.read()).decode('utf-8') + image1: str = base64.b64encode(f.read()).decode("utf-8") with open("sample2.png", "rb") as f: - image2:str = base64.b64encode(f.read()).decode('utf-8') + image2: str = base64.b64encode(f.read()).decode("utf-8") client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - response = client.create(input=[ - EmbeddingInput(image=image1), - EmbeddingInput(image=image2) - ]) + response = client.embedding(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index 585c950b6572..07ba6db344a8 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -19,6 +19,7 @@ 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ + def sample_load_client(): import os @@ -41,7 +42,7 @@ def sample_load_client(): print(f"Created client of type `{type(client).__name__}`.") if isinstance(client, ChatCompletionsClient): - response = client.create( + response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index a06cd1281892..aa65da8053ee 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -37,13 +37,13 @@ async def test_async_chat_completions_error_free(self, **kwargs): ] client = self._create_async_chat_client(**kwargs) - result = await client.create(messages=messages) + result = await client.complete(messages=messages) self._print_chat_completions_result(result) self._validate_chat_completions_result(result, ["5280", "5,280"]) messages.append(sdk.models.AssistantMessage(content=result.choices[0].message.content)) messages.append(sdk.models.UserMessage(content="and how many yards?")) - result = await client.create(messages=messages) + result = await client.complete(messages=messages) self._print_chat_completions_result(result) self._validate_chat_completions_result(result, ["1760", "1,760"]) await client.close() @@ -52,7 +52,7 @@ async def test_async_chat_completions_error_free(self, **kwargs): @recorded_by_proxy_async async def test_async_chat_completions_streaming_error_free(self, **kwargs): client = self._create_async_chat_client(Sync=False, **kwargs) - result = await client.create_streaming( + result = await client.streaming_complete( messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), @@ -65,7 +65,7 @@ async def test_async_chat_completions_streaming_error_free(self, **kwargs): @recorded_by_proxy_async async def test_async_embeddings_error_free(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) - result = await client.create(input=["first phrase", "second phrase", "third phrase"]) + result = await client.embedding(input=["first phrase", "second 
phrase", "third phrase"]) self._print_embeddings_result(result) self._validate_embeddings_result(result) await client.close() @@ -82,7 +82,7 @@ async def test_embeddings_with_auth_failure(self, **kwargs): client = self._create_async_embeddings_client(bad_key=True, **kwargs) exception_caught = False try: - result = await client.create(input=["first phrase", "second phrase", "third phrase"]) + result = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: exception_caught = True print(e) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 4acddfb132a3..d380fa32d918 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -32,7 +32,7 @@ def test_get_model_info_error_free(self, **kwargs): @recorded_by_proxy def test_chat_completions_error_free(self, **kwargs): client = self._create_chat_client(**kwargs) - result = client.create(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) + result = client.complete(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) self._print_chat_completions_result(result) self._validate_chat_completions_result(result, ["5280", "5,280"]) client.close() @@ -41,7 +41,7 @@ def test_chat_completions_error_free(self, **kwargs): @recorded_by_proxy def test_chat_completions_streaming_error_free(self, **kwargs): client = self._create_chat_client(**kwargs) - result = client.create_streaming( + result = client.streaming_complete( messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), @@ -78,7 +78,7 @@ def test_chat_completions_with_tool_error_free(self, **kwargs): sdk.models.SystemMessage(content="You are an assistant that helps users find weather information."), sdk.models.UserMessage(content="what's the maximum temperature in Seattle two days from now?"), ] - result = client.create( + result = client.complete( messages=messages, tools=[forecast_tool], ) @@ -91,7 +91,7 @@ def test_chat_completions_with_tool_error_free(self, **kwargs): tool_call_id=result.choices[0].message.tool_calls[0].id, ) ) - result = client.create( + result = client.complete( messages=messages, tools=[forecast_tool], ) @@ -102,7 +102,7 @@ def test_chat_completions_with_tool_error_free(self, **kwargs): @recorded_by_proxy def test_embeddings_error_free(self, **kwargs): client = self._create_embeddings_client(**kwargs) - result = client.create(input=["first phrase", "second phrase", "third phrase"]) + result = client.embedding(input=["first phrase", "second phrase", "third phrase"]) self._print_embeddings_result(result) self._validate_embeddings_result(result) client.close() @@ -119,7 +119,7 @@ def test_chat_completion_with_auth_failure(self, **kwargs): client = self._create_chat_client(bad_key=True, **kwargs) exception_caught = False try: - result = client.create(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) + result = client.complete(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) except AzureError as e: exception_caught = True print(e) @@ -135,7 +135,7 @@ def test_embeddings_on_chat_completion_endpoint(self, **kwargs): client = self._create_embeddings_client_with_chat_completions_credentials(**kwargs) exception_caught = False try: - result = 
client.create(input=["first phrase", "second phrase", "third phrase"]) + result = client.embedding(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: exception_caught = True print(e) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 0f74fd6846c8..fe03720ac782 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 6f4ed3b6732421c15ba3a55926527043059ce289 +commit: b9c9be427d3a4587d5fea7815747287b6468d03c repo: Azure/azure-rest-api-specs additionalDirectories: From ca2f4ff369731f1126cf00dd8263b5b449d9038a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 20 May 2024 07:36:53 -0700 Subject: [PATCH 072/112] Minor change to sample --- .../samples/sample_chat_completions_with_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 92757bc3fd33..ec8f364265da 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -103,7 +103,7 @@ def get_flight_info(origin_city: str, destination_city: str): if response.choices[0].finish_reason == CompletionsFinishReason.TOOL_CALLS: # Append the previous model response to the chat history - messages.append(AssistantMessage(content="", tool_calls=response.choices[0].message.tool_calls)) + messages.append(AssistantMessage(tool_calls=response.choices[0].message.tool_calls)) # The tools call should be a function call tool_call = response.choices[0].message.tool_calls[0] From 673f27bd77c302a9b27b4d45c8279793722cf5de Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 21 May 2024 12:35:36 -0700 Subject: [PATCH 073/112] Add support for hyper_parameters --- sdk/ai/azure-ai-inference/README.md | 2 +- .../azure/ai/inference/__init__.py | 2 +- .../ai/inference/_operations/_operations.py | 872 +----------------- .../azure/ai/inference/_patch.py | 593 ++++++++++-- .../azure/ai/inference/aio/__init__.py | 2 +- .../inference/aio/_operations/_operations.py | 870 +---------------- .../azure/ai/inference/aio/_patch.py | 575 +++++++++++- .../azure/ai/inference/models/__init__.py | 4 +- .../azure/ai/inference/models/_models.py | 8 +- ...sample_chat_completions_streaming_async.py | 3 +- .../sample_chat_completions_streaming.py | 5 +- ...mple_chat_completions_with_hyper_params.py | 74 ++ .../sample_chat_completions_with_tools.py | 4 +- .../test_model_inference_async_client.py | 3 +- .../tests/test_model_inference_client.py | 3 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 16 files changed, 1241 insertions(+), 1781 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 2141c2262648..6367b1aab139 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -168,7 +168,7 @@ from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.streaming_complete( +response = client.complete( messages=[ 
SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py index f7dd49374aaa..898076e89409 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._patch import ChatCompletionsClient +from ._client import ChatCompletionsClient from ._client import EmbeddingsClient from ._client import ImageEmbeddingsClient from ._version import VERSION diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index bf963ed016e2..b1ebc04fe3a7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -35,8 +35,6 @@ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -174,142 +172,23 @@ def build_image_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload - def complete( + def _complete( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ChatCompletions: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "messages": [ - chat_request_message - ], - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "frequency_penalty": 0.0, # Optional. A value that influences the - probability of generated tokens appearing based on their cumulative frequency in - generated text. 
Positive values will make tokens less likely to appear as their - frequency increases and decrease the likelihood of the model repeating the same - statements verbatim. - "max_tokens": 0, # Optional. The maximum number of tokens to generate. - "presence_penalty": 0.0, # Optional. A value that influences the probability - of generated tokens appearing based on their existing presence in generated text. - Positive values will make tokens less likely to appear when they already exist - and increase the model's likelihood to output new topics. - "response_format": "str", # Optional. An object specifying the format that - the model must output. Used to enable JSON mode. Known values are: "text" and - "json_object". - "seed": 0, # Optional. If specified, the system will make a best effort to - sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed.". - "stop": [ - "str" # Optional. A collection of textual sequences that will end - completions generation. - ], - "temperature": 0.0, # Optional. The sampling temperature to use that - controls the apparent creativity of generated completions. Higher values will - make output more random while lower values will make results more focused and - deterministic. It is not recommended to modify temperature and top_p for the same - completions request as the interaction of these two settings is difficult to - predict. - "tool_choice": "str", # Optional. If specified, the model will configure - which of the provided tools it can use for the chat completions response. Is - either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a - ChatCompletionsNamedToolSelection type. - "tools": [ - chat_completions_tool_definition - ], - "top_p": 0.0 # Optional. An alternative to sampling with temperature called - nucleus sampling. This value causes the model to consider the results of tokens - with the provided probability mass. As an example, a value of 0.15 will cause - only the tokens comprising the top 15% of probability mass to be considered. It - is not recommended to modify temperature and top_p for the same completions - request as the interaction of these two settings is difficult to predict. - } - - # response body for status code(s): 200 - response == { - "choices": [ - { - "finish_reason": "str", # The reason that this chat - completions choice completed its generated. Required. Known values are: - "stop", "length", "content_filter", and "tool_calls". - "index": 0, # The ordered index associated with this chat - completions choice. Required. - "message": { - "content": "str", # The content of the message. - Required. - "role": "str", # The chat role associated with the - message. Required. Known values are: "system", "user", "assistant", - and "tool". - "tool_calls": [ - chat_completions_tool_call - ] - } - } - ], - "created": "2020-02-20 00:00:00", # The first timestamp associated with - generation activity for this completions response, represented as seconds since - the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - "id": "str", # A unique identifier associated with this chat completions - response. Required. - "model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. 
Known - values are: "usage" and "fixed". - "completion_tokens": 0, # The number of tokens generated across all - completions emissions. Required. - "prompt_tokens": 0, # The number of tokens in the provided prompts - for the completions request. Required. - "total_tokens": 0 # The total number of tokens processed for the - completions request and response. Required. - } - } - """ - + ) -> _models.ChatCompletions: ... @overload - def complete( + def _complete( self, *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, + stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, @@ -322,219 +201,26 @@ def complete( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.ChatCompletions: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - :keyword messages: The collection of context messages associated with this chat completions - request. - Typical usage begins with a chat message for the System role that provides instructions for - the behavior of the assistant, followed by alternating messages between the User and - Assistant roles. Required. - :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword frequency_penalty: A value that influences the probability of generated tokens - appearing based on their cumulative - frequency in generated text. - Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. - :paramtype frequency_penalty: float - :keyword presence_penalty: A value that influences the probability of generated tokens - appearing based on their existing - presence in generated text. - Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. Default value is None. - :paramtype presence_penalty: float - :keyword temperature: The sampling temperature to use that controls the apparent creativity of - generated completions. - Higher values will make output more random while lower values will make results more focused - and deterministic. 
- It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value - causes the - model to consider the results of tokens with the provided probability mass. As an example, a - value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - considered. - It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. - :paramtype top_p: float - :keyword max_tokens: The maximum number of tokens to generate. Default value is None. - :paramtype max_tokens: int - :keyword response_format: An object specifying the format that the model must output. Used to - enable JSON mode. Known values are: "text" and "json_object". Default value is None. - :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat - :keyword stop: A collection of textual sequences that will end completions generation. Default - value is None. - :paramtype stop: list[str] - :keyword tools: The available tool definitions that the chat completions request can use, - including caller-defined functions. Default value is None. - :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] - :keyword tool_choice: If specified, the model will configure which of the provided tools it can - use for the chat completions response. Is either a Union[str, - "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. - Default value is None. - :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or - ~azure.ai.inference.models.ChatCompletionsNamedToolSelection - :keyword seed: If specified, the system will make a best effort to sample deterministically - such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed.". - Default value is None. - :paramtype seed: int - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "choices": [ - { - "finish_reason": "str", # The reason that this chat - completions choice completed its generated. Required. Known values are: - "stop", "length", "content_filter", and "tool_calls". - "index": 0, # The ordered index associated with this chat - completions choice. Required. - "message": { - "content": "str", # The content of the message. - Required. - "role": "str", # The chat role associated with the - message. Required. Known values are: "system", "user", "assistant", - and "tool". - "tool_calls": [ - chat_completions_tool_call - ] - } - } - ], - "created": "2020-02-20 00:00:00", # The first timestamp associated with - generation activity for this completions response, represented as seconds since - the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - "id": "str", # A unique identifier associated with this chat completions - response. Required. - "model": "str", # The model used for the chat completion. Required. 
- "object": "str", # The response object type, which is always - ``chat.completion``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "completion_tokens": 0, # The number of tokens generated across all - completions emissions. Required. - "prompt_tokens": 0, # The number of tokens in the provided prompts - for the completions request. Required. - "total_tokens": 0 # The total number of tokens processed for the - completions request and response. Required. - } - } - """ - + ) -> _models.ChatCompletions: ... @overload - def complete( + def _complete( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ChatCompletions: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "choices": [ - { - "finish_reason": "str", # The reason that this chat - completions choice completed its generated. Required. Known values are: - "stop", "length", "content_filter", and "tool_calls". - "index": 0, # The ordered index associated with this chat - completions choice. Required. - "message": { - "content": "str", # The content of the message. - Required. - "role": "str", # The chat role associated with the - message. Required. Known values are: "system", "user", "assistant", - and "tool". - "tool_calls": [ - chat_completions_tool_call - ] - } - } - ], - "created": "2020-02-20 00:00:00", # The first timestamp associated with - generation activity for this completions response, represented as seconds since - the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - "id": "str", # A unique identifier associated with this chat completions - response. Required. - "model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "completion_tokens": 0, # The number of tokens generated across all - completions emissions. Required. - "prompt_tokens": 0, # The number of tokens in the provided prompts - for the completions request. Required. - "total_tokens": 0 # The total number of tokens processed for the - completions request and response. Required. - } - } - """ + ) -> _models.ChatCompletions: ... 
@distributed_trace - def complete( # pylint: disable=too-many-locals + def _complete( self, body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, + stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, @@ -567,13 +253,6 @@ def complete( # pylint: disable=too-many-locals Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -581,6 +260,9 @@ def complete( # pylint: disable=too-many-locals decrease the likelihood of the model repeating the same statements verbatim. Default value is None. :paramtype frequency_penalty: float + :keyword stream_parameter: A value indicating whether chat completions should be streamed for + this request. Default value is None. + :paramtype stream_parameter: bool :keyword presence_penalty: A value that influences the probability of generated tokens appearing based on their existing presence in generated text. @@ -637,13 +319,6 @@ def complete( # pylint: disable=too-many-locals "messages": [ chat_request_message ], - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, "frequency_penalty": 0.0, # Optional. A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their @@ -664,6 +339,8 @@ def complete( # pylint: disable=too-many-locals "str" # Optional. A collection of textual sequences that will end completions generation. ], + "stream": bool, # Optional. A value indicating whether chat completions + should be streamed for this request. "temperature": 0.0, # Optional. The sampling temperature to use that controls the apparent creativity of generated completions. 
Higher values will make output more random while lower values will make results more focused and @@ -739,13 +416,11 @@ def complete( # pylint: disable=too-many-locals _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) if body is _Unset: if messages is _Unset: raise TypeError("missing required argument: messages") body = { - "extras": extras, "frequency_penalty": frequency_penalty, "max_tokens": max_tokens, "messages": messages, @@ -753,6 +428,7 @@ def complete( # pylint: disable=too-many-locals "response_format": response_format, "seed": seed, "stop": stop, + "stream": stream_parameter, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, @@ -797,9 +473,6 @@ def complete( # pylint: disable=too-many-locals else: deserialized = _deserialize(_models.ChatCompletions, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - return deserialized # type: ignore @distributed_trace @@ -836,8 +509,6 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) - _request = build_chat_completions_get_model_info_request( api_version=self._config.api_version, headers=_headers, @@ -866,271 +537,49 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: else: deserialized = _deserialize(_models.ModelInfo, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - return deserialized # type: ignore class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - def embedding( + def _embedding( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "input": [ - "str" # Input text to embed, encoded as a string or array of tokens. - To embed multiple inputs in a single request, pass an array of strings or - array of token arrays. Required. - ], - "dimensions": 0, # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. 
Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "input_type": "str" # Optional. Optional. The type of the input. Returns a - 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". - } - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload - def embedding( + def _embedding( self, *, input: List[str], model_deployment: Optional[str] = None, content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. - - :keyword input: Input text to embed, encoded as a string or array of tokens. - To embed multiple inputs in a single request, pass an array - of strings or array of token arrays. Required. - :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. 
- How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should - have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Default value is - None. - :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. - :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat - :keyword input_type: Optional. The type of the input. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". Default value is None. - :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + **kwargs: Any + ) -> _models.EmbeddingsResult: ... @overload - def embedding( + def _embedding( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ + ) -> _models.EmbeddingsResult: ... @distributed_trace - def embedding( + def _embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -1150,13 +599,6 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -1196,13 +638,6 @@ def embedding( default value. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, "input_type": "str" # Optional. Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. 
Known values are: "text", "query", and "document". @@ -1254,7 +689,6 @@ def embedding( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) if body is _Unset: if input is _Unset: @@ -1262,7 +696,6 @@ def embedding( body = { "dimensions": dimensions, "encoding_format": encoding_format, - "extras": extras, "input": input, "input_type": input_type, } @@ -1305,9 +738,6 @@ def embedding( else: deserialized = _deserialize(_models.EmbeddingsResult, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - return deserialized # type: ignore @distributed_trace @@ -1344,8 +774,6 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) - _request = build_embeddings_get_model_info_request( api_version=self._config.api_version, headers=_headers, @@ -1374,274 +802,49 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: else: deserialized = _deserialize(_models.ModelInfo, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - return deserialized # type: ignore class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - def embedding( + def _embedding( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "input": [ - { - "image": "str", # The input image, in PNG format. Required. - "text": "str" # Optional. Optional. The text input to feed - into the model (like DINO, CLIP). Returns a 422 error if the model - doesn't support the value or parameter. - } - ], - "dimensions": 0, # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. 
They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "input_type": "str" # Optional. Optional. The type of the input. Returns a - 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". - } - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload - def embedding( + def _embedding( self, *, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an - array. - The input must not exceed the max input tokens for the model. Required. - :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should - have. 
- Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Default value is - None. - :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. - :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat - :keyword input_type: Optional. The type of the input. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". Default value is None. - :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload - def embedding( + def _embedding( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ + ) -> _models.EmbeddingsResult: ... @distributed_trace - def embedding( + def _embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -1661,13 +864,6 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -1710,13 +906,6 @@ def embedding( default value. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, "input_type": "str" # Optional. Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". 
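Editor's note for reviewers: the hunks around this point drop the generated `extras` field from the request body, and the public wrappers in `_patch.py` (below) rebuild the same body-construction pattern around `hyper_params`. Here is a minimal standalone sketch of that pattern; the function name `build_embeddings_body` is illustrative only and not part of the patch:

```python
import json
from typing import Any, Dict, List, Optional

def build_embeddings_body(
    input: List[str],
    dimensions: Optional[int] = None,
    encoding_format: Optional[str] = None,
    input_type: Optional[str] = None,
    hyper_params: Optional[Dict[str, Any]] = None,
) -> str:
    # Known fields first, mirroring the generated operations code.
    body: Dict[str, Any] = {
        "dimensions": dimensions,
        "encoding_format": encoding_format,
        "input": input,
        "input_type": input_type,
    }
    # Model-specific keys are merged as-is into the root of the JSON payload.
    if hyper_params is not None:
        body.update(hyper_params)
    # None values are dropped so unset optional fields are omitted, not sent as null.
    body = {k: v for k, v in body.items() if v is not None}
    return json.dumps(body)

# Example: build_embeddings_body(["hello"], dimensions=256,
#                                hyper_params={"custom_knob": 0.5})
```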
@@ -1776,7 +965,6 @@ def embedding( body = { "dimensions": dimensions, "encoding_format": encoding_format, - "extras": extras, "input": input, "input_type": input_type, } diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 275e4c72799f..589eddf0b70f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -5,13 +5,18 @@ """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize + +Why do we patch the auto-generated code? +1. Add support for input argument `hyper_params` (all clients) +2. Add support for function load_client +3. Add support for chat completion streaming """ import json import logging import sys from io import IOBase -from typing import Any, Dict, Union, IO, List, Optional, overload +from typing import Any, Dict, Union, IO, List, Optional, overload, Type from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator import distributed_trace @@ -25,11 +30,16 @@ map_error, ) from . import models as _models -from ._model_base import SdkJSONEncoder +from ._model_base import SdkJSONEncoder, _deserialize from ._serialization import Serializer -from ._operations._operations import build_chat_completions_complete_request +from ._operations._operations import ( + build_chat_completions_complete_request, + build_embeddings_embedding_request, + build_image_embeddings_embedding_request, +) from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated -from ._client import EmbeddingsClient, ImageEmbeddingsClient +from ._client import EmbeddingsClient as EmbeddingsClientGenerated +from ._client import ImageEmbeddingsClient as ImageEmbeddingsClientGenerated if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -37,14 +47,16 @@ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() + _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False + _LOGGER = logging.getLogger(__name__) def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any -) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]: +) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() client.close() @@ -66,44 +78,45 @@ def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload - def streaming_complete( + def complete( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any, - ) -> _models.StreamingChatCompletions: + ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. + """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed back to the client. 
Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions object to get content updates as they arrive. :param body: Required. :type body: JSON :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. + Relevant only to self-hosted endpoints (previously known as Model-as-a-Platform (MaaP) + or "real-time endpoints"). Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.StreamingChatCompletions + :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. + :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def streaming_complete( + def complete( self, *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, + hyper_params: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -111,17 +124,18 @@ def streaming_complete( max_tokens: Optional[int] = None, response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, stop: Optional[List[str]] = None, + stream: Optional[bool] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] ] = None, seed: Optional[int] = None, **kwargs: Any, - ) -> _models.StreamingChatCompletions: + ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long - """Gets streaming chat completions for the provided chat messages. + """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method, the response is streamed + "completes" provided prompt data. When using this method with `stream=True`, the response is streamed back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions object to get content updates as they arrive. @@ -139,13 +153,11 @@ def streaming_complete( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. 
+ How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -183,6 +195,10 @@ def streaming_complete( :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] + :keyword stream: A value indicating whether chat completions should be streamed for this request. + Default value is False. If streaming is enabled, the response will be a StreamingChatCompletions. + Otherwise the response will be a ChatCompletions. + :paramtype stream: bool :keyword tools: The available tool definitions that the chat completions request can use, including caller-defined functions. Default value is None. :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] @@ -197,24 +213,24 @@ def streaming_complete( same seed and parameters should return the same result. Determinism is not guaranteed.". Default value is None. :paramtype seed: int - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions + :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. + :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def streaming_complete( + def complete( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any, - ) -> _models.StreamingChatCompletions: + ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long - """Gets streaming chat completions for the provided chat messages. + """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method, the response is streamed + "completes" provided prompt data. When using this method with `stream=True`, the response is streamed back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions object to get content updates as they arrive. @@ -228,19 +244,19 @@ def streaming_complete( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions + :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. 
+ :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def streaming_complete( + def complete( self, body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, + hyper_params: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -248,17 +264,18 @@ def streaming_complete( max_tokens: Optional[int] = None, response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, stop: Optional[List[str]] = None, + stream: Optional[bool] = None, tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, tool_choice: Optional[ Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] ] = None, seed: Optional[int] = None, **kwargs: Any, - ) -> _models.StreamingChatCompletions: + ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long - """Gets streaming chat completions for the provided chat messages. + """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method, the response is streamed + "completes" provided prompt data. When using this method with `stream=True`, the response is streamed back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions object to get content updates as they arrive. @@ -275,13 +292,11 @@ def streaming_complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -319,6 +334,10 @@ def streaming_complete( :keyword stop: A collection of textual sequences that will end completions generation. Default value is None. :paramtype stop: list[str] + :keyword stream: A value indicating whether chat completions should be streamed for this request. + Default value is False. If streaming is enabled, the response will be a StreamingChatCompletions. + Otherwise the response will be a ChatCompletions. + :paramtype stream: bool :keyword tools: The available tool definitions that the chat completions request can use, including caller-defined functions. Default value is None. 
:paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] @@ -333,8 +352,8 @@ def streaming_complete( same seed and parameters should return the same result. Determinism is not guaranteed.". Default value is None. :paramtype seed: int - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions + :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. + :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { @@ -354,7 +373,6 @@ def streaming_complete( if messages is _Unset: raise TypeError("missing required argument: messages") body = { - "extras": extras, "frequency_penalty": frequency_penalty, "max_tokens": max_tokens, "messages": messages, @@ -362,12 +380,14 @@ def streaming_complete( "response_format": response_format, "seed": seed, "stop": stop, - "stream": True, + "stream": stream, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, "top_p": top_p, } + if hyper_params is not None: + body.update(hyper_params) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -389,24 +409,493 @@ def streaming_complete( } _request.url = self._client.format_url(_request.url, **path_format_arguments) - kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application) + _stream = stream or False pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=True, **kwargs + _request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: - response.read() # Load the body in memory and close the socket + if _stream: + response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.StreamingChatCompletions(response) + if _stream: + return _models.StreamingChatCompletions(response) + else: + return _deserialize(_models._models.ChatCompletions, response.json()) # pylint: disable=protected-access + + +class EmbeddingsClient(EmbeddingsClientGenerated): + + @overload + def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for a given text prompt. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def embedding( + self, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[str], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for a given text prompt. + + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. + :paramtype input: list[str] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def embedding( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for a given text prompt. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def embedding( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[str] = _Unset, + model_deployment: Optional[str] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given text prompt. + + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. + :paramtype input: list[str] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. 
Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "input": input, + "input_type": input_type, + } + if hyper_params is not None: + body.update(hyper_params) + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_embeddings_embedding_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + return deserialized # type: ignore + +class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): + + @overload + def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def embedding( + self, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[_models.EmbeddingInput], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def embedding( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def embedding( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[_models.EmbeddingInput] = _Unset, + model_deployment: Optional[str] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. 
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "input": input, + "input_type": input_type, + } + if hyper_params is not None: + body.update(hyper_params) + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_image_embeddings_embedding_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + return deserialized # type: ignore __all__: List[str] = [ "load_client", "ChatCompletionsClient", + "EmbeddingsClient", + "ImageEmbeddingsClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index bdbf6403d8eb..e9e1b0469645 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._patch import ChatCompletionsClient +from ._client import ChatCompletionsClient from ._client import EmbeddingsClient from ._client import ImageEmbeddingsClient diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 6b108d4921a3..7eace756cea8 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -49,142 +49,23 @@ class ChatCompletionsClientOperationsMixin(ChatCompletionsClientMixinABC): @overload - async def complete( + async def _complete( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ChatCompletions: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "messages": [ - chat_request_message - ], - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "frequency_penalty": 0.0, # Optional. A value that influences the - probability of generated tokens appearing based on their cumulative frequency in - generated text. Positive values will make tokens less likely to appear as their - frequency increases and decrease the likelihood of the model repeating the same - statements verbatim. - "max_tokens": 0, # Optional. The maximum number of tokens to generate. - "presence_penalty": 0.0, # Optional. A value that influences the probability - of generated tokens appearing based on their existing presence in generated text. - Positive values will make tokens less likely to appear when they already exist - and increase the model's likelihood to output new topics. - "response_format": "str", # Optional. An object specifying the format that - the model must output. Used to enable JSON mode. Known values are: "text" and - "json_object". - "seed": 0, # Optional. If specified, the system will make a best effort to - sample deterministically such that repeated requests with the same seed and - parameters should return the same result. Determinism is not guaranteed.". - "stop": [ - "str" # Optional. 
A collection of textual sequences that will end - completions generation. - ], - "temperature": 0.0, # Optional. The sampling temperature to use that - controls the apparent creativity of generated completions. Higher values will - make output more random while lower values will make results more focused and - deterministic. It is not recommended to modify temperature and top_p for the same - completions request as the interaction of these two settings is difficult to - predict. - "tool_choice": "str", # Optional. If specified, the model will configure - which of the provided tools it can use for the chat completions response. Is - either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a - ChatCompletionsNamedToolSelection type. - "tools": [ - chat_completions_tool_definition - ], - "top_p": 0.0 # Optional. An alternative to sampling with temperature called - nucleus sampling. This value causes the model to consider the results of tokens - with the provided probability mass. As an example, a value of 0.15 will cause - only the tokens comprising the top 15% of probability mass to be considered. It - is not recommended to modify temperature and top_p for the same completions - request as the interaction of these two settings is difficult to predict. - } - - # response body for status code(s): 200 - response == { - "choices": [ - { - "finish_reason": "str", # The reason that this chat - completions choice completed its generated. Required. Known values are: - "stop", "length", "content_filter", and "tool_calls". - "index": 0, # The ordered index associated with this chat - completions choice. Required. - "message": { - "content": "str", # The content of the message. - Required. - "role": "str", # The chat role associated with the - message. Required. Known values are: "system", "user", "assistant", - and "tool". - "tool_calls": [ - chat_completions_tool_call - ] - } - } - ], - "created": "2020-02-20 00:00:00", # The first timestamp associated with - generation activity for this completions response, represented as seconds since - the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - "id": "str", # A unique identifier associated with this chat completions - response. Required. - "model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "completion_tokens": 0, # The number of tokens generated across all - completions emissions. Required. - "prompt_tokens": 0, # The number of tokens in the provided prompts - for the completions request. Required. - "total_tokens": 0 # The total number of tokens processed for the - completions request and response. Required. - } - } - """ - + ) -> _models.ChatCompletions: ... 
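(For readers less familiar with this pattern: the ``@overload`` stubs that remain once the generated docstrings are stripped exist only for type checkers; the single undecorated definition further down carries the runtime behavior. Below is a minimal sketch of the pattern, with invented names (``Example``, ``_op``) rather than SDK code:

.. code-block:: python

    # Illustrative only: each @overload stub advertises one legal call shape
    # to type checkers; only the final, undecorated definition has a body.
    from typing import List, Optional, overload

    class Example:
        @overload
        async def _op(self, body: dict) -> str: ...

        @overload
        async def _op(self, *, messages: List[str]) -> str: ...

        async def _op(self, body: Optional[dict] = None, *, messages: Optional[List[str]] = None) -> str:
            # The one real body dispatches on which argument set the caller used.
            if body is not None:
                return "json body"
            return f"{len(messages or [])} messages"
)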
@overload - async def complete( + async def _complete( self, *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, + stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, @@ -197,219 +78,26 @@ async def complete( ] = None, seed: Optional[int] = None, **kwargs: Any - ) -> _models.ChatCompletions: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - :keyword messages: The collection of context messages associated with this chat completions - request. - Typical usage begins with a chat message for the System role that provides instructions for - the behavior of the assistant, followed by alternating messages between the User and - Assistant roles. Required. - :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword frequency_penalty: A value that influences the probability of generated tokens - appearing based on their cumulative - frequency in generated text. - Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. - :paramtype frequency_penalty: float - :keyword presence_penalty: A value that influences the probability of generated tokens - appearing based on their existing - presence in generated text. - Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. Default value is None. - :paramtype presence_penalty: float - :keyword temperature: The sampling temperature to use that controls the apparent creativity of - generated completions. - Higher values will make output more random while lower values will make results more focused - and deterministic. - It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value - causes the - model to consider the results of tokens with the provided probability mass. As an example, a - value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - considered. 
- It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. - :paramtype top_p: float - :keyword max_tokens: The maximum number of tokens to generate. Default value is None. - :paramtype max_tokens: int - :keyword response_format: An object specifying the format that the model must output. Used to - enable JSON mode. Known values are: "text" and "json_object". Default value is None. - :paramtype response_format: str or ~azure.ai.inference.models.ChatCompletionsResponseFormat - :keyword stop: A collection of textual sequences that will end completions generation. Default - value is None. - :paramtype stop: list[str] - :keyword tools: The available tool definitions that the chat completions request can use, - including caller-defined functions. Default value is None. - :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition] - :keyword tool_choice: If specified, the model will configure which of the provided tools it can - use for the chat completions response. Is either a Union[str, - "_models.ChatCompletionsToolSelectionPreset"] type or a ChatCompletionsNamedToolSelection type. - Default value is None. - :paramtype tool_choice: str or ~azure.ai.inference.models.ChatCompletionsToolSelectionPreset or - ~azure.ai.inference.models.ChatCompletionsNamedToolSelection - :keyword seed: If specified, the system will make a best effort to sample deterministically - such that repeated requests with the - same seed and parameters should return the same result. Determinism is not guaranteed.". - Default value is None. - :paramtype seed: int - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "choices": [ - { - "finish_reason": "str", # The reason that this chat - completions choice completed its generated. Required. Known values are: - "stop", "length", "content_filter", and "tool_calls". - "index": 0, # The ordered index associated with this chat - completions choice. Required. - "message": { - "content": "str", # The content of the message. - Required. - "role": "str", # The chat role associated with the - message. Required. Known values are: "system", "user", "assistant", - and "tool". - "tool_calls": [ - chat_completions_tool_call - ] - } - } - ], - "created": "2020-02-20 00:00:00", # The first timestamp associated with - generation activity for this completions response, represented as seconds since - the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - "id": "str", # A unique identifier associated with this chat completions - response. Required. - "model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "completion_tokens": 0, # The number of tokens generated across all - completions emissions. Required. - "prompt_tokens": 0, # The number of tokens in the provided prompts - for the completions request. Required. - "total_tokens": 0 # The total number of tokens processed for the - completions request and response. 
Required. - } - } - """ - + ) -> _models.ChatCompletions: ... @overload - async def complete( + async def _complete( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.ChatCompletions: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.ChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "choices": [ - { - "finish_reason": "str", # The reason that this chat - completions choice completed its generated. Required. Known values are: - "stop", "length", "content_filter", and "tool_calls". - "index": 0, # The ordered index associated with this chat - completions choice. Required. - "message": { - "content": "str", # The content of the message. - Required. - "role": "str", # The chat role associated with the - message. Required. Known values are: "system", "user", "assistant", - and "tool". - "tool_calls": [ - chat_completions_tool_call - ] - } - } - ], - "created": "2020-02-20 00:00:00", # The first timestamp associated with - generation activity for this completions response, represented as seconds since - the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - "id": "str", # A unique identifier associated with this chat completions - response. Required. - "model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "completion_tokens": 0, # The number of tokens generated across all - completions emissions. Required. - "prompt_tokens": 0, # The number of tokens in the provided prompts - for the completions request. Required. - "total_tokens": 0 # The total number of tokens processed for the - completions request and response. Required. - } - } - """ + ) -> _models.ChatCompletions: ... 
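(The implementation that follows assembles the JSON request body from the keyword arguments, including the new ``"stream": stream_parameter`` entry, and drops unset entries before serializing. The snippet below is a self-contained reduction of that build-and-filter step, with illustrative names and values rather than SDK code:

.. code-block:: python

    import json

    def build_payload(messages, stream_parameter=None, temperature=None, max_tokens=None):
        # Collect every known request field, then drop entries the caller left
        # unset (None) so the wire payload only carries explicit parameters.
        body = {
            "messages": messages,
            "stream": stream_parameter,
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        body = {k: v for k, v in body.items() if v is not None}
        return json.dumps(body)

    print(build_payload([{"role": "user", "content": "hi"}], stream_parameter=True))
    # {"messages": [{"role": "user", "content": "hi"}], "stream": true}
)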
@distributed_trace_async - async def complete( # pylint: disable=too-many-locals + async def _complete( self, body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, + stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, @@ -442,13 +130,6 @@ async def complete( # pylint: disable=too-many-locals Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -456,6 +137,9 @@ async def complete( # pylint: disable=too-many-locals decrease the likelihood of the model repeating the same statements verbatim. Default value is None. :paramtype frequency_penalty: float + :keyword stream_parameter: A value indicating whether chat completions should be streamed for + this request. Default value is None. + :paramtype stream_parameter: bool :keyword presence_penalty: A value that influences the probability of generated tokens appearing based on their existing presence in generated text. @@ -512,13 +196,6 @@ async def complete( # pylint: disable=too-many-locals "messages": [ chat_request_message ], - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, "frequency_penalty": 0.0, # Optional. A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their @@ -539,6 +216,8 @@ async def complete( # pylint: disable=too-many-locals "str" # Optional. A collection of textual sequences that will end completions generation. ], + "stream": bool, # Optional. A value indicating whether chat completions + should be streamed for this request. "temperature": 0.0, # Optional. The sampling temperature to use that controls the apparent creativity of generated completions. 
Higher values will make output more random while lower values will make results more focused and @@ -620,7 +299,6 @@ async def complete( # pylint: disable=too-many-locals if messages is _Unset: raise TypeError("missing required argument: messages") body = { - "extras": extras, "frequency_penalty": frequency_penalty, "max_tokens": max_tokens, "messages": messages, @@ -628,6 +306,7 @@ async def complete( # pylint: disable=too-many-locals "response_format": response_format, "seed": seed, "stop": stop, + "stream": stream_parameter, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, @@ -750,262 +429,43 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - async def embedding( + async def _embedding( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "input": [ - "str" # Input text to embed, encoded as a string or array of tokens. - To embed multiple inputs in a single request, pass an array of strings or - array of token arrays. Required. - ], - "dimensions": 0, # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "input_type": "str" # Optional. Optional. The type of the input. Returns a - 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". - } - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. 
Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - - @overload - async def embedding( - self, - *, - input: List[str], - model_deployment: Optional[str] = None, - content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, - dimensions: Optional[int] = None, - encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, - input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. - - :keyword input: Input text to embed, encoded as a string or array of tokens. - To embed multiple inputs in a single request, pass an array - of strings or array of token arrays. Required. - :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should - have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Default value is - None. - :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. - :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat - :keyword input_type: Optional. The type of the input. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". Default value is None. 
- :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload - async def embedding( + async def _embedding( + self, + *, + input: List[str], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: ... + @overload + async def _embedding( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. 
- Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ + ) -> _models.EmbeddingsResult: ... @distributed_trace_async - async def embedding( + async def _embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -1025,13 +485,6 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -1071,13 +524,6 @@ async def embedding( default value. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, "input_type": "str" # Optional. Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". @@ -1137,7 +583,6 @@ async def embedding( body = { "dimensions": dimensions, "encoding_format": encoding_format, - "extras": extras, "input": input, "input_type": input_type, } @@ -1258,265 +703,43 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - async def embedding( + async def _embedding( self, body: JSON, *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. 
- :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "input": [ - { - "image": "str", # The input image, in PNG format. Required. - "text": "str" # Optional. Optional. The text input to feed - into the model (like DINO, CLIP). Returns a 422 error if the model - doesn't support the value or parameter. - } - ], - "dimensions": 0, # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, - "input_type": "str" # Optional. Optional. The type of the input. Returns a - 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". - } - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. 
- } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload - async def embedding( + async def _embedding( self, *, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, content_type: str = "application/json", - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an - array. - The input must not exceed the max input tokens for the model. Required. - :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] - :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should - have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Default value is - None. - :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. - :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat - :keyword input_type: Optional. The type of the input. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". Default value is None. - :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. 
- "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload - async def embedding( + async def _embedding( self, body: IO[bytes], *, model_deployment: Optional[str] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. - :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0, # Index of the prompt to which the EmbeddingItem - corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ + ) -> _models.EmbeddingsResult: ... 
@distributed_trace_async - async def embedding( + async def _embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, - extras: Optional[Dict[str, str]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -1536,13 +759,6 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the - standard request payload. - They will be passed to the service as-is in the root of the JSON request payload. - How the service handles these extra parameters depends on the value of the - ``extra-parameters`` - HTTP request header. Default value is None. - :paramtype extras: dict[str, str] :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -1585,13 +801,6 @@ async def embedding( default value. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". - "extras": { - "str": "str" # Optional. Extra parameters (in the form of string - key-value pairs) that are not in the standard request payload. They will be - passed to the service as-is in the root of the JSON request payload. How the - service handles these extra parameters depends on the value of the - ``extra-parameters`` HTTP request header. - }, "input_type": "str" # Optional. Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". @@ -1651,7 +860,6 @@ async def embedding( body = { "dimensions": dimensions, "encoding_format": encoding_format, - "extras": extras, "input": input, "input_type": input_type, } diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index ce062e661c5b..ec33bc231e12 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -11,7 +11,7 @@ import sys from io import IOBase -from typing import Any, Dict, Union, IO, List, Optional, overload +from typing import Any, Dict, Union, IO, List, Optional, overload, Type from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator_async import distributed_trace_async @@ -25,10 +25,15 @@ map_error, ) from .. 
import models as _models
-from .._model_base import SdkJSONEncoder
+from .._model_base import SdkJSONEncoder, _deserialize
 from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated
-from ._client import EmbeddingsClient, ImageEmbeddingsClient
-from .._operations._operations import build_chat_completions_complete_request
+from ._client import EmbeddingsClient as EmbeddingsClientGenerated
+from ._client import ImageEmbeddingsClient as ImageEmbeddingsClientGenerated
+from .._operations._operations import (
+    build_chat_completions_complete_request,
+    build_embeddings_embedding_request,
+    build_image_embeddings_embedding_request
+)

 if sys.version_info >= (3, 9):
     from collections.abc import MutableMapping
@@ -41,7 +46,7 @@ async def load_client(
     endpoint: str, credential: AzureKeyCredential, **kwargs: Any
-) -> Union[ChatCompletionsClientGenerated, EmbeddingsClient, ImageEmbeddingsClient]:
+) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]:
     client = ChatCompletionsClient(endpoint, credential, **kwargs)  # Pick any of the clients; it does not matter which one
     model_info = await client.get_model_info()
     await client.close()
@@ -63,19 +68,19 @@ async def load_client(
 class ChatCompletionsClient(ChatCompletionsClientGenerated):

     @overload
-    async def streaming_complete(
+    async def complete(
         self,
         body: JSON,
         *,
         model_deployment: Optional[str] = None,
         content_type: str = "application/json",
         **kwargs: Any,
-    ) -> _models.AsyncStreamingChatCompletions:
+    ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]:
         # pylint: disable=line-too-long
-        """Gets streaming chat completions for the provided chat messages.
+        """Gets chat completions for the provided chat messages.
         Completions support a wide variety of tasks and generate text that continues from or
-        "completes" provided prompt data. When using this method, the response is streamed
-        back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
+        "completes" provided prompt data. When using this method with `stream=True`, the response is streamed
+        back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
         object to get content updates as they arrive.

         :param body: Required.
@@ -88,18 +93,19 @@ async def streaming_complete(
         :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
         :paramtype content_type: str
-        :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping
-        :rtype: ~azure.ai.inference.models.AsyncStreamingChatCompletions
+        :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming.
+ :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions
 :raises ~azure.core.exceptions.HttpResponseError:
 """

 @overload
- async def streaming_complete(
+ async def complete(
 self,
 *,
 messages: List[_models.ChatRequestMessage],
 model_deployment: Optional[str] = None,
 content_type: str = "application/json",
+ hyper_params: Optional[Dict[str, Any]] = None,
 extras: Optional[Dict[str, str]] = None,
 frequency_penalty: Optional[float] = None,
 presence_penalty: Optional[float] = None,
@@ -108,18 +114,19 @@
 max_tokens: Optional[int] = None,
 response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None,
 stop: Optional[List[str]] = None,
+ stream: Optional[bool] = None,
 tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None,
 tool_choice: Optional[
 Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection]
 ] = None,
 seed: Optional[int] = None,
 **kwargs: Any,
- ) -> _models.AsyncStreamingChatCompletions:
+ ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]:
 # pylint: disable=line-too-long
- """Gets streaming chat completions for the provided chat messages.
+ """Gets chat completions for the provided chat messages.
 Completions support a wide variety of tasks and generate text that continues from or
- "completes" provided prompt data. When using this method, the response is streamed
- back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
+ "completes" provided prompt data. When using this method with `stream=True`, the response is streamed
+ back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
 object to get content updates as they arrive.

 :keyword messages: The collection of context messages associated with this chat completions
 request.
@@ -136,6 +143,11 @@
 :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
 Default value is "application/json".
 :paramtype content_type: str
+ :keyword hyper_params: Additional, model-specific parameters that are not in the
+ standard request payload. They will be added as-is to the root of the JSON in the request body.
+ How the service handles these hyper parameters depends on the value of the
+ ``unknown-parameters`` request header. Default value is None.
+ :paramtype hyper_params: dict[str, Any]
 :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the
 standard request payload.
 They will be passed to the service as-is in the root of the JSON request payload.
 How the service handles these extra parameters depends on the value of the
 ``extra-parameters``
 HTTP request header. Default value is None.
 :paramtype extras: dict[str, str]
@@ -180,6 +192,10 @@
 :keyword stop: A collection of textual sequences that will end completions generation. Default
 value is None.
 :paramtype stop: list[str]
+ :keyword stream: A value indicating whether chat completions should be streamed for this request.
+ Default value is None, which behaves the same as False. If streaming is enabled, the response will be an
+ AsyncStreamingChatCompletions. Otherwise the response will be a ChatCompletions.
+ :paramtype stream: bool
 :keyword tools: The available tool definitions that the chat completions request can use,
 including caller-defined functions. Default value is None.
 :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition]
@@ -194,25 +210,25 @@
 same seed and parameters should return the same result. Determinism is not guaranteed.
Default value is None.
 :paramtype seed: int
- :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping
- :rtype: ~azure.ai.inference.models.ChatCompletions
+ :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming.
+ :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions
 :raises ~azure.core.exceptions.HttpResponseError:
 """

 @overload
- async def streaming_complete(
+ async def complete(
 self,
 body: IO[bytes],
 *,
 model_deployment: Optional[str] = None,
 content_type: str = "application/json",
 **kwargs: Any,
- ) -> _models.AsyncStreamingChatCompletions:
+ ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]:
 # pylint: disable=line-too-long
- """Gets streaming chat completions for the provided chat messages.
+ """Gets chat completions for the provided chat messages.
 Completions support a wide variety of tasks and generate text that continues from or
- "completes" provided prompt data. When using this method, the response is streamed
- back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
+ "completes" provided prompt data. When using this method with `stream=True`, the response is streamed
+ back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
 object to get content updates as they arrive.

 :param body: Required.
@@ -225,18 +241,19 @@
 :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
 Default value is "application/json".
 :paramtype content_type: str
- :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping
- :rtype: ~azure.ai.inference.models.ChatCompletions
+ :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming.
+ :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions
 :raises ~azure.core.exceptions.HttpResponseError:
 """

 @distributed_trace_async
- async def streaming_complete(
+ async def complete(
 self,
 body: Union[JSON, IO[bytes]] = _Unset,
 *,
 messages: List[_models.ChatRequestMessage] = _Unset,
 model_deployment: Optional[str] = None,
+ hyper_params: Optional[Dict[str, Any]] = None,
 extras: Optional[Dict[str, str]] = None,
 frequency_penalty: Optional[float] = None,
 presence_penalty: Optional[float] = None,
@@ -245,18 +262,19 @@
 max_tokens: Optional[int] = None,
 response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None,
 stop: Optional[List[str]] = None,
+ stream: Optional[bool] = None,
 tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None,
 tool_choice: Optional[
 Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection]
 ] = None,
 seed: Optional[int] = None,
 **kwargs: Any,
- ) -> _models.AsyncStreamingChatCompletions:
+ ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]:
 # pylint: disable=line-too-long
- """Gets streaming chat completions for the provided chat messages.
+ """Gets chat completions for the provided chat messages.
 Completions support a wide variety of tasks and generate text that continues from or
- "completes" provided prompt data. When using this method, the response is streamed
- back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
+ "completes" provided prompt data. When using this method with `stream=True`, the response is streamed
+ back to the client. Iterate over the resulting ~azure.ai.inference.models.AsyncStreamingChatCompletions
 object to get content updates as they arrive.

 :param body: Is either a JSON type or an IO[bytes] type. Required.
 :type body: JSON or IO[bytes]
@@ -272,6 +290,11 @@
 Typically used when you want to target a test environment instead of production environment.
 Default value is None.
 :paramtype model_deployment: str
+ :keyword hyper_params: Additional, model-specific parameters that are not in the
+ standard request payload. They will be added as-is to the root of the JSON in the request body.
+ How the service handles these hyper parameters depends on the value of the
+ ``unknown-parameters`` request header. Default value is None.
+ :paramtype hyper_params: dict[str, Any]
 :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the
 standard request payload.
 They will be passed to the service as-is in the root of the JSON request payload.
 How the service handles these extra parameters depends on the value of the
 ``extra-parameters``
 HTTP request header. Default value is None.
 :paramtype extras: dict[str, str]
@@ -316,6 +339,10 @@
 :keyword stop: A collection of textual sequences that will end completions generation. Default
 value is None.
 :paramtype stop: list[str]
+ :keyword stream: A value indicating whether chat completions should be streamed for this request.
+ Default value is None, which behaves the same as False. If streaming is enabled, the response will be an
+ AsyncStreamingChatCompletions. Otherwise the response will be a ChatCompletions.
+ :paramtype stream: bool
 :keyword tools: The available tool definitions that the chat completions request can use,
 including caller-defined functions. Default value is None.
 :paramtype tools: list[~azure.ai.inference.models.ChatCompletionsToolDefinition]
@@ -330,8 +357,8 @@
 same seed and parameters should return the same result. Determinism is not guaranteed.
 Default value is None.
 :paramtype seed: int
- :return: ChatCompletions. The ChatCompletions is compatible with MutableMapping
- :rtype: ~azure.ai.inference.models.ChatCompletions
+ :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming.
+ :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions
 :raises ~azure.core.exceptions.HttpResponseError:
 """
 error_map = {
@@ -359,12 +386,14 @@ async def streaming_complete(
 "response_format": response_format,
 "seed": seed,
 "stop": stop,
- "stream": True,
+ "stream": stream,
 "temperature": temperature,
 "tool_choice": tool_choice,
 "tools": tools,
 "top_p": top_p,
 }
+ if hyper_params is not None:
+ body.update(hyper_params)
 body = {k: v for k, v in body.items() if v is not None}
 content_type = content_type or "application/json"
 _content = None
@@ -386,24 +415,494 @@
 }
 _request.url = self._client.format_url(_request.url, **path_format_arguments)

- kwargs.pop("stream", True) # Remove stream from kwargs (ignore value set by the application)
+ _stream = stream or False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ await response.read() # Load the body in memory and close the socket
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ return _models.AsyncStreamingChatCompletions(response)
+ else:
+ return _deserialize(_models.ChatCompletions, response.json()) # pylint: disable=protected-access
+
+
+class EmbeddingsClient(EmbeddingsClientGenerated):
+
+ @overload
+ async def embedding(
+ self,
+ body: JSON,
+ *,
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.EmbeddingsResult:
+ """Return the embeddings for a given text prompt.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword model_deployment: Name of the deployment to which you would like to route the request.
+ Relevant only to Model-as-a-Platform (MaaP) deployments.
+ Typically used when you want to target a test environment instead of production environment.
+ Default value is None.
+ :paramtype model_deployment: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
+ :rtype: ~azure.ai.inference.models.EmbeddingsResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def embedding(
+ self,
+ *,
+ hyper_params: Optional[Dict[str, Any]] = None,
+ input: List[str],
+ model_deployment: Optional[str] = None,
+ content_type: str = "application/json",
+ dimensions: Optional[int] = None,
+ encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None,
+ input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
+ **kwargs: Any
+ ) -> _models.EmbeddingsResult:
+ """Return the embeddings for a given text prompt.
+
+ :keyword hyper_params: Additional, model-specific parameters that are not in the
+ standard request payload. They will be added as-is to the root of the JSON in the request body.
+ How the service handles these hyper parameters depends on the value of the
+ ``unknown-parameters`` request header. Default value is None.
+ :paramtype hyper_params: dict[str, Any]
+ :keyword input: Input text to embed, encoded as a string or array of tokens.
+ To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. + :paramtype input: list[str] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def embedding( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for a given text prompt. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult.
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def embedding( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[str] = _Unset, + model_deployment: Optional[str] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for a given text prompt. + + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input text to embed, encoded as a string or array of tokens. + To embed multiple inputs in a single request, pass an array + of strings or array of token arrays. Required. + :paramtype input: list[str] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult.
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "input": input, + "input_type": input_type, + } + if hyper_params is not None: + body.update(hyper_params) + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_embeddings_embedding_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + await response.read() # Load the body in memory and close the socket + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + return deserialized # type: ignore + + +class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): + + @overload + async def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def embedding( + self, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[_models.EmbeddingInput], + model_deployment: Optional[str] = None, + content_type: str = "application/json", + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the + standard request payload. + They will be passed to the service as-is in the root of the JSON request payload. + How the service handles these extra parameters depends on the value of the + ``extra-parameters`` + HTTP request header. Default value is None. + :paramtype extras: dict[str, str] + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult.
The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def embedding( + self, + body: IO[bytes], + *, + model_deployment: Optional[str] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :param body: Required. + :type body: IO[bytes] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def embedding( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + hyper_params: Optional[Dict[str, Any]] = None, + input: List[_models.EmbeddingInput] = _Unset, + model_deployment: Optional[str] = None, + dimensions: Optional[int] = None, + encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, + input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, + **kwargs: Any + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword hyper_params: Additional, model-specific parameters that are not in the + standard request payload. They will be added as-is to the root of the JSON in the request body. + How the service handles these hyper parameters depends on the value of the + ``unknown-parameters`` request header. Default value is None. + :paramtype hyper_params: dict[str, Any] + :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an + array. + The input must not exceed the max input tokens for the model. Required. + :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should + have. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Default value is + None. + :paramtype dimensions: int + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Passing null causes the model to use its default value. + Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat + :keyword input_type: Optional. The type of the input.
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are: + "text", "query", and "document". Default value is None. + :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + + if body is _Unset: + if input is _Unset: + raise TypeError("missing required argument: input") + body = { + "dimensions": dimensions, + "encoding_format": encoding_format, + "input": input, + "input_type": input_type, + } + if hyper_params is not None: + body.update(hyper_params) + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_image_embeddings_embedding_request( + model_deployment=model_deployment, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=True, **kwargs + _request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: - await response.read() # Load the body in memory and close the socket + if _stream: + await response.read() # Load the body in memory and close the socket map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return _models.AsyncStreamingChatCompletions(response) + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + + return deserialized # type: ignore __all__: List[str] = [ "load_client", "ChatCompletionsClient", + "EmbeddingsClient", + "ImageEmbeddingsClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 50530f45bf51..d53b495e35c9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -31,10 +31,10 @@ from ._models import UserMessage from ._enums import CapacityType +from ._enums import CompletionsFinishReason from ._enums import ChatCompletionsResponseFormat from ._enums import ChatCompletionsToolSelectionPreset from ._enums import ChatRole -from ._enums import CompletionsFinishReason from 
._enums import EmbeddingEncodingFormat from ._enums import EmbeddingInputType from ._enums import ModelType @@ -47,6 +47,7 @@ "ChatChoice", "ChatChoiceUpdate", "ChatCompletions", + "CompletionsFinishReason", "ChatCompletionsFunctionToolCall", "ChatCompletionsFunctionToolDefinition", "ChatCompletionsNamedToolSelection", @@ -70,7 +71,6 @@ "ChatCompletionsResponseFormat", "ChatCompletionsToolSelectionPreset", "ChatRole", - "CompletionsFinishReason", "EmbeddingEncodingFormat", "EmbeddingInputType", "ModelType", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 42c9d74fb4c1..b5feeda05ed6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -127,7 +127,7 @@ class ChatChoice(_model_base.Model): index: int = rest_field() """The ordered index associated with this chat completions choice. Required.""" - finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() + finish_reason: Union[str, "_models._enums.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generation. Required. Known values are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\".""" message: "_models.ChatResponseMessage" = rest_field() @@ -138,7 +138,7 @@ def __init__( self, *, index: int, - finish_reason: Union[str, "_models.CompletionsFinishReason"], + finish_reason: Union[str, "_models._enums.CompletionsFinishReason"], message: "_models.ChatResponseMessage", ): ... @@ -172,7 +172,7 @@ class ChatChoiceUpdate(_model_base.Model): index: int = rest_field() """The ordered index associated with this chat completions choice. Required.""" - finish_reason: Union[str, "_models.CompletionsFinishReason"] = rest_field() + finish_reason: Union[str, "_models._enums.CompletionsFinishReason"] = rest_field() """The reason that this chat completions choice completed its generation. Required. Known values are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\".""" delta: "_models.ChatResponseMessage" = rest_field() @@ -183,7 +183,7 @@ def __init__( self, *, index: int, - finish_reason: Union[str, "_models.CompletionsFinishReason"], + finish_reason: Union[str, "_models._enums.CompletionsFinishReason"], delta: "_models.ChatResponseMessage", ): ... diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 91a54f86fb69..2fe316642ea5 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -39,7 +39,8 @@ async def sample_chat_completions_streaming_async(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single streaming chat completion operation.
- response = await client.streaming_complete( + response = await client.complete( + stream=True, messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index ad6971c864df..48b1786b158f 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -37,7 +37,8 @@ def sample_chat_completions_streaming(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - response = client.streaming_complete( + response = client.complete( + stream=True, messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), @@ -47,7 +48,7 @@ def sample_chat_completions_streaming(): for update in response: print(update.choices[0].delta.content or "", end="") - response.close() + client.close() # [END chat_completions_streaming] diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py new file mode 100644 index 000000000000..0eded965cb7e --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py @@ -0,0 +1,74 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completions response from + the service using a synchronous client, while supplying additional + model-specific parameters as part of the request. + See setting of an optional `unknown-parameters` request header via the + `headers` keyword argument in the client constructor. + See setting of `hyper_params` in the `complete` method. + +USAGE: + python sample_chat_completions_with_hyper_params.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. +""" + + +def sample_chat_completions_with_hyper_params(): + import os + import sys + import logging + + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(key), + headers={"unknown-parameters": "allow"}, # Optional. Supported values: "allow", "ignore", "error" (the default).
+ logging_enable=True + ) + + response = client.complete( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ], + hyper_params={ # Optional. Additional parameters to pass to the model. + "key1": 1, + "key2": True, + "key3": "Some value", + "key4": [1, 2, 3], + "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, + }, + ) + + print(response.choices[0].message.content) + + +if __name__ == "__main__": + sample_chat_completions_with_hyper_params() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index ec8f364265da..03dcc6abb750 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -117,9 +117,7 @@ def get_flight_info(origin_city: str, destination_city: str): print(f"Function response = {function_response}") # Provide the tool response to the model, by appending it to the chat history - messages.append( - ToolMessage(tool_call_id=tool_call.id, content=function_response) - ) + messages.append(ToolMessage(tool_call_id=tool_call.id, content=function_response)) # With the additional tools information on hand, get another response from the model response = client.complete(messages=messages, tools=[flight_info]) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index aa65da8053ee..bdbe1698528c 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -52,7 +52,8 @@ async def test_async_chat_completions_error_free(self, **kwargs): @recorded_by_proxy_async async def test_async_chat_completions_streaming_error_free(self, **kwargs): client = self._create_async_chat_client(Sync=False, **kwargs) - result = await client.streaming_complete( + result = await client.complete( + stream=True, messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index d380fa32d918..27af63f9f475 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -41,7 +41,8 @@ def test_chat_completions_error_free(self, **kwargs): @recorded_by_proxy def test_chat_completions_streaming_error_free(self, **kwargs): client = self._create_chat_client(**kwargs) - result = client.streaming_complete( + result = client.complete( + stream=True, messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index fe03720ac782..ce49f4a1c287 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: b9c9be427d3a4587d5fea7815747287b6468d03c +commit: 39fa4d1583207b3f358707f4d0c98b9e2fd9a54c repo: Azure/azure-rest-api-specs additionalDirectories: From 
0ed9d7bce9d937f63ed179ac286746b0163e06d9 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 21 May 2024 12:47:20 -0700 Subject: [PATCH 074/112] Update root README.md --- sdk/ai/azure-ai-inference/README.md | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 6367b1aab139..e84e6951d077 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,8 +1,6 @@ # Azure model inference client library for Python - - -The client Library allows you to do inference using AI models you deployed to Azure. It supports both serverless endpoints (aka "model as a service" (MaaS) or "pay as you go") and selfhosted endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes services calls using REST AP version `2024-05-01-preview` [specificed here](https://www.microsoft.com/). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). +The client library allows you to do inference using AI models you deployed to Azure. It supports both Serverless Endpoints (aka "model as a service" (MaaS) or "pay as you go") and Self-hosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). Use the model inference client library to: @@ -10,13 +8,14 @@ Use the model inference client library to: * Get information about the model * Do chat completions * Get text embeddings +* Get image embeddings Note that for inference using OpenAI models hosted on Azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client. -[Product documentation](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview) +[Product documentation](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api) | [Samples](https://aka.ms/azsdk/model-client/samples/python) -| [API reference documentation](https://aka.ms/azsdk/model-client/ref-docs/python) -| [Package (Pypi)](https://aka.ms/azsdk/model-client/package/pypi) +| [API reference documentation](https://aka.ms/azsdk/azure-ai-inference/ref-docs/python) +| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-inference/package/pypi) | [SDK source code](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/azure/ai/inference) ## Getting started @@ -82,13 +81,12 @@ client = ChatCompletionsClient( ## Key concepts -### AI Model information +### Loading the client and getting AI model information TODO: Add overview and link to explain AI model info The operation to get AI model information targets the URL route `/info` on the provided endpoint. - ### Chat Completions TODO: Add overview and link to explain chat completions. @@ -107,17 +105,13 @@ TODO: Add overview and link to explain image embeddings. Embeddings operations target the URL route `images/embeddings` on the provided endpoint.
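While the overview above is still marked TODO, here is a rough sketch of the call shape, mirroring the text-embeddings example later in this README. This is a hedged illustration, not confirmed by this README: the environment variable names, the `image` field name on `EmbeddingInput`, and the `data`/`embedding` keys read from the result are all assumptions.

```python
import os

from azure.ai.inference import ImageEmbeddingsClient
from azure.ai.inference.models import EmbeddingInput
from azure.core.credentials import AzureKeyCredential

# Hypothetical environment variable names; substitute your own endpoint and key.
endpoint = os.environ["IMAGE_EMBEDDINGS_ENDPOINT"]
key = os.environ["IMAGE_EMBEDDINGS_KEY"]

client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

# `image` (a base64-encoded image string) is an assumed EmbeddingInput field name.
response = client.embedding(input=[EmbeddingInput(image="<base64-encoded-image>")])

# EmbeddingsResult is MutableMapping-compatible per the client docstrings; the
# "data" and "embedding" keys below follow the common embeddings JSON payload
# shape and are assumptions here.
for item in response["data"]:
    print(item["embedding"])

client.close()
```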
-### Loading a client - -TODO - ## Examples In the following sections you will find simple examples of: * [Chat completions](#chat-completions-example) * [Streaming chat completions](#streaming-chat-completions-example) -* [Text Embeddings](#embeddings-example) +* [Text Embeddings](#text-embeddings-example) * [Image Embeddings](#image-embeddings-example) * [Get model information](#get-model-information-example) * [Loading a client using `load_client` function](#loading-a-client-using-load_client-function) @@ -169,6 +163,7 @@ from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) response = client.complete( + stream=True, messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), @@ -178,7 +173,7 @@ response = client.complete( for update in response: print(update.choices[0].delta.content or "", end="") -response.close() +client.close() ``` From 7c5424f416a56fde0f6d5c24e61b690afad6a342 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 21 May 2024 18:31:54 -0700 Subject: [PATCH 075/112] Save work - unknown_params header, hyper_params input, cached model_info --- .../ai/inference/_operations/_operations.py | 77 +++++++- .../azure/ai/inference/_patch.py | 159 +++++++++++++++-- .../inference/aio/_operations/_operations.py | 31 +++- .../azure/ai/inference/aio/_patch.py | 164 ++++++++++++++++-- .../azure/ai/inference/models/__init__.py | 20 ++- .../azure/ai/inference/models/_enums.py | 12 ++ .../azure/ai/inference/models/_models.py | 4 +- ...sample_chat_completions_streaming_async.py | 2 +- .../async_samples/sample_load_client_async.py | 36 ++-- .../samples/sample_chat_completions.py | 13 +- .../sample_chat_completions_streaming.py | 2 +- ...mple_chat_completions_with_hyper_params.py | 9 +- .../samples/sample_load_client.py | 5 +- .../tests/model_inference_test_base.py | 25 ++- .../test_model_inference_async_client.py | 61 ++++++- .../tests/test_model_inference_client.py | 65 ++++++- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 17 files changed, 585 insertions(+), 102 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index b1ebc04fe3a7..eeea19518f6e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -35,12 +35,19 @@ from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False -def build_chat_completions_complete_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_chat_completions_complete_request( + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -57,6 +64,8 @@ def build_chat_completions_complete_request(*, model_deployment: Optional[str] = # 
Construct headers if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") + if unknown_params is not None: + _headers["unknown-parameters"] = _SERIALIZER.header("unknown_params", unknown_params, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -83,7 +92,12 @@ def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_embeddings_embedding_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_embeddings_embedding_request( + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -100,6 +114,8 @@ def build_embeddings_embedding_request(*, model_deployment: Optional[str] = None # Construct headers if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") + if unknown_params is not None: + _headers["unknown-parameters"] = _SERIALIZER.header("unknown_params", unknown_params, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -126,7 +142,12 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_image_embeddings_embedding_request(*, model_deployment: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_image_embeddings_embedding_request( + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -143,6 +164,8 @@ def build_image_embeddings_embedding_request(*, model_deployment: Optional[str] # Construct headers if model_deployment is not None: _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") + if unknown_params is not None: + _headers["unknown-parameters"] = _SERIALIZER.header("unknown_params", unknown_params, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -177,6 +200,7 @@ def _complete( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... 
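# A rough, hypothetical sketch (not part of the generated module) of what the request
# builders above produce. Per the serializer lines shown, the keyword arguments are
# surfaced as HTTP headers on the resulting HttpRequest:
#
#     request = build_chat_completions_complete_request(
#         model_deployment="my-test-deployment",   # hypothetical deployment name
#         unknown_params="allow",
#         content_type="application/json",
#         content=b'{"messages": []}',
#     )
#     assert request.headers["azureml-model-deployment"] == "my-test-deployment"
#     assert request.headers["unknown-parameters"] == "allow"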
@@ -186,6 +210,7 @@ def _complete( *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -208,6 +233,7 @@ def _complete( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -219,6 +245,7 @@ def _complete( *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, @@ -253,6 +280,9 @@ def _complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -416,6 +446,7 @@ def _complete( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ChatCompletions] = kwargs.pop("cls", None) if body is _Unset: if messages is _Unset: @@ -444,6 +475,7 @@ def _complete( _request = build_chat_completions_complete_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -473,10 +505,13 @@ def _complete( else: deserialized = _deserialize(_models.ChatCompletions, response.json()) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized # type: ignore @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. @@ -509,6 +544,8 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) + _request = build_chat_completions_get_model_info_request( api_version=self._config.api_version, headers=_headers, @@ -537,6 +574,9 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: else: deserialized = _deserialize(_models.ModelInfo, response.json()) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized # type: ignore @@ -548,6 +588,7 @@ def _embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... 
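# Caller-side sketch of the `cls` hook added above; the callback name and usage are
# hypothetical, but the invocation shape matches `cls(pipeline_response, deserialized, {})`
# in the code: it lets a caller intercept the raw pipeline response alongside the
# deserialized model.
#
#     def keep_raw_response(pipeline_response, deserialized, _headers):
#         return deserialized, pipeline_response.http_response
#
#     completions, raw = client._complete(messages=messages, cls=keep_raw_response)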
@@ -557,6 +598,7 @@ def _embedding( *, input: List[str], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -569,6 +611,7 @@ def _embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -580,6 +623,7 @@ def _embedding( *, input: List[str] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -599,6 +643,9 @@ def _embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -689,6 +736,7 @@ def _embedding( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EmbeddingsResult] = kwargs.pop("cls", None) if body is _Unset: if input is _Unset: @@ -709,6 +757,7 @@ def _embedding( _request = build_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -738,10 +787,13 @@ def _embedding( else: deserialized = _deserialize(_models.EmbeddingsResult, response.json()) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized # type: ignore @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. @@ -774,6 +826,8 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} + cls: ClsType[_models.ModelInfo] = kwargs.pop("cls", None) + _request = build_embeddings_get_model_info_request( api_version=self._config.api_version, headers=_headers, @@ -802,6 +856,9 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: else: deserialized = _deserialize(_models.ModelInfo, response.json()) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized # type: ignore @@ -813,6 +870,7 @@ def _embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... 
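# Sketch (hypothetical values) of how the two new knobs work together once the
# public clients pass them through: `hyper_params` keys are merged into the root
# of the JSON request body, while `unknown_params` is sent as the
# "unknown-parameters" header that tells the service how to treat keys it does
# not recognize.
#
#     client.embedding(
#         input=["first phrase", "second phrase"],
#         hyper_params={"my_custom_knob": 0.5},  # "my_custom_knob" is hypothetical
#         unknown_params="ignore",
#     )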
@@ -822,6 +880,7 @@ def _embedding( *, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -834,6 +893,7 @@ def _embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -845,6 +905,7 @@ def _embedding( *, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -864,6 +925,9 @@ def _embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -978,6 +1042,7 @@ def _embedding( _request = build_image_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1013,7 +1078,7 @@ def _embedding( return deserialized # type: ignore @distributed_trace - def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 589eddf0b70f..da35103bf943 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -36,6 +36,9 @@ build_chat_completions_complete_request, build_embeddings_embedding_request, build_image_embeddings_embedding_request, + build_chat_completions_get_model_info_request, + build_embeddings_get_model_info_request, + build_image_embeddings_get_model_info_request ) from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient as EmbeddingsClientGenerated @@ -57,22 +60,29 @@ def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() client.close() + _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ""): raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
) - # TODO: Remove "completions" once Mistral Large fixes their model type + + # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fix their model types if model_info.model_type in (_models.ModelType.CHAT, "completion"): - return ChatCompletionsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.EMBEDDINGS: - return EmbeddingsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - return ImageEmbeddingsClient(endpoint, credential, **kwargs) - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + client = ChatCompletionsClient(endpoint, credential, **kwargs) + elif model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): + client = EmbeddingsClient(endpoint, credential, **kwargs) + elif model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + client = ImageEmbeddingsClient(endpoint, credential, **kwargs) + else: + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + + client._model_info = model_info # pylint: disable=protected-access + return client class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -83,6 +93,7 @@ def complete( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: @@ -101,6 +112,10 @@ def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -115,6 +130,7 @@ def complete( *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", hyper_params: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, @@ -150,6 +166,10 @@ def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
:paramtype content_type: str @@ -224,6 +244,7 @@ def complete( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: @@ -241,6 +262,10 @@ def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -256,6 +281,7 @@ def complete( *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, hyper_params: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, @@ -292,6 +318,10 @@ def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the @@ -398,6 +428,7 @@ def complete( _request = build_chat_completions_complete_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -427,6 +458,25 @@ def complete( else: return _deserialize(_models._models.ChatCompletions, response.json()) # pylint: disable=protected-access + # Cache here the results of get_model_info call + _model_info: Optional[_models.ModelInfo] = None + + @distributed_trace + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + if self._model_info is None: + self._model_info = self._get_model_info(**kwargs) + return self._model_info + + def __str__(self) -> str: + return super().__str__() + f"\n_model_info={self._model_info}" + class EmbeddingsClient(EmbeddingsClientGenerated): @@ -436,8 +486,9 @@ def embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt.
@@ -448,6 +499,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -463,11 +518,12 @@ def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[str], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. @@ -485,6 +541,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -522,8 +582,9 @@ def embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. @@ -534,6 +595,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -550,10 +615,11 @@ def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[str] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for a given text prompt. @@ -574,6 +640,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. 
:paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -628,6 +698,7 @@ def embedding( _request = build_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -659,6 +730,25 @@ def embedding( return deserialized # type: ignore + # Cache here the results of get_model_info call + _model_info: Optional[_models.ModelInfo] = None + + @distributed_trace + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + if self._model_info == None: + self._model_info = self._get_model_info(**kwargs) + return self._model_info + + def __str__(self) -> str: + return super().__str__() + f"\n_model_info={self._model_info}" + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload @@ -667,8 +757,9 @@ def embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for given images. @@ -679,6 +770,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -694,11 +789,12 @@ def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for given images. @@ -716,6 +812,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. 
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -753,8 +853,9 @@ def embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for given images. @@ -765,6 +866,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -781,10 +886,11 @@ def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for given images. @@ -805,6 +911,10 @@ def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -859,6 +969,7 @@ def embedding( _request = build_image_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -890,6 +1001,24 @@ def embedding( return deserialized # type: ignore + _model_info: Optional[_models.ModelInfo] = None + + @distributed_trace + def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + if self._model_info == None: + self._model_info = self._get_model_info(**kwargs) + return self._model_info + + def __str__(self) -> str: + return super().__str__() + f"\n_model_info={self._model_info}" + __all__: List[str] = [ "load_client", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 7eace756cea8..361431d4bc45 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -54,6 +54,7 @@ async def _complete( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -63,6 +64,7 @@ async def _complete( *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -85,6 +87,7 @@ async def _complete( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -96,6 +99,7 @@ async def _complete( *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, @@ -130,6 +134,10 @@ async def _complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -322,6 +330,7 @@ async def _complete( _request = build_chat_completions_complete_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -357,7 +366,7 @@ async def _complete( return deserialized # type: ignore @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. @@ -434,6 +443,7 @@ async def _embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... 
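All of the `get_model_info()` bodies added in this patch share one memoization shape: the generated `_get_model_info()` performs the HTTP call, and the public wrapper caches its result in `_model_info`. A self-contained sketch of just that pattern, where the class names and returned dict are stand-ins rather than the generated SDK types:

```python
from typing import Any, Dict, Optional


class _GeneratedClient:
    """Stand-in for the generated client whose _get_model_info() hits the service."""

    def _get_model_info(self, **kwargs: Any) -> Dict[str, str]:
        # In the real client this is a network round-trip to the /info route.
        return {"model_name": "example", "model_type": "chat"}


class PatchedClient(_GeneratedClient):
    # Class-level default; the first get_model_info() call fills it per instance.
    _model_info: Optional[Dict[str, str]] = None

    def get_model_info(self, **kwargs: Any) -> Dict[str, str]:
        if self._model_info is None:  # only the first call reaches the service
            self._model_info = self._get_model_info(**kwargs)
        return self._model_info


client = PatchedClient()
assert client.get_model_info() is client.get_model_info()  # second call served from cache
```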
@@ -443,6 +453,7 @@ async def _embedding( *, input: List[str], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -455,6 +466,7 @@ async def _embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -466,6 +478,7 @@ async def _embedding( *, input: List[str] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -485,6 +498,9 @@ async def _embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -596,6 +612,7 @@ async def _embedding( _request = build_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -631,7 +648,7 @@ async def _embedding( return deserialized # type: ignore @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. @@ -708,6 +725,7 @@ async def _embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -717,6 +735,7 @@ async def _embedding( *, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -729,6 +748,7 @@ async def _embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -740,6 +760,7 @@ async def _embedding( *, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -759,6 +780,9 @@ async def _embedding( Typically used when you want to target a test environment instead of production environment. 
Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -873,6 +897,7 @@ async def _embedding( _request = build_image_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -908,7 +933,7 @@ async def _embedding( return deserialized # type: ignore @distributed_trace_async - async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index ec33bc231e12..5351a4709997 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -29,10 +29,13 @@ from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient as EmbeddingsClientGenerated from ._client import ImageEmbeddingsClient as ImageEmbeddingsClientGenerated -from .._operations._operations import ( +from .._operations._operations import ( build_chat_completions_complete_request, build_embeddings_embedding_request, - build_image_embeddings_embedding_request + build_image_embeddings_embedding_request, + build_chat_completions_get_model_info_request, + build_embeddings_get_model_info_request, + build_image_embeddings_get_model_info_request, ) if sys.version_info >= (3, 9): @@ -47,22 +50,29 @@ async def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = await client.get_model_info() await client.close() + _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ""): raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
) - # TODO: Remove "completions" once Mistral Large fixes their model type + + # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): - return ChatCompletionsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.EMBEDDINGS: - return EmbeddingsClient(endpoint, credential, **kwargs) - if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - return ImageEmbeddingsClient(endpoint, credential, **kwargs) - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + client = ChatCompletionsClient(endpoint, credential, **kwargs) + elif model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): + client = EmbeddingsClient(endpoint, credential, **kwargs) + elif model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + client = ImageEmbeddingsClient(endpoint, credential, **kwargs) + else: + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + + client._model_info = model_info # pylint: disable=protected-access + return client class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -73,6 +83,7 @@ async def complete( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: @@ -90,6 +101,10 @@ async def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -104,6 +119,7 @@ async def complete( *, messages: List[_models.ChatRequestMessage], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", hyper_params: Optional[Dict[str, Any]] = None, extras: Optional[Dict[str, str]] = None, @@ -155,6 +171,10 @@ async def complete( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. 
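The reworked async `load_client()` above makes a single `get_model_info()` probe, maps `model_info.model_type` to a concrete client class, and pre-seeds that client's `_model_info` cache before returning it. A hedged usage sketch mirroring the repo's async samples (environment variable names as in those samples):

```python
import asyncio
import os

from azure.ai.inference.aio import ChatCompletionsClient, load_client
from azure.core.credentials import AzureKeyCredential


async def main() -> None:
    client = await load_client(
        endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
    )
    try:
        # The concrete type depends on the model_type the endpoint reported.
        print(f"Created client of type `{type(client).__name__}`.")
        if isinstance(client, ChatCompletionsClient):
            # Served from the cache load_client() seeded; no extra round-trip.
            info = await client.get_model_info()
            print(info.model_type)
    finally:
        await client.close()


asyncio.run(main())
```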
@@ -221,6 +241,7 @@ async def complete( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: @@ -238,6 +259,10 @@ async def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -253,6 +278,7 @@ async def complete( *, messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, hyper_params: Optional[Dict[str, Any]] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -302,6 +328,10 @@ async def complete( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -404,6 +434,7 @@ async def complete( _request = build_chat_completions_complete_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -433,6 +464,25 @@ async def complete( else: return _deserialize(_models.ChatCompletions, response.json()) # pylint: disable=protected-access + # Cache here the results of get_model_info call + _model_info: Optional[_models.ModelInfo] = None + + @distributed_trace_async + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + if self._model_info == None: + self._model_info = await self._get_model_info(**kwargs) + return self._model_info + + def __str__(self) -> str: + return super().__str__() + f"\n_model_info={self._model_info}" + class EmbeddingsClient(EmbeddingsClientGenerated): @@ -442,8 +492,9 @@ async def embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. @@ -454,6 +505,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. 
:paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -469,11 +524,12 @@ async def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[str], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. @@ -491,6 +547,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -528,8 +588,9 @@ async def embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. @@ -540,6 +601,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -556,10 +621,11 @@ async def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[str] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for a given text prompt. @@ -580,6 +646,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. 
:paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -634,6 +704,7 @@ async def embedding( _request = build_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -665,6 +736,25 @@ async def embedding( return deserialized # type: ignore + # Cache here the results of get_model_info call + _model_info: Optional[_models.ModelInfo] = None + + @distributed_trace_async + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. The ModelInfo is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.ModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + if self._model_info == None: + self._model_info = await self._get_model_info(**kwargs) + return self._model_info + + def __str__(self) -> str: + return super().__str__() + f"\n_model_info={self._model_info}" + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @@ -674,8 +764,9 @@ async def embedding( body: JSON, *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for given images. @@ -686,6 +777,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -701,11 +796,12 @@ async def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for given images. @@ -723,6 +819,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. 
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -760,8 +860,9 @@ async def embedding( body: IO[bytes], *, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: """Return the embeddings for given images. @@ -772,6 +873,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -788,10 +893,11 @@ async def embedding( hyper_params: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long """Return the embeddings for given images. @@ -812,6 +918,10 @@ async def embedding( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -866,6 +976,7 @@ async def embedding( _request = build_image_embeddings_embedding_request( model_deployment=model_deployment, + unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -897,6 +1008,25 @@ async def embedding( return deserialized # type: ignore + # Cache here the results of get_model_info call + _model_info: Optional[_models.ModelInfo] = None + + @distributed_trace_async + async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: + # pylint: disable=line-too-long + """Returns information about the AI model. + + :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping
+        :rtype: ~azure.ai.inference.models.ModelInfo
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        if self._model_info is None:
+            self._model_info = await self._get_model_info(**kwargs)
+        return self._model_info
+
+    def __str__(self) -> str:
+        return super().__str__() + f"\n_model_info={self._model_info}"
+
 
 __all__: List[str] = [
     "load_client",
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
index d53b495e35c9..3ad02b68ecca 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
@@ -31,49 +31,51 @@
 from ._models import UserMessage
 
 from ._enums import CapacityType
-from ._enums import CompletionsFinishReason
 from ._enums import ChatCompletionsResponseFormat
 from ._enums import ChatCompletionsToolSelectionPreset
+from ._enums import CompletionsFinishReason
 from ._enums import ChatRole
 from ._enums import EmbeddingEncodingFormat
 from ._enums import EmbeddingInputType
 from ._enums import ModelType
+from ._enums import UnknownParams
 from ._patch import __all__ as _patch_all
 from ._patch import *  # pylint: disable=unused-wildcard-import
 from ._patch import patch_sdk as _patch_sdk
 
 __all__ = [
     "AssistantMessage",
+    "CapacityType",
     "ChatChoice",
     "ChatChoiceUpdate",
     "ChatCompletions",
-    "CompletionsFinishReason",
     "ChatCompletionsFunctionToolCall",
     "ChatCompletionsFunctionToolDefinition",
     "ChatCompletionsNamedToolSelection",
+    "ChatCompletionsResponseFormat",
     "ChatCompletionsToolCall",
     "ChatCompletionsToolDefinition",
+    "ChatCompletionsToolSelectionPreset",
     "ChatCompletionsUpdate",
     "ChatRequestMessage",
     "ChatResponseMessage",
+    "ChatRole",
+    "CompletionsFinishReason",
     "CompletionsUsage",
+    "EmbeddingEncodingFormat",
     "EmbeddingInput",
+    "EmbeddingInputType",
     "EmbeddingItem",
     "EmbeddingsResult",
     "EmbeddingsUsage",
     "FunctionCall",
     "FunctionDefinition",
     "ModelInfo",
+    "ModelType",
     "SystemMessage",
     "ToolMessage",
+    "UnknownParams",
     "UserMessage",
-    "CapacityType",
-    "ChatCompletionsResponseFormat",
-    "ChatCompletionsToolSelectionPreset",
-    "ChatRole",
-    "EmbeddingEncodingFormat",
-    "EmbeddingInputType",
-    "ModelType",
 ]
 __all__.extend([p for p in _patch_all if p not in __all__])
 _patch_sdk()
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
index ffa1e646fa73..75d01fb0ad89 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
@@ -119,3 +119,15 @@ class ModelType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """Audio generation"""
     CHAT = "chat"
     """Chat completions"""
+
+
+class UnknownParams(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Controls what happens if unknown parameters are passed in the JSON request payload."""
+
+    ERROR = "error"
+    """The service will error if it detected unknown parameters in the request payload.
This is the + default.""" + INGORE = "ignore" + """The servcie will ignore unknown parameters in the request payload.""" + ALLOW = "allow" + """The service will pass unknown parameters to the back-end AI model.""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index b5feeda05ed6..99b0cf2a1250 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -889,7 +889,7 @@ class ModelInfo(_model_base.Model): model_name: str = rest_field() """The name of the AI model. For example: ``Phi21``. Required.""" - model_type: Union[str, "_models.ModelType"] = rest_field() + model_type: Union[str, "_models._enums.ModelType"] = rest_field() """The type of the AI model. A Unique identifier for the profile. Required. Known values are: \"embeddings\", \"image_generation\", \"text_generation\", \"image_embeddings\", \"audio_generation\", and \"chat\".""" @@ -901,7 +901,7 @@ def __init__( self, *, model_name: str, - model_type: Union[str, "_models.ModelType"], + model_type: Union[str, "_models._enums.ModelType"], model_provider_name: str, ): ... diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 2fe316642ea5..22ba49e024d8 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -44,7 +44,7 @@ async def sample_chat_completions_streaming_async(): messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ] + ], ) # Iterate on the response to get chat completion updates, as they arrive from the service diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py index b9ec509af37b..548a3fa6f43a 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py @@ -6,17 +6,17 @@ DESCRIPTION: This sample demonstrates how to create an asynchronous client from a given endpoint URL using the load_client() function, imported from azure.ai.inference.aio. - In this sample, we get an asynchronous client and do a chat completions call. + In this sample, we get an asynchronous embeddings client and do one embeddings call. USAGE: python sample_load_client_async.py Set these two environment variables before running the sample: - 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + 1) EMBEDDINGS_ENDPOINT - Your endpoint URL, in the form https://..inference.ai.azure.com where `your-deployment-name` is your unique AI Model deployment name, and `your-azure-region` is the Azure region where your model is deployed. - 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. + 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. 
""" import asyncio @@ -25,34 +25,30 @@ async def sample_load_client_async(): import os try: - endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] - key = os.environ["CHAT_COMPLETIONS_KEY"] + endpoint = os.environ["EMBEDDINGS_ENDPOINT"] + key = os.environ["EMBEDDINGS_KEY"] except KeyError: - print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Missing environment variable 'EMBEDDINGS_ENDPOINT' or 'EMBEDDINGS_KEY'") print("Set them before running this sample.") exit() - from azure.ai.inference.aio import load_client, ChatCompletionsClient - from azure.ai.inference.models import SystemMessage, UserMessage + from azure.ai.inference.aio import load_client, EmbeddingsClient from azure.core.credentials import AzureKeyCredential client = await load_client(endpoint=endpoint, credential=AzureKeyCredential(key)) - # This should create a client of type `ChatCompletionsClient` + # This should create a client of type `EmbeddingsClient` print(f"Created client of type `{type(client).__name__}`.") - # TODO: Why does this return False? - # if isinstance(client, ChatCompletionsClient): - # Do a single chat completion operation. Start the operation and get a Future object. - response = await client.complete( - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), - ] - ) + if isinstance(client, EmbeddingsClient): + response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) - # Print response the the console - print(response.choices[0].message.content) + print("Embeddings response:") + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) await client.close() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index bd69deae3888..de5f0ef82593 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -21,6 +21,13 @@ def sample_chat_completions(): import os + import sys + import logging + + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] @@ -35,7 +42,11 @@ def sample_chat_completions(): from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key),logging_enable=True) + print(client) + model_info = client.get_model_info() + print(model_info) + print(client) response = client.complete( messages=[ diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py index 48b1786b158f..226489918000 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py @@ -42,7 +42,7 @@ def sample_chat_completions_streaming(): messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ] + ], ) for 
update in response: diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py index 0eded965cb7e..3f7af2911d07 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py @@ -42,15 +42,17 @@ def sample_chat_completions_with_hyper_params(): exit() from azure.ai.inference import ChatCompletionsClient - from azure.ai.inference.models import SystemMessage, UserMessage + from azure.ai.inference.models import SystemMessage, UserMessage, UnknownParams from azure.core.credentials import AzureKeyCredential from azure.core.pipeline.policies import HeadersPolicy client = ChatCompletionsClient( endpoint=endpoint, credential=AzureKeyCredential(key), - headers={"unknown-parameters": "allow"}, # Optional. Supported values: "allow", "ignore", "error" (the default). - logging_enable=True + #headers={ + # "unknown-parameters": "allow" + #}, # Optional. Supported values: "allow", "ignore", "error" (the default). + logging_enable=True, ) response = client.complete( @@ -58,6 +60,7 @@ def sample_chat_completions_with_hyper_params(): SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], + unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "allow", "ignore", "error" (the default) hyper_params={ # Optional. Additional parameters to pass to the model. "key1": 1, "key2": True, diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index 07ba6db344a8..a46abecc6634 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -6,7 +6,8 @@ DESCRIPTION: This sample demonstrates how to create a client from a given endpoint URL using the load_client() function, imported from azure.ai.inference. - In this sample, we get a synchronous client and do a chat completions call. + In this sample, we get a synchronous chat completions client and do one + chat completions call. 
USAGE:
    python sample_load_client.py
@@ -41,6 +42,8 @@ def sample_load_client():
     # This should create a client of type `ChatCompletionsClient`
     print(f"Created client of type `{type(client).__name__}`.")
 
+    print(client)
+
     if isinstance(client, ChatCompletionsClient):
         response = client.complete(
             messages=[
diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
index 2b8afdf16ed3..c64b68b01535 100644
--- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
+++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py
@@ -71,7 +71,24 @@ def _load_embeddings_credentials(self, *, bad_key: bool, **kwargs):
         credential = AzureKeyCredential(key)
         return endpoint, credential
 
-    # Methos to create the different sync and async clients
+    # Methods to create sync and async clients using the load_client() function
+    async def _load_async_chat_client(self, *, bad_key: bool = False, **kwargs) -> async_sdk.ChatCompletionsClient:
+        endpoint, credential = self._load_chat_credentials(bad_key=bad_key, **kwargs)
+        return await async_sdk.load_client(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED)
+
+    def _load_chat_client(self, *, bad_key: bool = False, **kwargs) -> sdk.ChatCompletionsClient:
+        endpoint, credential = self._load_chat_credentials(bad_key=bad_key, **kwargs)
+        return sdk.load_client(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED)
+
+    async def _load_async_embeddings_client(self, *, bad_key: bool = False, **kwargs) -> async_sdk.EmbeddingsClient:
+        endpoint, credential = self._load_embeddings_credentials(bad_key=bad_key, **kwargs)
+        return await async_sdk.load_client(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED)
+
+    def _load_embeddings_client(self, *, bad_key: bool = False, **kwargs) -> sdk.EmbeddingsClient:
+        endpoint, credential = self._load_embeddings_credentials(bad_key=bad_key, **kwargs)
+        return sdk.load_client(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED)
+
+    # Methods to create the different sync and async clients directly
     def _create_async_chat_client(self, *, bad_key: bool = False, **kwargs) -> async_sdk.ChatCompletionsClient:
         endpoint, credential = self._load_chat_credentials(bad_key=bad_key, **kwargs)
         return async_sdk.ChatCompletionsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED)
@@ -103,15 +120,13 @@ def _print_model_info_result(model_info: sdk.models.ModelInfo):
         print("\tmodel_provider_name: {}".format(model_info.model_provider_name))
 
     @staticmethod
-    def _validate_model_info_result(model_info: sdk.models.ModelInfo):
+    def _validate_model_info_result(model_info: sdk.models.ModelInfo, expected_model_type: Union[str, sdk.models.ModelType]):
         assert model_info.model_name is not None
         assert len(model_info.model_name) > 0
         assert model_info.model_provider_name is not None
         assert len(model_info.model_provider_name) > 0
         assert model_info.model_type is not None
-        assert (
-            model_info.model_type == "completion"
-        )  # This should be sdk.models.ModelType.CHAT_COMPLETION once the model is fixed
+        assert model_info.model_type == expected_model_type
 
     @staticmethod
     def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contains: List[str]):
diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py
index bdbe1698528c..de72b4da58f5 100644
---
a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -4,6 +4,7 @@ # ------------------------------------ import inspect import azure.ai.inference as sdk +import azure.ai.inference.aio as async_sdk from model_inference_test_base import ModelClientTestBase, ServicePreparerChatCompletions, ServicePreparerEmbeddings from devtools_testutils.aio import recorded_by_proxy_async @@ -18,19 +19,63 @@ class TestModelAsyncClient(ModelClientTestBase): # HAPPY PATH TESTS # # ********************************************************************************** + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_load_chat_completions_client(self, **kwargs): + + client = await self._load_async_chat_client(**kwargs) + assert isinstance(client, async_sdk.ChatCompletionsClient) + result1 = await client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + await client.close() + + @ServicePreparerEmbeddings() + @recorded_by_proxy_async + async def test_async_load_embeddings_client(self, **kwargs): + + client = await self._load_async_embeddings_client(**kwargs) + assert isinstance(client, async_sdk.EmbeddingsClient) + result1 = await client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + await client.close() @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_get_model_info_error_free(self, **kwargs): + async def test_async_get_model_info_on_chat_client(self, **kwargs): client = self._create_async_chat_client(**kwargs) - result = await client.get_model_info() - self._print_model_info_result(result) - self._validate_model_info_result(result) + result1 = await client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + + # Get the model info again. No network calls should be made here, + # as the result is cached in the client. + result2 = await client.get_model_info() + self._print_model_info_result(result2) + assert result1 == result2 + + await client.close() + + @ServicePreparerEmbeddings() + @recorded_by_proxy_async + async def test_async_get_model_info_on_embeddings_client(self, **kwargs): + client = self._create_async_embeddings_client(**kwargs) + result1 = await client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + + # Get the model info again. No network calls should be made here, + # as the result is cached in the client. 
+ result2 = await client.get_model_info() + self._print_model_info_result(result2) + assert result1 == result2 + await client.close() @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_chat_completions_error_free(self, **kwargs): + async def test_async_chat_completions(self, **kwargs): messages = [ sdk.models.SystemMessage(content="You are a helpful assistant answering questions regarding length units."), sdk.models.UserMessage(content="How many feet are in a mile?"), @@ -50,21 +95,21 @@ async def test_async_chat_completions_error_free(self, **kwargs): @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_chat_completions_streaming_error_free(self, **kwargs): + async def test_async_chat_completions_streaming(self, **kwargs): client = self._create_async_chat_client(Sync=False, **kwargs) result = await client.complete( stream=True, messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), - ] + ], ) await self._validate_async_chat_completions_streaming_result(result) await client.close() @ServicePreparerEmbeddings() @recorded_by_proxy_async - async def test_async_embeddings_error_free(self, **kwargs): + async def test_async_embeddings(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) result = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) self._print_embeddings_result(result) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 27af63f9f475..ab3152234dcb 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -21,16 +21,63 @@ class TestModelClient(ModelClientTestBase): @ServicePreparerChatCompletions() @recorded_by_proxy - def test_get_model_info_error_free(self, **kwargs): + def test_load_chat_completions_client(self, **kwargs): + + client = self._load_chat_client(**kwargs) + assert isinstance(client, sdk.ChatCompletionsClient) + result1 = client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + client.close() + + @ServicePreparerEmbeddings() + @recorded_by_proxy + def test_load_embeddings_client(self, **kwargs): + + client = self._load_embeddings_client(**kwargs) + assert isinstance(client, sdk.EmbeddingsClient) + result1 = client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + client.close() + + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_get_model_info_on_chat_client(self, **kwargs): + client = self._create_chat_client(**kwargs) - result = client.get_model_info() - self._print_model_info_result(result) - self._validate_model_info_result(result) + result1 = client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + + # Get the model info again. No network calls should be made here, + # as the result is cached in the client. 
+ result2 = client.get_model_info() + self._print_model_info_result(result2) + assert result1 == result2 + client.close() + @ServicePreparerEmbeddings() + @recorded_by_proxy + def test_get_model_info_on_embeddings_client(self, **kwargs): + + client = self._create_embeddings_client(**kwargs) + result1 = client.get_model_info() + self._print_model_info_result(result1) + self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + + # Get the model info again. No network calls should be made here, + # as the result is cached in the client. + result2 = client.get_model_info() + self._print_model_info_result(result2) + assert result1 == result2 + + client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy - def test_chat_completions_error_free(self, **kwargs): + def test_chat_completions(self, **kwargs): client = self._create_chat_client(**kwargs) result = client.complete(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) self._print_chat_completions_result(result) @@ -39,21 +86,21 @@ def test_chat_completions_error_free(self, **kwargs): @ServicePreparerChatCompletions() @recorded_by_proxy - def test_chat_completions_streaming_error_free(self, **kwargs): + def test_chat_completions_streaming(self, **kwargs): client = self._create_chat_client(**kwargs) result = client.complete( stream=True, messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), - ] + ], ) self._validate_chat_completions_streaming_result(result) client.close() @ServicePreparerChatCompletions() @recorded_by_proxy - def test_chat_completions_with_tool_error_free(self, **kwargs): + def test_chat_completions_with_tool(self, **kwargs): forecast_tool = sdk.models.ChatCompletionsFunctionToolDefinition( function=sdk.models.FunctionDefinition( name="get_max_temperature", @@ -101,7 +148,7 @@ def test_chat_completions_with_tool_error_free(self, **kwargs): @ServicePreparerEmbeddings() @recorded_by_proxy - def test_embeddings_error_free(self, **kwargs): + def test_embeddings(self, **kwargs): client = self._create_embeddings_client(**kwargs) result = client.embedding(input=["first phrase", "second phrase", "third phrase"]) self._print_embeddings_result(result) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index ce49f4a1c287..e87796de0772 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 39fa4d1583207b3f358707f4d0c98b9e2fd9a54c +commit: 556cb56981e1667fccdd303860d6d508af20e63d repo: Azure/azure-rest-api-specs additionalDirectories: From 1faf123aa3518dd27da7373a2a24bc154f735302 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 22 May 2024 08:08:45 -0700 Subject: [PATCH 076/112] Some test changes --- .../azure/ai/inference/models/_enums.py | 4 +- .../azure/ai/inference/models/_models.py | 20 --- .../tests/model_inference_test_base.py | 135 +++++++++--------- .../test_model_inference_async_client.py | 66 ++++----- .../tests/test_model_inference_client.py | 86 ++++++----- 5 files changed, 147 insertions(+), 164 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 75d01fb0ad89..49053a340001 100644 
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -127,7 +127,7 @@ class UnknownParams(str, Enum, metaclass=CaseInsensitiveEnumMeta): ERROR = "error" """The service will error if it detected unknown parameters in the request payload. This is the default.""" - INGORE = "ignore" - """The servcie will ignore unknown parameters in the request payload.""" + IGNORE = "ignore" + """The service will ignore unknown parameters in the request payload.""" ALLOW = "allow" """The service will pass unknown parameters to the back-end AI model.""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 99b0cf2a1250..50b9109f336e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -208,8 +208,6 @@ class ChatCompletions(_model_base.Model): :ivar id: A unique identifier associated with this chat completions response. Required. :vartype id: str - :ivar object: The response object type, which is always ``chat.completion``. Required. - :vartype object: str :ivar created: The first timestamp associated with generation activity for this completions response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. @@ -227,8 +225,6 @@ class ChatCompletions(_model_base.Model): id: str = rest_field() """A unique identifier associated with this chat completions response. Required.""" - object: str = rest_field() - """The response object type, which is always ``chat.completion``. Required.""" created: datetime.datetime = rest_field(format="unix-timestamp") """The first timestamp associated with generation activity for this completions response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" @@ -247,7 +243,6 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - object: str, created: datetime.datetime, model: str, usage: "_models.CompletionsUsage", @@ -442,8 +437,6 @@ class ChatCompletionsUpdate(_model_base.Model): :ivar id: A unique identifier associated with this chat completions response. Required. :vartype id: str - :ivar object: The response object type, which is always ``chat.completion``. Required. - :vartype object: str :ivar created: The first timestamp associated with generation activity for this completions response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. @@ -462,8 +455,6 @@ class ChatCompletionsUpdate(_model_base.Model): id: str = rest_field() """A unique identifier associated with this chat completions response. Required.""" - object: str = rest_field() - """The response object type, which is always ``chat.completion``. Required.""" created: datetime.datetime = rest_field(format="unix-timestamp") """The first timestamp associated with generation activity for this completions response, represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" @@ -482,7 +473,6 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - object: str, created: datetime.datetime, model: str, usage: "_models.CompletionsUsage", @@ -646,8 +636,6 @@ class EmbeddingItem(_model_base.Model): :vartype embedding: list[float] :ivar index: Index of the prompt to which the EmbeddingItem corresponds. Required. 
:vartype index: int - :ivar object: The object type of this embeddings item. Will always be ``embedding``. Required. - :vartype object: str """ embedding: List[float] = rest_field() @@ -655,8 +643,6 @@ class EmbeddingItem(_model_base.Model): vector-based relatedness of the provided input. Required.""" index: int = rest_field() """Index of the prompt to which the EmbeddingItem corresponds. Required.""" - object: str = rest_field() - """The object type of this embeddings item. Will always be ``embedding``. Required.""" @overload def __init__( @@ -664,7 +650,6 @@ def __init__( *, embedding: List[float], index: int, - object: str, ): ... @overload @@ -692,8 +677,6 @@ class EmbeddingsResult(_model_base.Model): :vartype data: list[~azure.ai.inference.models.EmbeddingItem] :ivar usage: Usage counts for tokens input using the embeddings API. Required. :vartype usage: ~azure.ai.inference.models.EmbeddingsUsage - :ivar object: The object type of the embeddings result. Will always be ``list``. Required. - :vartype object: str :ivar model: The model ID used to generate this result. Required. :vartype model: str """ @@ -704,8 +687,6 @@ class EmbeddingsResult(_model_base.Model): """Embedding values for the prompts submitted in the request. Required.""" usage: "_models.EmbeddingsUsage" = rest_field() """Usage counts for tokens input using the embeddings API. Required.""" - object: str = rest_field() - """The object type of the embeddings result. Will always be ``list``. Required.""" model: str = rest_field() """The model ID used to generate this result. Required.""" @@ -716,7 +697,6 @@ def __init__( id: str, # pylint: disable=redefined-builtin data: List["_models.EmbeddingItem"], usage: "_models.EmbeddingsUsage", - object: str, model: str, ): ... diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index c64b68b01535..9facde17189a 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -114,7 +114,7 @@ def _create_embeddings_client_with_chat_completions_credentials(self, **kwargs) @staticmethod def _print_model_info_result(model_info: sdk.models.ModelInfo): if ModelClientTestBase.PRINT_RESULT: - print(" Model info result:") + print(" Model info:") print("\tmodel_name: {}".format(model_info.model_name)) print("\tmodel_type: {}".format(model_info.model_type)) print("\tmodel_provider_name: {}".format(model_info.model_provider_name)) @@ -129,40 +129,38 @@ def _validate_model_info_result(model_info: sdk.models.ModelInfo, expected_model assert model_info.model_type == expected_model_type @staticmethod - def _validate_chat_completions_result(result: sdk.models.ChatCompletions, contains: List[str]): - assert any(item in result.choices[0].message.content for item in contains) - assert result.choices[0].message.role == sdk.models.ChatRole.ASSISTANT - assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED - assert result.choices[0].index == 0 - assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) - assert result.created is not None - assert result.created != "" - assert result.model is not None - assert result.model != "" - assert result.object == "chat.completion" - assert result.usage.prompt_tokens > 0 - assert result.usage.completion_tokens > 0 - assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + def _validate_chat_completions_result(response: 
sdk.models.ChatCompletions, contains: List[str]): + assert any(item in response.choices[0].message.content for item in contains) + assert response.choices[0].message.role == sdk.models.ChatRole.ASSISTANT + assert response.choices[0].finish_reason == sdk.models.CompletionsFinishReason.STOPPED + assert response.choices[0].index == 0 + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(response.id)) + assert response.created is not None + assert response.created != "" + assert response.model is not None + assert response.model != "" + assert response.usage.prompt_tokens > 0 + assert response.usage.completion_tokens > 0 + assert response.usage.total_tokens == response.usage.prompt_tokens + response.usage.completion_tokens @staticmethod - def _validate_chat_completions_tool_result(result: sdk.models.ChatCompletions): - assert result.choices[0].message.content == None or result.choices[0].message.content == "" - assert result.choices[0].message.role == sdk.models.ChatRole.ASSISTANT - assert result.choices[0].finish_reason == sdk.models.CompletionsFinishReason.TOOL_CALLS - assert result.choices[0].index == 0 - function_args = json.loads(result.choices[0].message.tool_calls[0].function.arguments.replace("'", '"')) + def _validate_chat_completions_tool_result(response: sdk.models.ChatCompletions): + assert response.choices[0].message.content == None or response.choices[0].message.content == "" + assert response.choices[0].message.role == sdk.models.ChatRole.ASSISTANT + assert response.choices[0].finish_reason == sdk.models.CompletionsFinishReason.TOOL_CALLS + assert response.choices[0].index == 0 + function_args = json.loads(response.choices[0].message.tool_calls[0].function.arguments.replace("'", '"')) print(function_args) assert function_args["city"].lower() == "seattle" assert function_args["days"] == "2" - assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) - assert result.created is not None - assert result.created != "" - assert result.model is not None - # assert result.model != "" - assert result.object == "chat.completion" - assert result.usage.prompt_tokens > 0 - assert result.usage.completion_tokens > 0 - assert result.usage.total_tokens == result.usage.prompt_tokens + result.usage.completion_tokens + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(response.id)) + assert response.created is not None + assert response.created != "" + assert response.model is not None + # assert response.model != "" + assert response.usage.prompt_tokens > 0 + assert response.usage.completion_tokens > 0 + assert response.usage.total_tokens == response.usage.prompt_tokens + response.usage.completion_tokens @staticmethod def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, first: bool) -> str: @@ -174,7 +172,6 @@ def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, assert update.choices[0].delta.content != None assert update.created is not None assert update.created != "" - assert update.object == "chat.completion.chunk" assert update.choices[0].delta.tool_calls == None assert update.choices[0].index == 0 assert update.id is not None @@ -187,10 +184,10 @@ def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, return "" @staticmethod - def _validate_chat_completions_streaming_result(result: sdk.models.StreamingChatCompletions): + def _validate_chat_completions_streaming_result(response: sdk.models.StreamingChatCompletions): count = 0 content = "" - for update in result: + for update in response: content += 
ModelClientTestBase._validate_chat_completions_update(update, count == 0) count += 1 assert count > 2 @@ -204,10 +201,10 @@ def _validate_chat_completions_streaming_result(result: sdk.models.StreamingChat print(content) @staticmethod - async def _validate_async_chat_completions_streaming_result(result: sdk.models.StreamingChatCompletions): + async def _validate_async_chat_completions_streaming_result(response: sdk.models.StreamingChatCompletions): count = 0 content = "" - async for update in result: + async for update in response: content += ModelClientTestBase._validate_chat_completions_update(update, count == 0) count += 1 assert count > 2 @@ -221,55 +218,51 @@ async def _validate_async_chat_completions_streaming_result(result: sdk.models.S print(content) @staticmethod - def _print_chat_completions_result(result: sdk.models.ChatCompletions): + def _print_chat_completions_result(response: sdk.models.ChatCompletions): if ModelClientTestBase.PRINT_RESULT: - print(" Chat Completions result:") - for choice in result.choices: + print(" Chat Completions response:") + for choice in response.choices: print(f"\tchoices[0].message.content: {choice.message.content}") print(f"\tchoices[0].message.tool_calls: {choice.message.tool_calls}") print("\tchoices[0].message.role: {}".format(choice.message.role)) print("\tchoices[0].finish_reason: {}".format(choice.finish_reason)) print("\tchoices[0].index: {}".format(choice.index)) - print("\tid: {}".format(result.id)) - print("\tcreated: {}".format(result.created)) - print("\tmodel: {}".format(result.model)) - print("\tobject: {}".format(result.object)) - print("\tusage.prompt_tokens: {}".format(result.usage.prompt_tokens)) - print("\tusage.completion_tokens: {}".format(result.usage.completion_tokens)) - print("\tusage.total_tokens: {}".format(result.usage.total_tokens)) + print("\tid: {}".format(response.id)) + print("\tcreated: {}".format(response.created)) + print("\tmodel: {}".format(response.model)) + print("\tusage.prompt_tokens: {}".format(response.usage.prompt_tokens)) + print("\tusage.completion_tokens: {}".format(response.usage.completion_tokens)) + print("\tusage.total_tokens: {}".format(response.usage.total_tokens)) @staticmethod - def _validate_embeddings_result(result: sdk.models.EmbeddingsResult): - assert result is not None - assert result.data is not None - assert len(result.data) == 3 + def _validate_embeddings_result(response: sdk.models.EmbeddingsResult): + assert response is not None + assert response.data is not None + assert len(response.data) == 3 for i in [0, 1, 2]: - assert result.data[i] is not None - assert result.data[i].object == "embedding" - assert result.data[i].index == i - assert len(result.data[i].embedding) == 1024 - assert result.data[i].embedding[0] != 0.0 - assert result.data[i].embedding[1023] != 0.0 - assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(result.id)) - # assert len(result.model) > 0 # At the time of writing this test, this JSON field existed but was empty - assert result.object == "list" + assert response.data[i] is not None + assert response.data[i].index == i + assert len(response.data[i].embedding) == 1024 + assert response.data[i].embedding[0] != 0.0 + assert response.data[i].embedding[1023] != 0.0 + assert bool(ModelClientTestBase.REGEX_RESULT_ID.match(response.id)) + # assert len(response.model) > 0 # At the time of writing this test, this JSON field existed but was empty # At the time of writing this test, input_tokens did not exist (I see completion tokens instead) - # assert 
result.usage.input_tokens > 0 - # assert result.usage.prompt_tokens > 0 - # assert result.total_tokens == result.usage.input_tokens + result.usage.prompt_tokens + # assert response.usage.input_tokens > 0 + # assert response.usage.prompt_tokens > 0 + # assert response.total_tokens == response.usage.input_tokens + response.usage.prompt_tokens @staticmethod - def _print_embeddings_result(result: sdk.models.EmbeddingsResult): + def _print_embeddings_result(response: sdk.models.EmbeddingsResult): if ModelClientTestBase.PRINT_RESULT: - print("Embeddings result:") - for item in result.data: + print("Embeddings response:") + for item in response.data: length = len(item.embedding) print( - f"\tdata[{item.index}]: length={length}, object={item.object}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + f"\tdata[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" ) - print(f"\tid: {result.id}") - print(f"\tmodel: {result.model}") - print(f"\tobject: {result.object}") - # print(f"\tusage.input_tokens: {result.usage.input_tokens}") # At the time of writing this test, this JSON field does not exist - print(f"\tusage.prompt_tokens: {result.usage.prompt_tokens}") - print(f"\tusage.total_tokens: {result.usage.total_tokens}") + print(f"\tid: {response.id}") + print(f"\tmodel: {response.model}") + # print(f"\tusage.input_tokens: {response.usage.input_tokens}") # At the time of writing this test, this JSON field does not exist + print(f"\tusage.prompt_tokens: {response.usage.prompt_tokens}") + print(f"\tusage.total_tokens: {response.usage.total_tokens}") diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index de72b4da58f5..773fafdf7132 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -25,9 +25,9 @@ async def test_async_load_chat_completions_client(self, **kwargs): client = await self._load_async_chat_client(**kwargs) assert isinstance(client, async_sdk.ChatCompletionsClient) - result1 = await client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + response1 = await client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed await client.close() @ServicePreparerEmbeddings() @@ -36,24 +36,24 @@ async def test_async_load_embeddings_client(self, **kwargs): client = await self._load_async_embeddings_client(**kwargs) assert isinstance(client, async_sdk.EmbeddingsClient) - result1 = await client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + response1 = await client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed await client.close() @ServicePreparerChatCompletions() @recorded_by_proxy_async async def test_async_get_model_info_on_chat_client(self, **kwargs): client = self._create_async_chat_client(**kwargs) - result1 = await 
client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + response1 = await client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed # Get the model info again. No network calls should be made here, - # as the result is cached in the client. - result2 = await client.get_model_info() - self._print_model_info_result(result2) - assert result1 == result2 + # as the response is cached in the client. + response2 = await client.get_model_info() + self._print_model_info_result(response2) + assert response1 == response2 await client.close() @@ -61,15 +61,15 @@ async def test_async_get_model_info_on_chat_client(self, **kwargs): @recorded_by_proxy_async async def test_async_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) - result1 = await client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + response1 = await client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed # Get the model info again. No network calls should be made here, - # as the result is cached in the client. - result2 = await client.get_model_info() - self._print_model_info_result(result2) - assert result1 == result2 + # as the response is cached in the client. + response2 = await client.get_model_info() + self._print_model_info_result(response2) + assert response1 == response2 await client.close() @@ -82,38 +82,38 @@ async def test_async_chat_completions(self, **kwargs): ] client = self._create_async_chat_client(**kwargs) - result = await client.complete(messages=messages) - self._print_chat_completions_result(result) - self._validate_chat_completions_result(result, ["5280", "5,280"]) + response = await client.complete(messages=messages) + self._print_chat_completions_result(response) + self._validate_chat_completions_result(response, ["5280", "5,280"]) - messages.append(sdk.models.AssistantMessage(content=result.choices[0].message.content)) + messages.append(sdk.models.AssistantMessage(content=response.choices[0].message.content)) messages.append(sdk.models.UserMessage(content="and how many yards?")) - result = await client.complete(messages=messages) - self._print_chat_completions_result(result) - self._validate_chat_completions_result(result, ["1760", "1,760"]) + response = await client.complete(messages=messages) + self._print_chat_completions_result(response) + self._validate_chat_completions_result(response, ["1760", "1,760"]) await client.close() @ServicePreparerChatCompletions() @recorded_by_proxy_async async def test_async_chat_completions_streaming(self, **kwargs): client = self._create_async_chat_client(Sync=False, **kwargs) - result = await client.complete( + response = await client.complete( stream=True, messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), ], ) - await self._validate_async_chat_completions_streaming_result(result) + await self._validate_async_chat_completions_streaming_result(response) await client.close() 
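    # Illustrative aside (not one of the recorded tests): in application code the
    # same async streaming pattern looks like the sketch below, where `endpoint`
    # and `key` are placeholders for your own values:
    #
    #     from azure.ai.inference.aio import ChatCompletionsClient
    #     from azure.ai.inference.models import UserMessage
    #     from azure.core.credentials import AzureKeyCredential
    #
    #     client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
    #     response = await client.complete(stream=True, messages=[UserMessage(content="Hello")])
    #     async for update in response:
    #         print(update.choices[0].delta.content or "", end="")
    #     await client.close()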
@ServicePreparerEmbeddings() @recorded_by_proxy_async async def test_async_embeddings(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) - result = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) - self._print_embeddings_result(result) - self._validate_embeddings_result(result) + response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) + self._print_embeddings_result(response) + self._validate_embeddings_result(response) await client.close() # ********************************************************************************** @@ -128,7 +128,7 @@ async def test_embeddings_with_auth_failure(self, **kwargs): client = self._create_async_embeddings_client(bad_key=True, **kwargs) exception_caught = False try: - result = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: exception_caught = True print(e) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index ab3152234dcb..b8146dd04ca8 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -25,9 +25,9 @@ def test_load_chat_completions_client(self, **kwargs): client = self._load_chat_client(**kwargs) assert isinstance(client, sdk.ChatCompletionsClient) - result1 = client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + response1 = client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed client.close() @ServicePreparerEmbeddings() @@ -36,9 +36,9 @@ def test_load_embeddings_client(self, **kwargs): client = self._load_embeddings_client(**kwargs) assert isinstance(client, sdk.EmbeddingsClient) - result1 = client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + response1 = client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed client.close() @ServicePreparerChatCompletions() @@ -46,15 +46,15 @@ def test_load_embeddings_client(self, **kwargs): def test_get_model_info_on_chat_client(self, **kwargs): client = self._create_chat_client(**kwargs) - result1 = client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + response1 = client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed # Get the model info again. No network calls should be made here, - # as the result is cached in the client. - result2 = client.get_model_info() - self._print_model_info_result(result2) - assert result1 == result2 + # as the response is cached in the client. 
+ response2 = client.get_model_info() + self._print_model_info_result(response2) + assert response1 == response2 client.close() @@ -63,39 +63,49 @@ def test_get_model_info_on_chat_client(self, **kwargs): def test_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_embeddings_client(**kwargs) - result1 = client.get_model_info() - self._print_model_info_result(result1) - self._validate_model_info_result(result1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + response1 = client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed # Get the model info again. No network calls should be made here, - # as the result is cached in the client. - result2 = client.get_model_info() - self._print_model_info_result(result2) - assert result1 == result2 + # as the response is cached in the client. + response2 = client.get_model_info() + self._print_model_info_result(response2) + assert response1 == response2 client.close() @ServicePreparerChatCompletions() @recorded_by_proxy - def test_chat_completions(self, **kwargs): + def test_chat_completions_with_hyper_params(self, **kwargs): client = self._create_chat_client(**kwargs) - result = client.complete(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) - self._print_chat_completions_result(result) - self._validate_chat_completions_result(result, ["5280", "5,280"]) + response = client.complete( + messages=[sdk.models.UserMessage(content="How many feet are in a mile?")], + unknown_params=sdk.models.UnknownParams.IGNORE, + hyper_params={ + "key1": 1, + "key2": True, + "key3": "Some value", + "key4": [1, 2, 3], + "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, + }, + ) + self._print_chat_completions_result(response) + self._validate_chat_completions_result(response, ["5280", "5,280"]) client.close() @ServicePreparerChatCompletions() @recorded_by_proxy def test_chat_completions_streaming(self, **kwargs): client = self._create_chat_client(**kwargs) - result = client.complete( + response = client.complete( stream=True, messages=[ sdk.models.SystemMessage(content="You are a helpful assistant."), sdk.models.UserMessage(content="Give me 3 good reasons why I should exercise every day."), ], ) - self._validate_chat_completions_streaming_result(result) + self._validate_chat_completions_streaming_result(response) client.close() @ServicePreparerChatCompletions() @@ -126,33 +136,33 @@ def test_chat_completions_with_tool(self, **kwargs): sdk.models.SystemMessage(content="You are an assistant that helps users find weather information."), sdk.models.UserMessage(content="what's the maximum temperature in Seattle two days from now?"), ] - result = client.complete( + response = client.complete( messages=messages, tools=[forecast_tool], ) - self._print_chat_completions_result(result) - self._validate_chat_completions_tool_result(result) - messages.append(sdk.models.AssistantMessage(tool_calls=result.choices[0].message.tool_calls)) + self._print_chat_completions_result(response) + self._validate_chat_completions_tool_result(response) + messages.append(sdk.models.AssistantMessage(tool_calls=response.choices[0].message.tool_calls)) messages.append( sdk.models.ToolMessage( content="62", - tool_call_id=result.choices[0].message.tool_calls[0].id, + tool_call_id=response.choices[0].message.tool_calls[0].id, ) ) - result = 
client.complete( + response = client.complete( messages=messages, tools=[forecast_tool], ) - self._validate_chat_completions_result(result, ["62"]) + self._validate_chat_completions_result(response, ["62"]) client.close() @ServicePreparerEmbeddings() @recorded_by_proxy def test_embeddings(self, **kwargs): client = self._create_embeddings_client(**kwargs) - result = client.embedding(input=["first phrase", "second phrase", "third phrase"]) - self._print_embeddings_result(result) - self._validate_embeddings_result(result) + response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) + self._print_embeddings_result(response) + self._validate_embeddings_result(response) client.close() # ********************************************************************************** @@ -167,7 +177,7 @@ def test_chat_completion_with_auth_failure(self, **kwargs): client = self._create_chat_client(bad_key=True, **kwargs) exception_caught = False try: - result = client.complete(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) + response = client.complete(messages=[sdk.models.UserMessage(content="How many feet are in a mile?")]) except AzureError as e: exception_caught = True print(e) @@ -183,7 +193,7 @@ def test_embeddings_on_chat_completion_endpoint(self, **kwargs): client = self._create_embeddings_client_with_chat_completions_credentials(**kwargs) exception_caught = False try: - result = client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: exception_caught = True print(e) From 71bd71d3b06af9d9811efae8f6af502eff706e5e Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 22 May 2024 17:47:02 -0700 Subject: [PATCH 077/112] Many changes --- sdk/ai/azure-ai-inference/README.md | 131 +++++------ .../azure/ai/inference/_patch.py | 212 +++++++++--------- .../azure/ai/inference/aio/_patch.py | 210 ++++++++--------- sdk/ai/azure-ai-inference/samples/README.md | 42 +++- .../samples/async_samples/example_chat.json | 13 ++ ...chat_completions_from_input_bytes_async.py | 64 ++++++ ..._chat_completions_from_input_json_async.py | 64 ++++++ .../samples/example_chat.json | 14 +- .../samples/sample_chat_completions.py | 15 +- ...ample_chat_completions_from_input_bytes.py | 16 +- ...sample_chat_completions_from_input_json.py | 10 +- ...mple_chat_completions_with_hyper_params.py | 19 +- .../samples/sample_get_model_info.py | 2 +- .../samples/sample_load_client.py | 2 - .../azure-ai-inference/tests/chat.test.json | 13 ++ .../tests/model_inference_test_base.py | 21 +- .../test_model_inference_async_client.py | 113 +++++++--- .../tests/test_model_inference_client.py | 119 +++++++--- 18 files changed, 673 insertions(+), 407 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/example_chat.json create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py create mode 100644 sdk/ai/azure-ai-inference/tests/chat.test.json diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index e84e6951d077..069d419e5293 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,4 +1,4 @@ -# Azure model inference client library for Python +# Azure 
AI Inference client library for Python The client library allows you to do inference using AI models you deployed to Azure. It supports both Serverless Endpoints (aka "model as a service" (MaaS) or "pay as you go") and Self-hosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). @@ -8,7 +8,7 @@ Use the model inference client library to: * Get information about the model * Do chat completions * Get text embeddings -* Get image embeddings + Note that for inference using OpenAI models hosted on Azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client. @@ -44,9 +44,11 @@ To update an existing installation of the package, use: pip install --upgrade azure-ai-inference ``` -### Create and authenticate clients +## Key concepts + +### Create and authenticate a client directly -The package includes three clients `ChatCompletionsClient`, `EmbeddingsClient` and `ImageGenerationClients`. They are all created in the similar manner. For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`: +The package includes two clients, `ChatCompletionsClient` and `EmbeddingsClient`. Both can be created in a similar manner. For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`: ```python from azure.ai.inference import ChatCompletionsClient @@ -79,31 +81,63 @@ client = ChatCompletionsClient( ) ``` -## Key concepts +### Create and authenticate clients using `load_client` + +As an alternative to creating a specific client directly, you can use the function `load_client` to return the relevant client (of type `ChatCompletionsClient` or `EmbeddingsClient`) based on the provided endpoint: + +```python +from azure.ai.inference import load_client +from azure.core.credentials import AzureKeyCredential + +client = load_client( + endpoint=endpoint, + credential=AzureKeyCredential(key) +) + +print(f"Created client of type `{type(client).__name__}`.") +``` + +To load an asynchronous client, import the `load_client` function from `azure.ai.inference.aio` instead. + + +### Getting AI model information -### Loading the client and getting AI model information +All clients provide a `get_model_info` method to retrieve AI model information. This makes a REST call to the `/info` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-info). -TODO: Add overview and link to explain AI model info + + +```python +model_info = client.get_model_info() + +print(f"Model name: {model_info.model_name}") +print(f"Model provider name: {model_info.model_provider_name}") +print(f"Model type: {model_info.model_type}") +``` +AI model information is cached in the client, and further calls to `get_model_info` will access the cached value and will not result in a REST API call. Note that if you created the client using the `load_client` function, model information will already be cached in the client.
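For example, calling `get_model_info` twice on the same client triggers only one REST call (a minimal sketch, reusing the `client` created above):

```python
info1 = client.get_model_info()  # makes a REST call to the `/info` route
info2 = client.get_model_info()  # served from the client-side cache; no REST call
assert info1 == info2
```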
+ +AI model information is displayed (if available) when you `print(client)`. + + ### Chat Completions -TODO: Add overview and link to explain chat completions. +The `ChatCompletionsClient` has a method named `complete`. The method makes a REST API call to the `/chat/completions` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions). -Chat completion operations target the URL route `/chat/completions` on the provided endpoint. +See the simple chat completion examples below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/azure-ai-inference/sdk/ai/azure-ai-inference/samples) folder. ### Text Embeddings -TODO: Add overview and link to explain embeddings. +The `EmbeddingsClient` has a method named `embedding`. The method makes a REST API call to the `/embeddings` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-embeddings). -Embeddings operations target the URL route `/embeddings` on the provided endpoint. +See the simple text embedding example below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/azure-ai-inference/sdk/ai/azure-ai-inference/samples) folder. + ## Examples @@ -112,9 +146,7 @@ In the following sections you will find simple examples of: * [Chat completions](#chat-completions-example) * [Streaming chat completions](#streaming-chat-completions-example) * [Text Embeddings](#text-embeddings-example) -* [Image Embeddings](#image-embeddings-example) -* [Get model information](#get-model-information-example) -* [Loading a client using `load_client` function](#loading-a-client-using-load_client-function) + The examples create a synchronous client as mentioned in [Create and authenticate clients](#create-and-authenticate-clients). Only mandatory input settings are shown for simplicity. @@ -145,13 +177,11 @@ print(response.choices[0].message.content) -The printed result of course depends on the model, but you should get something like this: `Hello! I'd be happy to help answer your question. There are 5,280 feet in a mile`. - To generate completions for additional messages, simply call `client.complete` multiple times using the same `client`. ### Streaming chat completions example -This example demonstrates how to generate a single chat completions with streaming response. +This example demonstrates how to generate a single chat completion with a streaming response. You need to add `stream=True` to the `complete` call to enable streaming. @@ -167,7 +197,7 @@ response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ] + ], ) for update in response: @@ -180,11 +210,11 @@ client.close() The printed result of course depends on the model, but you should see the answer progressively get longer as updates get streamed to the client. -To generate completions for additional messages, simply call `client.create_streaming` multiple times using the same `client`. +To generate completions for additional messages, simply call `client.complete` multiple times using the same `client`.
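If you want the assembled text rather than incremental console output, you can accumulate the streamed deltas yourself (a minimal sketch, assuming the streaming `response` from the call above; empty updates are skipped):

```python
full_reply = ""
for update in response:
    if update.choices and update.choices[0].delta.content:
        full_reply += update.choices[0].delta.content
print(full_reply)
```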
### Text Embeddings example -This example demonstrates how to get embeddings. +This example demonstrates how to get text embeddings. @@ -207,6 +237,7 @@ for item in response.data: The printed result of course depends on the model, but you should see something like this: + ```txt data[0]: length=1024, [0.0013399124, -0.01576233, ..., 0.007843018, 0.000238657] data[1]: length=1024, [0.036590576, -0.0059547424, ..., 0.011405945, 0.004863739] data[2]: length=1024, [0.04196167, 0.029083252, ..., -0.0027484894, 0.0073127747 ``` To generate embeddings for additional phrases, simply call `client.embedding` multiple times using the same `client`. + + ```python from azure.ai.inference import ImageEmbeddingsClient @@ -243,7 +275,7 @@ for item in response.data: ) ``` - +<!-- END SNIPPET --> The printed result of course depends on the model, but you should see something like this: ```txt TBD ``` To generate embeddings for additional phrases, simply call `client.embedding` multiple times using the same `client`. - -### Get model information example - -Each one of the clients supports a `get_model_info` method that can be used to retreive infomation about the AI model. This example shows how to get model information from the `ChatCompletionsClient`, but similarly can be done with the other clients. - - - -```python -from azure.ai.inference import ChatCompletionsClient -from azure.core.credentials import AzureKeyCredential - -client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - -model_info = client.get_model_info() - -print(f"Model name: {model_info.model_name}") -print(f"Model provider name: {model_info.model_provider_name}") -print(f"Model type: {model_info.model_type}") -``` - - - -### Loading a client using `load_client` function - -Instead of creating a specific client directly (`ChatCompletionsClient`, `EmbeddingsClient` or `ImageEmbeddingsClient`) you can use the `load_client` function to create the appropriate synchronous client associated with the provided endpoint URL. In the example below, we use it to create a synchronous `ChatCompletionsClient`. Similarly, call the `load_async_client` to get the appropriate asynchronous client. - -The `load_client` function makes a REST API call to the `/info` route on the given endpoint, which provides the model type. Based on the model type, the correct client is returned. In most cases you know the model type (chat completions, embeddings, image embeddings) so you can create the appropriate client directly and avoid doing this addition REST API call.
- - - -```python -from azure.ai.inference import load_client, ChatCompletionsClient -from azure.ai.inference.models import SystemMessage, UserMessage -from azure.core.credentials import AzureKeyCredential - -client = load_client(endpoint=endpoint, credential=AzureKeyCredential(key)) - -# This should create a client of type `ChatCompletionsClient` -print(f"Created client of type `{type(client).__name__}`.") - -if isinstance(client, ChatCompletionsClient): - response = client.complete( - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), - ] - ) - - print(response.choices[0].message.content) -``` - - +--> ## Troubleshooting diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index da35103bf943..dcf1cb98dfc7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -87,43 +87,6 @@ def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): - @overload - def complete( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions - object to get content updates as they arrive. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to self-hosted endpoints (previously known as Model-as-a-Platform (MaaP) - or "real-time endpoints"). - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. - :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def complete( self, @@ -175,7 +138,7 @@ def complete( :paramtype content_type: str :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. 
:paramtype hyper_params: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens @@ -237,6 +200,43 @@ def complete( :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ + + @overload + def complete( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method with `stream=True`, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to self-hosted endpoints (previously known as Model-as-a-Platform (MaaP) + or "real-time endpoints"). + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. + :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + """ @overload def complete( @@ -324,7 +324,7 @@ def complete( :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. 
:paramtype hyper_params: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens @@ -419,6 +419,8 @@ def complete( if hyper_params is not None: body.update(hyper_params) body = {k: v for k, v in body.items() if v is not None} + elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): + stream = body["stream"] content_type = content_type or "application/json" _content = None if isinstance(body, (IOBase, bytes)): @@ -480,37 +482,6 @@ def __str__(self) -> str: class EmbeddingsClient(EmbeddingsClientGenerated): - @overload - def embedding( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def embedding( self, @@ -529,7 +500,7 @@ def embedding( :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -576,6 +547,37 @@ def embedding( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.EmbeddingsResult: + """Return the embeddings for a given text prompt. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. 
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def embedding( self, @@ -628,7 +630,7 @@ def embedding( :type body: JSON or IO[bytes] :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -751,37 +753,6 @@ def __str__(self) -> str: class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): - @overload - def embedding( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.EmbeddingsResult: - """Return the embeddings for given images. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def embedding( self, @@ -800,7 +771,7 @@ def embedding( :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an @@ -847,6 +818,37 @@ def embedding( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :param body: Required. 
+ :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def embedding( self, @@ -899,7 +901,7 @@ def embedding( :type body: JSON or IO[bytes] :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 5351a4709997..9d53d59d73e3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -77,42 +77,6 @@ async def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): - @overload - async def complete( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: - # pylint: disable=line-too-long - """Gets chat completions for the provided chat messages. - Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions - object to get content updates as they arrive. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
- Default value is "application/json". - :paramtype content_type: str - :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming. - :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def complete( self, @@ -161,7 +125,7 @@ async def complete( :paramtype content_type: str :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the @@ -235,6 +199,42 @@ async def complete( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def complete( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: + # pylint: disable=line-too-long + """Gets chat completions for the provided chat messages. + Completions support a wide variety of tasks and generate text that continues from or + "completes" provided prompt data. When using this method with `stream=True`, the response is streamed + back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions + object to get content updates as they arrive. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming. + :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def complete( self, @@ -318,7 +318,7 @@ async def complete( :paramtype model_deployment: str :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. 
:paramtype hyper_params: dict[str, Any] :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the @@ -425,6 +425,8 @@ async def complete( if hyper_params is not None: body.update(hyper_params) body = {k: v for k, v in body.items() if v is not None} + elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): + stream = body["stream"] content_type = content_type or "application/json" _content = None if isinstance(body, (IOBase, bytes)): @@ -486,37 +488,6 @@ def __str__(self) -> str: class EmbeddingsClient(EmbeddingsClientGenerated): - @overload - async def embedding( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def embedding( self, @@ -535,7 +506,7 @@ async def embedding( :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -582,6 +553,37 @@ async def embedding( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.EmbeddingsResult: + """Return the embeddings for a given text prompt. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. 
+ The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def embedding( self, @@ -634,7 +636,7 @@ async def embedding( :type body: JSON or IO[bytes] :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -758,37 +760,6 @@ def __str__(self) -> str: class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): - @overload - async def embedding( - self, - body: JSON, - *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.EmbeddingsResult: - """Return the embeddings for given images. - - :param body: Required. - :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def embedding( self, @@ -807,7 +778,7 @@ async def embedding( :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input image to embed. 
To embed multiple inputs in a single request, pass an @@ -854,6 +825,37 @@ async def embedding( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def embedding( + self, + body: JSON, + *, + model_deployment: Optional[str] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.EmbeddingsResult: + """Return the embeddings for given images. + + :param body: Required. + :type body: JSON + :keyword model_deployment: Name of the deployment to which you would like to route the request. + Relevant only to Model-as-a-Platform (MaaP) deployments. + Typically used when you want to target a test environment instead of production environment. + Default value is None. + :paramtype model_deployment: str + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. Known values are: "error", "ignore", and "allow". Default value is None. + The service defaults to "error" in this case. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def embedding( self, @@ -906,7 +908,7 @@ async def embedding( :type body: JSON or IO[bytes] :keyword hyper_params: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hypter parameters depends on the value of the + How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype hyper_params: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index bc7058f98caf..9bf7af522ebb 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -8,14 +8,14 @@ products: urlFragment: model-inference-samples --- -# Samples for the model client library for Python +# Samples for Azure AI Inference client library for Python -These are runnable console Python programs that show how to do chat completion, embeddings and image geneartion using the clients in this package. Samples are in this folder -and use the a synchronous client. Samples in the subfolder `async_samples` use the asynchronous client. -The concepts are similar, you can easily modify any of the samples to your needs. +These are runnable console Python scripts that show how to do chat completion, text embeddings and image embeddings using the clients in this package. Samples in this folder use the synchronous clients. Samples in the subfolder `async_samples` use the asynchronous clients. The concepts are similar; you can easily adapt any of the synchronous samples to be asynchronous.
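+
+All of these samples follow the same basic pattern: read the service endpoint and key from environment variables, construct a client, make one call, and print the result. Below is a minimal sketch of that shared skeleton, using the chat-completions environment variables (the other samples read their own variables):
+
+```python
+import os
+
+from azure.ai.inference import ChatCompletionsClient
+from azure.core.credentials import AzureKeyCredential
+
+# The samples expect the endpoint URL and key to be set as environment variables.
+endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
+key = os.environ["CHAT_COMPLETIONS_KEY"]
+
+# Construct a synchronous client; each sample then makes one service call with it.
+client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+```
+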
## Synchronous client samples +### Chat completions + |**File Name**|**Description**| |----------------|-------------| |[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | @@ -25,19 +25,49 @@ The concepts are similar, you can easily modify any of the samples to your needs |[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | |[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools | |[sample_load_client.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | +|[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. The same can be done with all other clients. | +|[sample_chat_completions_with_hyper_params.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py) | Chat completions with additional model-specific parameters. | + + +### Text embeddings + +|**File Name**|**Description**| +|----------------|-------------| |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | + + ## Asynchronous client samples +### Chat completions + |**File Name**|**Description**| |----------------|-------------| |[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | +|[sample_load_client_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client_async.py) | Shows how to use the function `load_client` from `azure.ai.inference.aio` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`.
| +|[sample_chat_completions_from_input_bytes_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `IO[bytes]`. | +|[sample_chat_completions_from_input_json_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `MutableMapping[str, Any]` | + +### Text embeddings + +|**File Name**|**Description**| +|----------------|-------------| |[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | + + ## Prerequisites diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/example_chat.json b/sdk/ai/azure-ai-inference/samples/async_samples/example_chat.json new file mode 100644 index 000000000000..ed95ff670792 --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/async_samples/example_chat.json @@ -0,0 +1,13 @@ +{ + "messages": + [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "How many feet are in a mile?" + } + ] +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py new file mode 100644 index 000000000000..903f843dbcae --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py @@ -0,0 +1,64 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completions response from + the service using an asynchronous client, and directly providing the + IO[bytes] request body (containing input chat messages). + +USAGE: + python sample_chat_completions_from_input_bytes_async.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. +""" +import asyncio +import io + +async def sample_chat_completions_from_input_bytes_async(): + import os + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + from azure.ai.inference.aio import ChatCompletionsClient + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + # Make a chat completion call, by directly providing the + # HTTP request body as IO[bytes], containing chat messages.
+ response = await client.complete(read_text_file("example_chat.json")) + + print(response.choices[0].message.content) + + await client.close() + + +def read_text_file(file_name: str) -> io.BytesIO: + """ + Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding. + The file is expected to be in the same directory as this Python script. + """ + from pathlib import Path + with Path(__file__).with_name(file_name).open("r") as f: + return io.BytesIO(f.read().encode("utf-8")) + + +async def main(): + await sample_chat_completions_from_input_bytes_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py new file mode 100644 index 000000000000..e24ed54d2a60 --- /dev/null +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py @@ -0,0 +1,64 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to get a chat completions response from + the service using an asynchronous client, and directly providing the + JSON request body (containing input chat messages). + +USAGE: + python sample_chat_completions_from_input_json_async.py + + Set these two environment variables before running the sample: + 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form + https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com + where `your-deployment-name` is your unique AI Model deployment name, and + `your-azure-region` is the Azure region where your model is deployed. + 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. +""" +import asyncio + +async def sample_chat_completions_from_input_json_async(): + import os + from azure.ai.inference.aio import ChatCompletionsClient + from azure.core.credentials import AzureKeyCredential + + try: + endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] + key = os.environ["CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") + print("Set them before running this sample.") + exit() + + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + + request_body = { + "messages": [ + { + "role": "system", + "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", + }, + {"role": "user", "content": "What year was construction of the International Space Station mostly done?"}, + { + "role": "assistant", + "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011.
During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", + }, + {"role": "user", "content": "And what was the estimated cost to build it?"}, + ] + } + + response = await client.complete(request_body) + + print(response.choices[0].message.content) + + await client.close() + +async def main(): + await sample_chat_completions_from_input_json_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/example_chat.json b/sdk/ai/azure-ai-inference/samples/example_chat.json index 49cf3e2cb647..f9acff2e18df 100644 --- a/sdk/ai/azure-ai-inference/samples/example_chat.json +++ b/sdk/ai/azure-ai-inference/samples/example_chat.json @@ -3,19 +3,11 @@ [ { "role": "system", - "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences." + "content": "You are a helpful assistant." }, { "role": "user", - "content": "What year was construction of the international space station mostly done?" - }, - { - "role": "assistant", - "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station." - }, - { - "role": "user", - "content": "And what was the estimated cost to build it?" + "content": "How many feet are in a mile?" } ] -} \ No newline at end of file +} diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index de5f0ef82593..17a6be128aa7 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -21,14 +21,7 @@ def sample_chat_completions(): import os - import sys - import logging - - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) - handler = logging.StreamHandler(stream=sys.stdout) - logger.addHandler(handler) - + try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] key = os.environ["CHAT_COMPLETIONS_KEY"] @@ -42,11 +35,7 @@ def sample_chat_completions(): from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key),logging_enable=True) - print(client) - model_info = client.get_model_info() - print(model_info) - print(client) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) response = client.complete( messages=[ diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index bafe6345ee06..e11d34edca44 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -20,7 +20,6 @@ """ import io - def sample_chat_completions_from_input_bytes(): import os @@ -33,9 +32,8 @@ def sample_chat_completions_from_input_bytes(): exit() from azure.ai.inference import ChatCompletionsClient - from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential - + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Make a chat 
completion call, by directly providing the @@ -45,10 +43,14 @@ def sample_chat_completions_from_input_bytes(): print(response.choices[0].message.content) -def read_text_file(file_path: str) -> io.BytesIO: - """Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding.""" - with open(file_path, "r") as file: - return io.BytesIO(file.read().encode("utf-8")) +def read_text_file(file_name: str) -> io.BytesIO: + """ + Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding. + The file is expected to be in the same directory as this Python script. + """ + from pathlib import Path + with Path(__file__).with_name(file_name).open("r") as f: + return io.BytesIO(f.read().encode("utf-8")) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index bdb322d29892..40766ae626f0 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -22,7 +22,6 @@ def sample_chat_completions_from_input_json(): import os - from typing import MutableMapping, Any from azure.ai.inference import ChatCompletionsClient from azure.core.credentials import AzureKeyCredential @@ -36,8 +35,7 @@ def sample_chat_completions_from_input_json(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - # Define the input chat messages as a MutableMapping - json_messages: MutableMapping[str, Any] = { + request_body = { "messages": [ { "role": "system", @@ -52,11 +50,11 @@ def sample_chat_completions_from_input_json(): ] } - # Make a chat completion call, by directly providing the - # HTTP request body as IO[bytes], containing chat messages. - response = client.complete(json_messages) + response = client.complete(request_body) print(response.choices[0].message.content) + + client.close() if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py index 3f7af2911d07..8d87d613dce7 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py @@ -25,13 +25,6 @@ def sample_chat_completions_with_hyper_params(): import os - import sys - import logging - - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) - handler = logging.StreamHandler(stream=sys.stdout) - logger.addHandler(handler) try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] @@ -44,23 +37,15 @@ def sample_chat_completions_with_hyper_params(): from azure.ai.inference import ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage, UnknownParams from azure.core.credentials import AzureKeyCredential - from azure.core.pipeline.policies import HeadersPolicy - client = ChatCompletionsClient( - endpoint=endpoint, - credential=AzureKeyCredential(key), - #headers={ - # "unknown-parameters": "allow" - #}, # Optional. Supported values: "allow", "ignore", "error" (the default). 
- logging_enable=True, - ) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], - unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "allow", "ignore", "error" (the default) + unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default) hyper_params={ # Optional. Additional parameters to pass to the model. "key1": 1, "key2": True, diff --git a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py index f7aca4e2be47..c5393fab914c 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py +++ b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py @@ -31,12 +31,12 @@ def sample_get_model_info(): print("Set them before running this sample.") exit() - # [START get_model_info] from azure.ai.inference import ChatCompletionsClient from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + # [START get_model_info] model_info = client.get_model_info() print(f"Model name: {model_info.model_name}") diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index a46abecc6634..683a05cb9c9d 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -42,8 +42,6 @@ def sample_load_client(): # This should create a client of type `ChatCompletionsClient` print(f"Created client of type `{type(client).__name__}`.") - print(client) - if isinstance(client, ChatCompletionsClient): response = client.complete( messages=[ diff --git a/sdk/ai/azure-ai-inference/tests/chat.test.json b/sdk/ai/azure-ai-inference/tests/chat.test.json new file mode 100644 index 000000000000..c3440a386b9b --- /dev/null +++ b/sdk/ai/azure-ai-inference/tests/chat.test.json @@ -0,0 +1,13 @@ +{ + "messages": + [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "How many feet are in a mile?" + } + ] +} diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 9facde17189a..dffc81aa20cb 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -2,15 +2,17 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License.
# ------------------------------------ -import functools -import logging -import sys import azure.ai.inference as sdk import azure.ai.inference.aio as async_sdk -import re +import functools +import io import json +import logging +import re +import sys from os import path +from pathlib import Path from typing import List, Optional, Union from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader from azure.core.credentials import AzureKeyCredential @@ -111,6 +113,15 @@ def _create_embeddings_client_with_chat_completions_credentials(self, **kwargs) credential = AzureKeyCredential(key) return sdk.EmbeddingsClient(endpoint=endpoint, credential=credential, logging_enable=LOGGING_ENABLED) + @staticmethod + def read_text_file(file_name: str) -> io.BytesIO: + """ + Reads a text file and returns a BytesIO object with the file content in UTF-8 encoding. + The file is expected to be in the same directory as this Python script. + """ + with Path(__file__).with_name(file_name).open("r") as f: + return io.BytesIO(f.read().encode("utf-8")) + @staticmethod def _print_model_info_result(model_info: sdk.models.ModelInfo): if ModelClientTestBase.PRINT_RESULT: @@ -201,7 +212,7 @@ def _validate_chat_completions_streaming_result(response: sdk.models.StreamingCh print(content) @staticmethod - async def _validate_async_chat_completions_streaming_result(response: sdk.models.StreamingChatCompletions): + async def _validate_async_chat_completions_streaming_result(response: sdk.models.AsyncStreamingChatCompletions): count = 0 content = "" async for update in response: diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 773fafdf7132..26691cb27402 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -16,19 +16,9 @@ class TestModelAsyncClient(ModelClientTestBase): # ********************************************************************************** # - # HAPPY PATH TESTS + # HAPPY PATH TESTS - TEXT EMBEDDINGS # # ********************************************************************************** - @ServicePreparerChatCompletions() - @recorded_by_proxy_async - async def test_async_load_chat_completions_client(self, **kwargs): - - client = await self._load_async_chat_client(**kwargs) - assert isinstance(client, async_sdk.ChatCompletionsClient) - response1 = await client.get_model_info() - self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed - await client.close() @ServicePreparerEmbeddings() @recorded_by_proxy_async @@ -41,13 +31,13 @@ async def test_async_load_embeddings_client(self, **kwargs): self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed await client.close() - @ServicePreparerChatCompletions() + @ServicePreparerEmbeddings() @recorded_by_proxy_async - async def test_async_get_model_info_on_chat_client(self, **kwargs): - client = self._create_async_chat_client(**kwargs) + async def test_async_get_model_info_on_embeddings_client(self, **kwargs): + client = self._create_async_embeddings_client(**kwargs) response1 = await client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + 
self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. @@ -59,11 +49,37 @@ async def test_async_get_model_info_on_chat_client(self, **kwargs): @ServicePreparerEmbeddings() @recorded_by_proxy_async - async def test_async_get_model_info_on_embeddings_client(self, **kwargs): + async def test_async_embeddings(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) + response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) + self._print_embeddings_result(response) + self._validate_embeddings_result(response) + await client.close() + + # ********************************************************************************** + # + # HAPPY PATH TESTS - CHAT COMPLETIONS + # + # ********************************************************************************** + + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_load_chat_completions_client(self, **kwargs): + + client = await self._load_async_chat_client(**kwargs) + assert isinstance(client, async_sdk.ChatCompletionsClient) response1 = await client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + await client.close() + + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_get_model_info_on_chat_client(self, **kwargs): + client = self._create_async_chat_client(**kwargs) + response1 = await client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. 
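The comment above captures a behavior worth illustrating: the first `get_model_info` call hits the `/info` route, and subsequent calls are served from the client-side cache. A minimal sketch of that pattern, with placeholder endpoint and key values rather than real credentials:

```python
# Sketch only: the endpoint and key below are placeholders, not real values.
import asyncio

from azure.ai.inference.aio import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential


async def show_model_info_caching() -> None:
    client = ChatCompletionsClient(
        endpoint="https://example.eastus2.inference.ai.azure.com",
        credential=AzureKeyCredential("placeholder-key"),
    )
    first = await client.get_model_info()   # REST call to the /info route
    second = await client.get_model_info()  # served from the cached value
    assert first == second                  # no second network call is made
    await client.close()


asyncio.run(show_model_info_caching())
```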
@@ -75,17 +91,15 @@ async def test_async_get_model_info_on_embeddings_client(self, **kwargs): @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_chat_completions(self, **kwargs): + async def test_async_chat_completions_multi_turn(self, **kwargs): messages = [ sdk.models.SystemMessage(content="You are a helpful assistant answering questions regarding length units."), sdk.models.UserMessage(content="How many feet are in a mile?"), ] - client = self._create_async_chat_client(**kwargs) response = await client.complete(messages=messages) self._print_chat_completions_result(response) self._validate_chat_completions_result(response, ["5280", "5,280"]) - messages.append(sdk.models.AssistantMessage(content=response.choices[0].message.content)) messages.append(sdk.models.UserMessage(content="and how many yards?")) response = await client.complete(messages=messages) @@ -93,6 +107,25 @@ async def test_async_chat_completions(self, **kwargs): self._validate_chat_completions_result(response, ["1760", "1,760"]) await client.close() + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_chat_completions_with_hyper_params(self, **kwargs): + client = self._create_async_chat_client(**kwargs) + response = await client.complete( + messages=[sdk.models.UserMessage(content="How many feet are in a mile?")], + unknown_params=sdk.models.UnknownParams.IGNORE, + hyper_params={ + "key1": 1, + "key2": True, + "key3": "Some value", + "key4": [1, 2, 3], + "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, + }, + ) + self._print_chat_completions_result(response) + self._validate_chat_completions_result(response, ["5280", "5,280"]) + await client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy_async async def test_async_chat_completions_streaming(self, **kwargs): @@ -107,13 +140,41 @@ async def test_async_chat_completions_streaming(self, **kwargs): await self._validate_async_chat_completions_streaming_result(response) await client.close() - @ServicePreparerEmbeddings() + @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_embeddings(self, **kwargs): - client = self._create_async_embeddings_client(**kwargs) - response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) - self._print_embeddings_result(response) - self._validate_embeddings_result(response) + async def test_async_chat_completions_with_json_input(self, **kwargs): + client = self._create_async_chat_client(**kwargs) + request_body = { + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "How many feet are in a mile?"}, + ] + } + response = await client.complete(request_body) + self._validate_chat_completions_result(response, ["5280", "5,280"]) + await client.close() + + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_chat_completions_with_bytes_input(self, **kwargs): + client = self._create_async_chat_client(**kwargs) + response = await client.complete(self.read_text_file("chat.test.json")) + self._validate_chat_completions_result(response, ["5280", "5,280"]) + await client.close() + + @ServicePreparerChatCompletions() + @recorded_by_proxy_async + async def test_async_chat_completions_streaming_with_json_input(self, **kwargs): + client = self._create_async_chat_client(**kwargs) + request_body = { + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", 
"content": "Give me 3 good reasons why I should exercise every day."}, + ], + "stream": True + } + response = await client.complete(request_body) + await self._validate_async_chat_completions_streaming_result(response) await client.close() # ********************************************************************************** diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index b8146dd04ca8..66ff16385017 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -15,21 +15,10 @@ class TestModelClient(ModelClientTestBase): # ********************************************************************************** # - # HAPPY PATH TESTS + # HAPPY PATH TESTS - TEXT EMBEDDINGS # # ********************************************************************************** - @ServicePreparerChatCompletions() - @recorded_by_proxy - def test_load_chat_completions_client(self, **kwargs): - - client = self._load_chat_client(**kwargs) - assert isinstance(client, sdk.ChatCompletionsClient) - response1 = client.get_model_info() - self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed - client.close() - @ServicePreparerEmbeddings() @recorded_by_proxy def test_load_embeddings_client(self, **kwargs): @@ -41,14 +30,14 @@ def test_load_embeddings_client(self, **kwargs): self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed client.close() - @ServicePreparerChatCompletions() + @ServicePreparerEmbeddings() @recorded_by_proxy - def test_get_model_info_on_chat_client(self, **kwargs): + def test_get_model_info_on_embeddings_client(self, **kwargs): - client = self._create_chat_client(**kwargs) + client = self._create_embeddings_client(**kwargs) response1 = client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. 
@@ -60,12 +49,38 @@ def test_get_model_info_on_chat_client(self, **kwargs): @ServicePreparerEmbeddings() @recorded_by_proxy - def test_get_model_info_on_embeddings_client(self, **kwargs): - + def test_embeddings(self, **kwargs): client = self._create_embeddings_client(**kwargs) + response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) + self._print_embeddings_result(response) + self._validate_embeddings_result(response) + client.close() + + # ********************************************************************************** + # + # HAPPY PATH TESTS - CHAT COMPLETIONS + # + # ********************************************************************************** + + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_load_chat_completions_client(self, **kwargs): + + client = self._load_chat_client(**kwargs) + assert isinstance(client, sdk.ChatCompletionsClient) response1 = client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + client.close() + + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_get_model_info_on_chat_client(self, **kwargs): + + client = self._create_chat_client(**kwargs) + response1 = client.get_model_info() + self._print_model_info_result(response1) + self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. @@ -74,7 +89,25 @@ def test_get_model_info_on_embeddings_client(self, **kwargs): assert response1 == response2 client.close() - + + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_chat_completions_multi_turn(self, **kwargs): + client = self._create_chat_client(**kwargs) + messages = [ + sdk.models.SystemMessage(content="You are a helpful assistant answering questions regarding length units."), + sdk.models.UserMessage(content="How many feet are in a mile?"), + ] + response = client.complete(messages=messages) + self._print_chat_completions_result(response) + self._validate_chat_completions_result(response, ["5280", "5,280"]) + messages.append(sdk.models.AssistantMessage(content=response.choices[0].message.content)) + messages.append(sdk.models.UserMessage(content="and how many yards?")) + response = client.complete(messages=messages) + self._print_chat_completions_result(response) + self._validate_chat_completions_result(response, ["1760", "1,760"]) + client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy def test_chat_completions_with_hyper_params(self, **kwargs): @@ -94,6 +127,28 @@ def test_chat_completions_with_hyper_params(self, **kwargs): self._validate_chat_completions_result(response, ["5280", "5,280"]) client.close() + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_chat_completions_with_json_input(self, **kwargs): + client = self._create_chat_client(**kwargs) + request_body = { + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "How many feet are in a mile?"}, + ] + } + response = client.complete(request_body) + self._validate_chat_completions_result(response, ["5280", "5,280"]) + client.close() + + @ServicePreparerChatCompletions() + @recorded_by_proxy + def 
test_chat_completions_with_bytes_input(self, **kwargs): + client = self._create_chat_client(**kwargs) + response = client.complete(self.read_text_file("chat.test.json")) + self._validate_chat_completions_result(response, ["5280", "5,280"]) + client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy def test_chat_completions_streaming(self, **kwargs): @@ -108,6 +163,21 @@ def test_chat_completions_streaming(self, **kwargs): self._validate_chat_completions_streaming_result(response) client.close() + @ServicePreparerChatCompletions() + @recorded_by_proxy + def test_chat_completions_streaming_with_json_input(self, **kwargs): + client = self._create_chat_client(**kwargs) + request_body = { + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Give me 3 good reasons why I should exercise every day."}, + ], + "stream": True + } + response = client.complete(request_body) + self._validate_chat_completions_streaming_result(response) + client.close() + @ServicePreparerChatCompletions() @recorded_by_proxy def test_chat_completions_with_tool(self, **kwargs): @@ -156,15 +226,6 @@ def test_chat_completions_with_tool(self, **kwargs): self._validate_chat_completions_result(response, ["62"]) client.close() - @ServicePreparerEmbeddings() - @recorded_by_proxy - def test_embeddings(self, **kwargs): - client = self._create_embeddings_client(**kwargs) - response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) - self._print_embeddings_result(response) - self._validate_embeddings_result(response) - client.close() - # ********************************************************************************** # # ERROR TESTS From 712dfccba994710655853794b7b943829f734b93 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 22 May 2024 18:14:31 -0700 Subject: [PATCH 078/112] New test recordings --- sdk/ai/azure-ai-inference/assets.json | 2 +- .../tests/test_model_inference_async_client.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index 00660db93c6b..2b675c4979f7 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_9b9508aeab" + "Tag": "python/ai/azure-ai-inference_1f7396c982" } diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 26691cb27402..07220c6370e2 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -19,7 +19,7 @@ class TestModelAsyncClient(ModelClientTestBase): # HAPPY PATH TESTS - TEXT EMBEDDINGS # # ********************************************************************************** - + """ live test with recording fails for this... why? 
@ServicePreparerEmbeddings() @recorded_by_proxy_async async def test_async_load_embeddings_client(self, **kwargs): @@ -30,6 +30,7 @@ async def test_async_load_embeddings_client(self, **kwargs): self._print_model_info_result(response1) self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed await client.close() + """ @ServicePreparerEmbeddings() @recorded_by_proxy_async From d6012b795c3dda9d4ad9d66c9fd098c13d87fd38 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 22 May 2024 22:58:25 -0700 Subject: [PATCH 079/112] Minor samples and README.md changes --- sdk/ai/azure-ai-inference/README.md | 50 ++++++++++++++++--- sdk/ai/azure-ai-inference/samples/README.md | 2 +- ...mple_chat_completions_with_hyper_params.py | 2 + 3 files changed, 45 insertions(+), 9 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 069d419e5293..cb0924be473d 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -13,9 +13,9 @@ Use the model inference client library to: Note that for inference using OpenAI models hosted on Azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client. [Product documentation](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api) -| [Samples](https://aka.ms/azsdk/model-client/samples/python) -| [API reference documentation](https://aka.ms/azsdk/azure-ai-inference/ref-docs/python) -| [Package (Pypi)](https://aka.ms/azsdk/azure-ai-inference/package/pypi) +| [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) +| [API reference documentation](https://aka.ms/azsdk/azure-ai-inference/python/reference) +| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-inference/python/package) | [SDK source code](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/azure/ai/inference) ## Getting started @@ -99,7 +99,6 @@ print(f"Created client of type `{type(client).__name__}`.") To load an asynchronous client, import the `load_client` function from `azure.ai.inference.aio` instead. - ### Getting AI model information All clients provide a `get_model_info` method to retrieve AI model information. This makes a REST call to the `/info` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-info). @@ -113,23 +112,24 @@ print(f"Model name: {model_info.model_name}") print(f"Model provider name: {model_info.model_provider_name}") print(f"Model type: {model_info.model_type}") ``` + + + AI model information is cached in the client, and further calls to `get_model_info` will access the cached value and will not result in a REST API call. Note that if you created the client using the `load_client` function, model information will already be cached in the client. AI model information is displayed (if available) when you `print(client)`. - - ### Chat Completions The `ChatCompletionsClient` has a method named `complete`. The method makes a REST API call to the `/chat/completions` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions). -See simple chat completion examples below.
More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/azure-ai-inference/sdk/ai/azure-ai-inference/samples) folder. +See simple chat completion examples below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder. ### Text Embeddings The `EmbeddingsClient` has a method named `embedding`. The method makes a REST API call to the `/embeddings` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-embeddings). -See simple text embedding example below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/azure-ai-inference/sdk/ai/azure-ai-inference/samples) folder. +See simple text embedding example below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder. +### Sending proprietary model parameters + +The REST API defines common model parameters for chat completions, text embeddings, etc. If the model you are targeting has additional parameters you would like to set, the client library allows you to easily do so. See [Chat completions with additional model-specific parameters](#chat-completions-with-additional-model-specific-parameters). The same applies to the other clients. + ## Examples In the following sections you will find simple examples of: * [Chat completions](#chat-completions-example) * [Streaming chat completions](#streaming-chat-completions-example) +* [Chat completions with additional model-specific parameters](#chat-completions-with-additional-model-specific-parameters) * [Text Embeddings](#text-embeddings-example) @@ -212,6 +217,35 @@ The printed result of course depends on the model, but you should see the answer To generate completions for additional messages, simply call `client.complete` multiple times using the same `client`. +### Chat completions with additional model-specific parameters + +In this example, additional JSON elements are inserted at the root of the request body by setting `hyper_params` when calling the `complete` method. + +Note that by default, the service will reject any request payload that includes unknown parameters (ones that are not defined in the REST API [Request Body table](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions#request-body)). In order to change that behaviour, the request must include an additional HTTP header that describes the intended behaviour with regard to unknown parameters. This is done by setting `unknown_params` to either allow passing the unknown parameters to the AI model, or to ignore them (drop them) and only pass the known parameters to the model. + +The settings `hyper_params` and `unknown_params` are supported for all other clients as well. + + + +```python +response = client.complete( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ], + unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default) + hyper_params={ # Optional. Additional parameters to pass to the model.
+ "key1": 1, + "key2": True, + "key3": "Some value", + "key4": [1, 2, 3], + "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, + }, +) +``` + + + ### Text Embeddings example This example demonstrates how to get text embeddings. diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 9bf7af522ebb..84eef7266575 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -51,7 +51,7 @@ These are runnable console Python scripts that show how to do chat completion, t |----------------|-------------| |[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | |[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | -|[sample_load_client_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client_async.py) | Shows how do use the function `load_async_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`. | +|[sample_load_client_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py) | Shows how do use the function `load_async_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`. | |[sample_chat_completions_from_input_bytes_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. 
| |[sample_chat_completions_from_input_json_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py index 8d87d613dce7..4792be49c471 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py @@ -40,6 +40,7 @@ def sample_chat_completions_with_hyper_params(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + # [START hyper_params] response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), @@ -54,6 +55,7 @@ def sample_chat_completions_with_hyper_params(): "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, }, ) + # [END chat_completions] print(response.choices[0].message.content) From 3f52d6fd78e3c70e323b79ef2dcbb8e730270f01 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 22 May 2024 23:56:27 -0700 Subject: [PATCH 080/112] Update names of streaming response classes --- .../azure/ai/inference/models/__init__.py | 8 +++---- .../azure/ai/inference/models/_models.py | 10 ++++---- .../azure/ai/inference/models/_patch.py | 24 +++++++++---------- ...sample_chat_completions_streaming_async.py | 2 +- .../tests/model_inference_test_base.py | 2 +- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 3ad02b68ecca..685cc394667a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -8,14 +8,14 @@ from ._models import AssistantMessage from ._models import ChatChoice -from ._models import ChatChoiceUpdate +from ._models import StreamingChatChoiceUpdate from ._models import ChatCompletions from ._models import ChatCompletionsFunctionToolCall from ._models import ChatCompletionsFunctionToolDefinition from ._models import ChatCompletionsNamedToolSelection from ._models import ChatCompletionsToolCall from ._models import ChatCompletionsToolDefinition -from ._models import ChatCompletionsUpdate +from ._models import StreamingChatCompletionsUpdate from ._models import ChatRequestMessage from ._models import ChatResponseMessage from ._models import CompletionsUsage @@ -47,7 +47,7 @@ "AssistantMessage", "CapacityType", "ChatChoice", - "ChatChoiceUpdate", + "StreamingChatChoiceUpdate", "ChatCompletions", "ChatCompletionsFunctionToolCall", "ChatCompletionsFunctionToolDefinition", @@ -56,7 +56,7 @@ "ChatCompletionsToolCall", "ChatCompletionsToolDefinition", "ChatCompletionsToolSelectionPreset", - "ChatCompletionsUpdate", + "StreamingChatCompletionsUpdate", "ChatRequestMessage", "ChatResponseMessage", "ChatRole", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 50b9109f336e..a10584775238 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ 
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py index 8d87d613dce7..4792be49c471 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py @@ -40,6 +40,7 @@ def sample_chat_completions_with_hyper_params(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + # [START hyper_params] response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), @@ -54,6 +55,7 @@ def sample_chat_completions_with_hyper_params(): "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, }, ) + # [END hyper_params] print(response.choices[0].message.content)
From 3f52d6fd78e3c70e323b79ef2dcbb8e730270f01 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 22 May 2024 23:56:27 -0700 Subject: [PATCH 080/112] Update names of streaming response classes --- .../azure/ai/inference/models/__init__.py | 8 +++---- .../azure/ai/inference/models/_models.py | 10 ++++---- .../azure/ai/inference/models/_patch.py | 24 +++++++++---------- ...sample_chat_completions_streaming_async.py | 2 +- .../tests/model_inference_test_base.py | 2 +- 5 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 3ad02b68ecca..685cc394667a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -8,14 +8,14 @@ from ._models import AssistantMessage from ._models import ChatChoice -from ._models import ChatChoiceUpdate +from ._models import StreamingChatChoiceUpdate from ._models import ChatCompletions from ._models import ChatCompletionsFunctionToolCall from ._models import ChatCompletionsFunctionToolDefinition from ._models import ChatCompletionsNamedToolSelection from ._models import ChatCompletionsToolCall from ._models import ChatCompletionsToolDefinition -from ._models import ChatCompletionsUpdate +from ._models import StreamingChatCompletionsUpdate from ._models import ChatRequestMessage from ._models import ChatResponseMessage from ._models import CompletionsUsage @@ -47,7 +47,7 @@ "AssistantMessage", "CapacityType", "ChatChoice", - "ChatChoiceUpdate", + "StreamingChatChoiceUpdate", "ChatCompletions", "ChatCompletionsFunctionToolCall", "ChatCompletionsFunctionToolDefinition", @@ -56,7 +56,7 @@ "ChatCompletionsToolCall", "ChatCompletionsToolDefinition", "ChatCompletionsToolSelectionPreset", - "ChatCompletionsUpdate", + "StreamingChatCompletionsUpdate", "ChatRequestMessage", "ChatResponseMessage", "ChatRole", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 50b9109f336e..a10584775238 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ 
b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -153,7 +153,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class ChatChoiceUpdate(_model_base.Model): +class StreamingChatChoiceUpdate(_model_base.Model): """Represents an update to a single prompt completion when the service is streaming updates using Server Sent Events (SSE). Generally, ``n`` choices are generated per provided prompt with a default value of 1. @@ -425,7 +425,7 @@ class ChatCompletionsNamedToolSelection(_model_base.Model): """The object type. Required.""" -class ChatCompletionsUpdate(_model_base.Model): +class StreamingChatCompletionsUpdate(_model_base.Model): """Represents a response update to a chat completions request, when the service is streaming updates using Server Sent Events (SSE). @@ -450,7 +450,7 @@ class ChatCompletionsUpdate(_model_base.Model): completions response. Generally, ``n`` choices are generated per provided prompt with a default value of 1. Token limits and other settings may limit the number of choices generated. Required. - :vartype choices: list[~azure.ai.inference.models.ChatChoiceUpdate] + :vartype choices: list[~azure.ai.inference.models.StreamingChatChoiceUpdate] """ id: str = rest_field() @@ -463,7 +463,7 @@ class ChatCompletionsUpdate(_model_base.Model): usage: "_models.CompletionsUsage" = rest_field() """Usage information for tokens processed and generated as part of this completions operation. Required.""" - choices: List["_models.ChatChoiceUpdate"] = rest_field() + choices: List["_models.StreamingChatChoiceUpdate"] = rest_field() """An update to the collection of completion choices associated with this completions response. Generally, ``n`` choices are generated per provided prompt with a default value of 1. Token limits and other settings may limit the number of choices generated. Required.""" @@ -476,7 +476,7 @@ def __init__( created: datetime.datetime, model: str, usage: "_models.CompletionsUsage", - choices: List["_models.ChatChoiceUpdate"], + choices: List["_models.StreamingChatChoiceUpdate"], ): ... @overload diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 4d2b41e6b894..9c7affb1d14d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -22,27 +22,27 @@ class BaseStreamingChatCompletions: """A base class for the sync and async streaming chat completions responses, holding any common code to deserializes the Server Sent Events (SSE) response stream into chat completions updates, each one - represented by a ChatCompletionsUpdate object. + represented by a StreamingChatCompletionsUpdate object. """ # Enable detailed logs of SSE parsing. For development only, should be `False` by default. 
_ENABLE_CLASS_LOGS = False # The prefix of each line in the SSE stream that contains a JSON string - # to deserialize into a ChatCompletionsUpdate object + # to deserialize into a StreamingChatCompletionsUpdate object _SSE_DATA_EVENT_PREFIX = "data: " # The line indicating the end of the SSE stream _SSE_DATA_EVENT_DONE = "data: [DONE]" def __init__(self): - self._queue: "queue.Queue[_models.ChatCompletionsUpdate]" = queue.Queue() + self._queue: "queue.Queue[_models.StreamingChatCompletionsUpdate]" = queue.Queue() self._incomplete_json = "" self._done = False # Will be set to True when reading 'data: [DONE]' line def _deserialize_and_add_to_queue(self, element: bytes) -> bool: - # Clear the queue of ChatCompletionsUpdate before processing the next block + # Clear the queue of StreamingChatCompletionsUpdate before processing the next block self._queue.queue.clear() # Convert `bytes` to string and split the string by newline, while keeping the new line char. @@ -76,11 +76,11 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: return True # If you reached here, the line should contain `data: {...}\n` - # where the curly braces contain a valid JSON object. Deserialize it into a ChatCompletionsUpdate object + # where the curly braces contain a valid JSON object. Deserialize it into a StreamingChatCompletionsUpdate object # and add it to the queue. self._queue.put( # pylint: disable=W0212 # Access to a protected member _deserialize of a client class - _models.ChatCompletionsUpdate._deserialize(json.loads(line[len(self._SSE_DATA_EVENT_PREFIX) : -1]), []) + _models.StreamingChatCompletionsUpdate._deserialize(json.loads(line[len(self._SSE_DATA_EVENT_PREFIX) : -1]), []) ) if self._ENABLE_CLASS_LOGS: @@ -90,9 +90,9 @@ def _deserialize_and_add_to_queue(self, element: bytes) -> bool: class StreamingChatCompletions(BaseStreamingChatCompletions): - """Represents an interator over ChatCompletionsUpdate objects. It can be used for either synchronous or + """Represents an iterator over StreamingChatCompletionsUpdate objects. It can be used for either synchronous or asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream - into chat completions updates, each one represented by a ChatCompletionsUpdate object. + into chat completions updates, each one represented by a StreamingChatCompletionsUpdate object. """ def __init__(self, response: HttpResponse): @@ -103,7 +103,7 @@ def __init__(self, response: HttpResponse): def __iter__(self): return self - def __next__(self) -> _models.ChatCompletionsUpdate: + def __next__(self) -> _models.StreamingChatCompletionsUpdate: while self._queue.empty() and not self._done: self._done = self._read_next_block() if self._queue.empty(): @@ -132,9 +132,9 @@ def close(self) -> None: class AsyncStreamingChatCompletions(BaseStreamingChatCompletions): - """Represents an async interator over ChatCompletionsUpdate objects. It can be used for either synchronous or + """Represents an async iterator over StreamingChatCompletionsUpdate objects. It can be used for either synchronous or asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream - into chat completions updates, each one represented by a ChatCompletionsUpdate object. + into chat completions updates, each one represented by a StreamingChatCompletionsUpdate object.
""" def __init__(self, response: AsyncHttpResponse): @@ -145,7 +145,7 @@ def __init__(self, response: AsyncHttpResponse): def __aiter__(self): return self - async def __anext__(self) -> _models.ChatCompletionsUpdate: + async def __anext__(self) -> _models.StreamingChatCompletionsUpdate: while self._queue.empty() and not self._done: self._done = await self._read_next_block_async() if self._queue.empty(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 22ba49e024d8..2b71226ad17c 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -23,7 +23,7 @@ async def sample_chat_completions_streaming_async(): import os from azure.ai.inference.aio import ChatCompletionsClient - from azure.ai.inference.models import SystemMessage, UserMessage, ChatCompletionsUpdate + from azure.ai.inference.models import SystemMessage, UserMessage, StreamingChatCompletionsUpdate from azure.core.credentials import AzureKeyCredential # Read the values of your model endpoint and key from environment variables diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index dffc81aa20cb..58a9e0990fd9 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -174,7 +174,7 @@ def _validate_chat_completions_tool_result(response: sdk.models.ChatCompletions) assert response.usage.total_tokens == response.usage.prompt_tokens + response.usage.completion_tokens @staticmethod - def _validate_chat_completions_update(update: sdk.models.ChatCompletionsUpdate, first: bool) -> str: + def _validate_chat_completions_update(update: sdk.models.StreamingChatCompletionsUpdate, first: bool) -> str: if first: # Why is 'content','created' and 'object' missing in the first update? 
assert update.choices[0].delta.role == sdk.models.ChatRole.ASSISTANT
From 62c5d8a1fb2a108b602c78f8b4ef0521657c3a65 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 23 May 2024 16:16:32 -0700 Subject: [PATCH 081/112] Update Entra ID sample, document Entra ID in README, use https://ml.azure.com/.default for auth, document additional request headers --- sdk/ai/azure-ai-inference/README.md | 32 +++++++++--- .../azure/ai/inference/_configuration.py | 2 +- .../azure/ai/inference/_patch.py | 50 ++++++++++++++----- .../azure/ai/inference/aio/_patch.py | 48 +++++++++++++----- sdk/ai/azure-ai-inference/samples/README.md | 1 + ..._chat_completions_from_input_json_async.py | 2 +- ...mpletions_streaming_with_entra_id_auth.py} | 42 ++++++++++------ .../samples/sample_get_model_info.py | 2 +- 8 files changed, 130 insertions(+), 49 deletions(-) rename sdk/ai/azure-ai-inference/samples/{sample_chat_completions_with_entra_id_auth.py => sample_chat_completions_streaming_with_entra_id_auth.py} (51%)
diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index cb0924be473d..9b2d2b5a69e2 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -24,11 +24,9 @@ Note that for inference using OpenAI models hosted on Azure you should be using * [Python 3.8](https://www.python.org/) or later installed, including [pip](https://pip.pypa.io/en/stable/). * An [Azure subscription](https://azure.microsoft.com/free). -* An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. To construct the client library, you will need to pass in the endpoint URL and key associated with your deployed AI model. - - * The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). - - * The key is a 32-character string. +* An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. +* To construct the client library, you will need to pass in the endpoint URL. The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). +* Depending on your model deployment, you either need a key to authenticate against the service, or Entra ID credentials. The key is a 32-character string. ### Install the package @@ -46,7 +44,7 @@ pip install --upgrade azure-ai-inference ## Key concepts -### Create and authenticate a client directly +### Create and authenticate a client directly, using key The package includes two clients `ChatCompletionsClient` and `EmbeddingsClient`. Both can be created in a similar manner.
For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`: @@ -81,6 +79,26 @@ client = ChatCompletionsClient( ) ``` +### Create and authenticate a client directly, using Entra ID + +To use an Entra ID token credential, first install the [azure-identity](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity) package: + +```bash +pip install azure-identity +``` + +You will need to provide the desired credential type obtained from that package. A common selection is [DefaultAzureCredential](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential) and it can be used as follows: + +```python +from azure.ai.inference import ChatCompletionsClient +from azure.identity import DefaultAzureCredential + +client = ChatCompletionsClient( + endpoint=endpoint, + credential=DefaultAzureCredential() +) +``` + ### Create and authenticate clients using `load_client` As an alternative to creating a specific client directly, you can use the function `load_client` to return the relevant client (of types `ChatCompletionsClient` or `EmbeddingsClient`) based on the provided endpoint: @@ -99,6 +117,8 @@ print(f"Created client of type `{type(client).__name__}`.") To load an asynchronous client, import the `load_client` function from `azure.ai.inference.aio` instead. +Entra ID authentication is also supported by the `load_client` function. Replace the key authentication above with `credential=DefaultAzureCredential()` for example. + ### Getting AI model information All clients provide a `get_model_info` method to retrieve AI model information. This makes a REST call to the `/info` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-info).
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 2aab1cca6b60..403b272dee2c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -47,7 +47,7 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCr self.endpoint = endpoint self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ml.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs)
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index dcf1cb98dfc7..9d952c6f8cbf 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -126,13 +126,15 @@ def complete( :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None.
:paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -223,11 +225,13 @@ def complete( :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to self-hosted endpoints (previously known as Model-as-a-Platform (MaaP) or "real-time endpoints"). + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -259,11 +263,13 @@ def complete( :type body: IO[bytes] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -315,11 +321,13 @@ def complete( :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. 
:paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword hyper_params: Additional, model-specific parameters that are not in the @@ -509,11 +517,13 @@ def embedding( :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -563,11 +573,13 @@ def embedding( :type body: JSON :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -594,11 +606,13 @@ def embedding( :type body: IO[bytes] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -639,11 +653,13 @@ def embedding( :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. 
:paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should @@ -780,11 +796,13 @@ def embedding( :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -834,11 +852,13 @@ def embedding( :type body: JSON :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -865,11 +885,13 @@ def embedding( :type body: IO[bytes] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
@@ -910,11 +932,13 @@ def embedding( :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 9d53d59d73e3..f7c648b7ca15 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -117,6 +117,7 @@ async def complete( :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str @@ -136,7 +137,8 @@ async def complete( HTTP request header. Default value is None. :paramtype extras: dict[str, str] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens @@ -220,11 +222,13 @@ async def complete( :type body: JSON :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
@@ -256,11 +260,13 @@ async def complete( :type body: IO[bytes] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -313,6 +319,7 @@ async def complete( :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str @@ -329,7 +336,8 @@ async def complete( HTTP request header. Default value is None. :paramtype extras: dict[str, str] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens @@ -515,11 +523,13 @@ async def embedding( :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -569,11 +579,13 @@ async def embedding( :type body: JSON :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. 
:paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -600,11 +612,13 @@ async def embedding( :type body: IO[bytes] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -645,11 +659,13 @@ async def embedding( :paramtype input: list[str] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should @@ -787,11 +803,13 @@ async def embedding( :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. @@ -841,11 +859,13 @@ async def embedding( :type body: JSON :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -872,11 +892,13 @@ async def embedding( :type body: IO[bytes] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -917,11 +939,13 @@ async def embedding( :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] :keyword model_deployment: Name of the deployment to which you would like to route the request. Relevant only to Model-as-a-Platform (MaaP) deployments. + This sets the HTTP request header `azureml-model-deployment`. Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. This sets the HTTP request header `unknown-parameters`. + Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. 
The number of dimensions the resulting output embeddings should diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 84eef7266575..eaf8d699bee0 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -19,6 +19,7 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| |[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | +|[sample_chat_completions_streaming_with_entra_id_auth.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. This sample also shows setting the `model_deployment` parameter, often required for Selfhosted Endpoints. | |[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. | |[sample_chat_completions_with_history.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, which the second completion using chat history from the first. | |[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. | diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py index e24ed54d2a60..c9ebda426c80 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py @@ -9,7 +9,7 @@ JSON request body (containing input chat messages). 
USAGE: - python sample_chat_completions_from_input_json_async + python sample_chat_completions_from_input_json_async.py Set these two environment variables before running the sample: 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py similarity index 51% rename from sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py rename to sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py index b5b6c24d224a..d8e43610a3bc 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py @@ -4,11 +4,15 @@ # ------------------------------------ """ DESCRIPTION: - This sample demonstrates how to get a chat completions response from - the service using a synchronous client, with an Entra ID authentication. - + This sample demonstrates how to do chat completions with streaming, + using a synchronous client, with an Entra ID authentication. + It also shows how to set the model deployment name, which is supported + by Selfhosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). + Model deployment name gets sent to the service as HTTP request + header `azureml-model-deployment`, and is optional. + USAGE: - python sample_chat_completions_with_entra_id_auth + python sample_chat_completions_streaming_with_entra_id_auth.py Set these two environment variables before running the sample: 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form @@ -19,35 +23,43 @@ """ -def sample_chat_completions_with_entra_id_auth(): +def sample_chat_completions_streaming_with_entra_id_auth(): import os try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] - key = os.environ["CHAT_COMPLETIONS_KEY"] except KeyError: - print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT' or 'CHAT_COMPLETIONS_KEY'") - print("Set them before running this sample.") + print("Missing environment variable 'CHAT_COMPLETIONS_ENDPOINT'") + print("Set it. 
before running this sample.") exit() + try: + model_deployment = os.environ["CHAT_COMPLETIONS_DEPLOYMENT_NAME"] + except KeyError: + print("Could not read optional environment variable `CHAT_COMPLETIONS_DEPLOYMENT_NAME`.") + print("`model_deployment` will not be set.") + model_deployment = None + from azure.ai.inference import ChatCompletionsClient from azure.ai.inference.models import SystemMessage, UserMessage from azure.identity import DefaultAzureCredential # See https://learn.microsoft.com/python/api/overview/azure/identity-readme#defaultazurecredential - default_azure_credential = DefaultAzureCredential() - - client = ChatCompletionsClient(endpoint=endpoint, credential=default_azure_credential) + client = ChatCompletionsClient(endpoint=endpoint, credential=DefaultAzureCredential()) response = client.complete( + model_deployment=model_deployment, messages=[ SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), - ] + UserMessage(content="Give me 5 good reasons why I should exercise every day."), + ], + stream=True ) - print(response.choices[0].message.content) + for update in response: + print(update.choices[0].delta.content or "", end="") + client.close() if __name__ == "__main__": - sample_chat_completions_with_entra_id_auth() + sample_chat_completions_streaming_with_entra_id_auth() diff --git a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py index c5393fab914c..02f95ae7502a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py +++ b/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py @@ -9,7 +9,7 @@ clients. USAGE: - python sample_get_model_info + python sample_get_model_info.py Set these two environment variables before running the sample: 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form From 8a3a1673107e5fc8daa14b5d998ddbd247302770 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 24 May 2024 16:58:03 -0700 Subject: [PATCH 082/112] use model_extras instead of hyper_params. Update client __str__ to not mention internal variable name --- sdk/ai/azure-ai-inference/README.md | 8 +-- sdk/ai/azure-ai-inference/assets.json | 2 +- .../azure/ai/inference/_patch.py | 63 ++++++++++--------- .../azure/ai/inference/aio/_patch.py | 54 ++++++++-------- sdk/ai/azure-ai-inference/samples/README.md | 2 +- ...ple_chat_completions_with_model_extras.py} | 16 ++--- sdk/ai/azure-ai-inference/tests/README.md | 2 +- .../test_model_inference_async_client.py | 4 +- .../tests/test_model_inference_client.py | 6 +- 9 files changed, 79 insertions(+), 78 deletions(-) rename sdk/ai/azure-ai-inference/samples/{sample_chat_completions_with_hyper_params.py => sample_chat_completions_with_model_extras.py} (83%) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 9b2d2b5a69e2..62ab40643c3f 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -239,13 +239,13 @@ To generate completions for additional messages, simply call `client.complete` m ### Chat completions with additional model-specific parameters -In this example, additional JSON elements are inserted at the root of the request body by setting `hyper_params` when calling the `complete` method. +In this example, extra JSON elements are inserted at the root of the request body by setting `model_extras` when calling the `complete` method. 
diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index 2b675c4979f7..e9d32a1e01b9 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_1f7396c982" + "Tag": "python/ai/azure-ai-inference_4a3ee6b285" }
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 9d952c6f8cbf..d38799db720b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -6,10 +6,11 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -What do we patch auto-generated code? -1. Add support for input argument `hyper_params` (all clients) +Why do we patch auto-generated code? +1. Add support for input argument `model_extras` (all clients) 2. Add support for function load_client -3. Add support for chat completion streaming +3. Add support for get_model_info, while caching the result (all clients) +4. Add support for chat completion streaming (ChatCompletionsClient client only) """ import json import logging @@ -95,7 +96,7 @@ def complete( model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -138,11 +139,11 @@ def complete( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body.
They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -288,7 +289,7 @@ def complete( messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, temperature: Optional[float] = None, @@ -330,11 +331,11 @@ def complete( Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -424,8 +425,8 @@ def complete( "tools": tools, "top_p": top_p, } - if hyper_params is not None: - body.update(hyper_params) + if model_extras is not None: + body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -485,7 +486,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return self._model_info def __str__(self) -> str: - return super().__str__() + f"\n_model_info={self._model_info}" + return super().__str__() + f"\n{self._model_info}" class EmbeddingsClient(EmbeddingsClientGenerated): @@ -494,7 +495,7 @@ class EmbeddingsClient(EmbeddingsClientGenerated): def embedding( self, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[str], model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -506,11 +507,11 @@ def embedding( ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. 
@@ -628,7 +629,7 @@ def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[str] = _Unset, model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -642,11 +643,11 @@ def embedding( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. @@ -704,8 +705,8 @@ def embedding( "input": input, "input_type": input_type, } - if hyper_params is not None: - body.update(hyper_params) + if model_extras is not None: + body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -765,15 +766,15 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return self._model_info def __str__(self) -> str: - return super().__str__() + f"\n_model_info={self._model_info}" - + return super().__str__() + f"\n{self._model_info}" + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload def embedding( self, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -785,11 +786,11 @@ def embedding( ) -> _models.EmbeddingsResult: """Return the embeddings for given images. - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an array. The input must not exceed the max input tokens for the model. Required. @@ -907,7 +908,7 @@ def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -921,11 +922,11 @@ def embedding( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. 
How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an array. The input must not exceed the max input tokens for the model. Required. @@ -983,8 +984,8 @@ def embedding( "input": input, "input_type": input_type, } - if hyper_params is not None: - body.update(hyper_params) + if model_extras is not None: + body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -1043,7 +1044,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return self._model_info def __str__(self) -> str: - return super().__str__() + f"\n_model_info={self._model_info}" + return super().__str__() + f"\n{self._model_info}" __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index f7c648b7ca15..33d043898b8b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -85,7 +85,7 @@ async def complete( model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, @@ -124,11 +124,11 @@ async def complete( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -285,7 +285,7 @@ async def complete( messages: List[_models.ChatRequestMessage] = _Unset, model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, @@ -323,11 +323,11 @@ async def complete( Typically used when you want to target a test environment instead of production environment. Default value is None. :paramtype model_deployment: str - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. 
How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the standard request payload. They will be passed to the service as-is in the root of the JSON request payload. @@ -430,8 +430,8 @@ async def complete( "tools": tools, "top_p": top_p, } - if hyper_params is not None: - body.update(hyper_params) + if model_extras is not None: + body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -491,7 +491,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return self._model_info def __str__(self) -> str: - return super().__str__() + f"\n_model_info={self._model_info}" + return super().__str__() + f"\n{self._model_info}" class EmbeddingsClient(EmbeddingsClientGenerated): @@ -500,7 +500,7 @@ class EmbeddingsClient(EmbeddingsClientGenerated): async def embedding( self, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[str], model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -512,11 +512,11 @@ async def embedding( ) -> _models.EmbeddingsResult: """Return the embeddings for a given text prompt. - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. @@ -634,7 +634,7 @@ async def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[str] = _Unset, model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -648,11 +648,11 @@ async def embedding( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. 
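The asynchronous client mirrors this surface; a minimal sketch under the same assumptions (placeholder environment variables, and `aiohttp` installed for the default async transport):

```python
import asyncio
import os

from azure.core.credentials import AzureKeyCredential
from azure.ai.inference.aio import EmbeddingsClient
from azure.ai.inference.models import UnknownParams


async def main() -> None:
    # Assumed environment variable names, as in the synchronous sketch.
    async with EmbeddingsClient(
        endpoint=os.environ["EMBEDDINGS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["EMBEDDINGS_KEY"]),
    ) as client:
        # Same keyword arguments as the synchronous client; only `await` differs.
        result = await client.embedding(
            input=["first phrase", "second phrase"],
            unknown_params=UnknownParams.ALLOW,
            model_extras={"key1": 1},  # Illustrative extra key.
        )
        print(f"{len(result.data)} embeddings returned")


asyncio.run(main())
```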
@@ -710,8 +710,8 @@ async def embedding( "input": input, "input_type": input_type, } - if hyper_params is not None: - body.update(hyper_params) + if model_extras is not None: + body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -771,7 +771,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return self._model_info def __str__(self) -> str: - return super().__str__() + f"\n_model_info={self._model_info}" + return super().__str__() + f"\n{self._model_info}" class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @@ -780,7 +780,7 @@ class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): async def embedding( self, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput], model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -792,11 +792,11 @@ async def embedding( ) -> _models.EmbeddingsResult: """Return the embeddings for given images. - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an array. The input must not exceed the max input tokens for the model. Required. @@ -914,7 +914,7 @@ async def embedding( self, body: Union[JSON, IO[bytes]] = _Unset, *, - hyper_params: Optional[Dict[str, Any]] = None, + model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput] = _Unset, model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, @@ -928,11 +928,11 @@ async def embedding( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword hyper_params: Additional, model-specific parameters that are not in the + :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. - :paramtype hyper_params: dict[str, Any] + :paramtype model_extras: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an array. The input must not exceed the max input tokens for the model. Required. 
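For the image variant, the caller passes a list of `EmbeddingInput` objects rather than strings. A minimal async sketch, assuming `EmbeddingInput` carries the image as a base64-encoded string in an `image` field (check the generated model in `azure.ai.inference.models` for the exact field name):

```python
import asyncio
import base64
import os

from azure.core.credentials import AzureKeyCredential
from azure.ai.inference.aio import ImageEmbeddingsClient
from azure.ai.inference.models import EmbeddingInput


async def main() -> None:
    # Illustrative local file; any image format the model accepts would do.
    with open("sample.png", "rb") as f:
        image_data = base64.b64encode(f.read()).decode("utf-8")

    async with ImageEmbeddingsClient(
        endpoint=os.environ["IMAGE_EMBEDDINGS_ENDPOINT"],  # Assumed variable names.
        credential=AzureKeyCredential(os.environ["IMAGE_EMBEDDINGS_KEY"]),
    ) as client:
        # The `image` field name is an assumption; see EmbeddingInput.
        result = await client.embedding(input=[EmbeddingInput(image=image_data)])
        print(f"embedding length: {len(result.data[0].embedding)}")


asyncio.run(main())
```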
@@ -990,8 +990,8 @@ async def embedding( "input": input, "input_type": input_type, } - if hyper_params is not None: - body.update(hyper_params) + if model_extras is not None: + body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -1051,7 +1051,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: return self._model_info def __str__(self) -> str: - return super().__str__() + f"\n_model_info={self._model_info}" + return super().__str__() + f"\n{self._model_info}" __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index eaf8d699bee0..c2112160052a 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -27,7 +27,7 @@ These are runnable console Python scripts that show how to do chat completion, t |[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools | |[sample_load_client.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | |[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. The same can be done with all other clients. | -|[sample_chat_completions_with_hyper_params.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py) | Chat completions with additional model-specific parameters. | +|[sample_chat_completions_with_model_extras.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py) | Chat completions with additional model-specific parameters. | ### Text embeddings diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py similarity index 83% rename from sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py rename to sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py index 4792be49c471..7428be24e362 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_hyper_params.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py @@ -7,12 +7,12 @@ This sample demonstrates how to get a chat completions response from the service using a synchronous client, while supplying additional model-specific parameters as part of the request. - See setting of an optional `unknown-parameters` request header via the - `headers_policy` in the client constructor. - See setting of `hyper_params` in the `complete` method. + See setting of the optional `model_extras` in the `complete` method. + Also see related setting of the optional `unknown_params` + parameter in the `complete` method.
USAGE: - python sample_chat_completions_with_hyper_params.py + python sample_chat_completions_with_model_extras.py Set these two environment variables before running the sample: 1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form @@ -23,7 +23,7 @@ """ -def sample_chat_completions_with_hyper_params(): +def sample_chat_completions_with_model_extras(): import os try: @@ -40,14 +40,14 @@ def sample_chat_completions_with_hyper_params(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - # [START hyper_params] + # [START model_extras] response = client.complete( messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default) - hyper_params={ # Optional. Additional parameters to pass to the model. + model_extras={ # Optional. Additional parameters to pass to the model. "key1": 1, "key2": True, "key3": "Some value", @@ -61,4 +61,4 @@ def sample_chat_completions_with_hyper_params(): if __name__ == "__main__": - sample_chat_completions_with_hyper_params() + sample_chat_completions_with_model_extras() diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index c3687487391a..712d5411c35e 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -8,7 +8,7 @@ The live tests were written against the AI models mentioned below. You will need - `Mistral-Large` for chat completion tests - `Cohere-embed-v3-english` for embedding tests -- `TBD` for image generation tests + ## Setup diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 07220c6370e2..02f6cad3d1a9 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -110,12 +110,12 @@ async def test_async_chat_completions_multi_turn(self, **kwargs): @ServicePreparerChatCompletions() @recorded_by_proxy_async - async def test_async_chat_completions_with_hyper_params(self, **kwargs): + async def test_async_chat_completions_with_model_extras(self, **kwargs): client = self._create_async_chat_client(**kwargs) response = await client.complete( messages=[sdk.models.UserMessage(content="How many feet are in a mile?")], unknown_params=sdk.models.UnknownParams.IGNORE, - hyper_params={ + model_extras={ "key1": 1, "key2": True, "key3": "Some value", diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 66ff16385017..8c22de6878fe 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -105,17 +105,17 @@ def test_chat_completions_multi_turn(self, **kwargs): messages.append(sdk.models.UserMessage(content="and how many yards?")) response = client.complete(messages=messages) self._print_chat_completions_result(response) - self._validate_chat_completions_result(response, ["1760", "1,760"]) + self._validate_chat_completions_result(response, ["1760", "1,760"]) client.close() @ServicePreparerChatCompletions() @recorded_by_proxy - def test_chat_completions_with_hyper_params(self, **kwargs): + def test_chat_completions_with_model_extras(self, **kwargs): client = 
self._create_chat_client(**kwargs) response = client.complete( messages=[sdk.models.UserMessage(content="How many feet are in a mile?")], unknown_params=sdk.models.UnknownParams.IGNORE, - hyper_params={ + model_extras={ "key1": 1, "key2": True, "key3": "Some value", From 092f91b3c530bb70c24ce40154740fda960b285b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 28 May 2024 08:59:08 -0700 Subject: [PATCH 083/112] Fix all pylint errors. Minor updates to root README.md --- sdk/ai/azure-ai-inference/README.md | 6 ++--- .../ai/inference/_operations/_operations.py | 1 + .../azure/ai/inference/_patch.py | 26 ++++++++++--------- .../inference/aio/_operations/_operations.py | 1 + .../azure/ai/inference/aio/_patch.py | 18 +++++++------ .../azure/ai/inference/models/__init__.py | 2 +- .../azure/ai/inference/models/_patch.py | 14 ++++++---- 7 files changed, 39 insertions(+), 29 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 62ab40643c3f..3dc55cd72301 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -46,7 +46,7 @@ pip install --upgrade azure-ai-inferencing ### Create and authenticate a client directly, using key -The package includes two clients `ChatCompletionsClient` and `EmbeddingsClient` . Both can be created in the similar manner. For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`: +The package includes two clients `ChatCompletionsClient` and `EmbeddingsClient`. Both can be created in a similar manner. For example, assuming `endpoint` and `key` are strings holding your endpoint URL and key, this Python code will create and authenticate a synchronous `ChatCompletionsClient`: ```python from azure.ai.inference import ChatCompletionsClient @@ -233,7 +233,7 @@ client.close() -The printed result of course depends on the model, but you should see the answer progressively get longer as updates get streamed to the client. +In the above `for` loop that prints the results, you should see the answer progressively get longer as updates get streamed to the client. To generate completions for additional messages, simply call `client.complete` multiple times using the same `client`. @@ -290,7 +290,7 @@ for item in response.data: -The printed result of course depends on the model, but you should see something like this: +The length of the embedding vector depends on the model, but you should see something like this: ```txt data[0]: length=1024, [0.0013399124, -0.01576233, ..., 0.007843018, 0.000238657] diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index eeea19518f6e..8931cb55ecb7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -262,6 +262,7 @@ def _complete( **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages.
Completions support a wide variety of tasks and generate text that continues from or "completes" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index d38799db720b..260980de4965 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ +# pylint: disable=too-many-lines """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize @@ -37,9 +38,6 @@ build_chat_completions_complete_request, build_embeddings_embedding_request, build_image_embeddings_embedding_request, - build_chat_completions_get_model_info_request, - build_embeddings_get_model_info_request, - build_image_embeddings_get_model_info_request ) from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient as EmbeddingsClientGenerated @@ -61,7 +59,7 @@ def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: - + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = client.get_model_info() client.close() @@ -203,7 +201,7 @@ def complete( :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions :raises ~azure.core.exceptions.HttpResponseError: """ - + @overload def complete( self, @@ -306,6 +304,7 @@ def complete( **kwargs: Any, ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data.
When using this method with `stream=True`, the response is streamed @@ -466,8 +465,8 @@ def complete( if _stream: return _models.StreamingChatCompletions(response) - else: - return _deserialize(_models._models.ChatCompletions, response.json()) # pylint: disable=protected-access + + return _deserialize(_models._models.ChatCompletions, response.json()) # pylint: disable=protected-access # Cache here the results of get_model_info call _model_info: Optional[_models.ModelInfo] = None @@ -481,11 +480,12 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: """ - if self._model_info == None: + if self._model_info is None: self._model_info = self._get_model_info(**kwargs) return self._model_info - + def __str__(self) -> str: + # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" @@ -761,13 +761,14 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: """ - if self._model_info == None: + if self._model_info is None: self._model_info = self._get_model_info(**kwargs) return self._model_info def __str__(self) -> str: + # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" - + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload @@ -1039,11 +1040,12 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: """ - if self._model_info == None: + if self._model_info is None: self._model_info = self._get_model_info(**kwargs) return self._model_info def __str__(self) -> str: + # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 361431d4bc45..9aca75cce1b0 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -116,6 +116,7 @@ async def _complete( **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 33d043898b8b..20ccc6fa7ded 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ +# pylint: disable=too-many-lines """Customize generated code here.
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize @@ -33,9 +34,6 @@ build_chat_completions_complete_request, build_embeddings_embedding_request, build_image_embeddings_embedding_request, - build_chat_completions_get_model_info_request, - build_embeddings_get_model_info_request, - build_image_embeddings_get_model_info_request, ) if sys.version_info >= (3, 9): @@ -303,6 +301,7 @@ async def complete( **kwargs: Any, ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. When using this method with `stream=True`, the response is streamed @@ -471,8 +470,8 @@ async def complete( if _stream: return _models.AsyncStreamingChatCompletions(response) - else: - return _deserialize(_models.ChatCompletions, response.json()) # pylint: disable=protected-access + + return _deserialize(_models.ChatCompletions, response.json()) # pylint: disable=protected-access # Cache here the results of get_model_info call _model_info: Optional[_models.ModelInfo] = None @@ -486,11 +485,12 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: """ - if self._model_info == None: + if self._model_info is None: self._model_info = await self._get_model_info(**kwargs) return self._model_info def __str__(self) -> str: + # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" @@ -766,11 +766,12 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: """ - if self._model_info == None: + if self._model_info is None: self._model_info = await self._get_model_info(**kwargs) return self._model_info def __str__(self) -> str: + # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" @@ -1046,11 +1047,12 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError: """ - if self._model_info == None: + if self._model_info is None: self._model_info = await self._get_model_info(**kwargs) return self._model_info def __str__(self) -> str: + # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 685cc394667a..6cb58ad49a4f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -74,7 +74,7 @@ "ModelType", "SystemMessage", "ToolMessage", - "UnknownParams" + "UnknownParams", "UserMessage", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 9c7affb1d14d..77cbdfd60673 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -76,11 +76,14 @@ def _deserialize_and_add_to_queue(self, element: bytes) 
-> bool: return True # If you reached here, the line should contain `data: {...}\n` - # where the curly braces contain a valid JSON object. Deserialize it into a StreamingChatCompletionsUpdate object + # where the curly braces contain a valid JSON object. + # Deserialize it into a StreamingChatCompletionsUpdate object # and add it to the queue. self._queue.put( # pylint: disable=W0212 # Access to a protected member _deserialize of a client class - _models.StreamingChatCompletionsUpdate._deserialize(json.loads(line[len(self._SSE_DATA_EVENT_PREFIX) : -1]), []) + _models.StreamingChatCompletionsUpdate._deserialize( + json.loads(line[len(self._SSE_DATA_EVENT_PREFIX) : -1]), [] + ) ) if self._ENABLE_CLASS_LOGS: @@ -132,9 +135,10 @@ def close(self) -> None: class AsyncStreamingChatCompletions(BaseStreamingChatCompletions): - """Represents an async interator over StreamingChatCompletionsUpdate objects. It can be used for either synchronous or - asynchronous iterations. The class deserializes the Server Sent Events (SSE) response stream - into chat completions updates, each one represented by a StreamingChatCompletionsUpdate object. + """Represents an async iterator over StreamingChatCompletionsUpdate objects. + It can be used for either synchronous or asynchronous iterations. The class + deserializes the Server Sent Events (SSE) response stream into chat + completions updates, each one represented by a StreamingChatCompletionsUpdate object. """ def __init__(self, response: AsyncHttpResponse): From 8e610c29020250cd061ecf0d4ec8fdde4faf1aa5 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 28 May 2024 11:43:13 -0700 Subject: [PATCH 084/112] Example of JSON messages in the root README.md --- sdk/ai/azure-ai-inference/README.md | 32 +++++++++++++++ ...sample_chat_completions_from_input_json.py | 39 +++++++++++-------- 2 files changed, 55 insertions(+), 16 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 3dc55cd72301..0129265826c8 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -202,6 +202,38 @@ print(response.choices[0].message.content) +The following types of messages are supported: `SystemMessage`, `UserMessage`, `AssistantMessage`, `ToolMessage`. + +Alternatively, you can provide the messages as dictionaries instead of using the strongly typed classes like `SystemMessage` and `UserMessage`: + + + +```python +response = client.complete( + { + "messages": [ + { + "role": "system", + "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", + }, + { + "role": "user", + "content": "What year was construction of the International Space Station mostly done?"}, + { + "role": "assistant", + "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", + }, + { + "role": "user", + "content": "And what was the estimated cost to build it?" + } + ] + } +) +``` + + + To generate completions for additional messages, simply call `client.complete` multiple times using the same `client`.
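For example, a minimal sketch of a second turn that reuses the same `client` (the question text is illustrative; `AssistantMessage` carries the previous reply back to the service so the model sees the full conversation):

```python
from azure.ai.inference.models import AssistantMessage, SystemMessage, UserMessage

messages = [
    SystemMessage(content="You are a helpful assistant."),
    UserMessage(content="How many feet are in a mile?"),
]
response = client.complete(messages=messages)

# Append the assistant's reply and the next user question, then call
# `complete` again on the same client.
messages.append(AssistantMessage(content=response.choices[0].message.content))
messages.append(UserMessage(content="And how many yards?"))
response = client.complete(messages=messages)
print(response.choices[0].message.content)
```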
### Streaming chat completions example diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index 40766ae626f0..4daa7bfa95b1 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -35,22 +35,29 @@ def sample_chat_completions_from_input_json(): client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - request_body = { - "messages": [ - { - "role": "system", - "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", - }, - {"role": "user", "content": "What year was construction of the International Space Station mostly done?"}, - { - "role": "assistant", - "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", - }, - {"role": "user", "content": "And what was the estimated cost to build it?"}, - ] - } - - response = client.complete(request_body) + # [START chat_completions] + response = client.complete( + { + "messages": [ + { + "role": "system", + "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", + }, + { + "role": "user", + "content": "What year was construction of the International Space Station mostly done?"}, + { + "role": "assistant", + "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", + }, + { + "role": "user", + "content": "And what was the estimated cost to build it?" + } + ] + } + ) + # [END chat_completions] print(response.choices[0].message.content) From a738ff5df85058b48889758001f098f0c85932d1 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 28 May 2024 16:23:59 -0700 Subject: [PATCH 085/112] Some MyPy fixes. Also fix wrong package name in root README.md --- sdk/ai/azure-ai-inference/README.md | 10 +++++----- .../azure/ai/inference/_patch.py | 16 ++++++++-------- .../azure/ai/inference/aio/_patch.py | 16 ++++++++-------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 0129265826c8..539a6dd208b2 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,6 +1,6 @@ # Azure AI Inference client library for Python -The client Library allows you to do inference using AI models you deployed to Azure. It supports both Serverless Endpoints (aka "model as a service" (MaaS) or "pay as you go") and Selfhosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes services calls using REST AP version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). 
+The client library (in preview) allows you to do inference using AI models you deployed to Azure. It supports both Serverless Endpoints (aka "model as a service" (MaaS) or "pay as you go") and Self-hosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). Use the model inference client library to: @@ -33,13 +33,13 @@ Note that for inference using OpenAI models hosted on Azure you should be using To install the Azure AI Inference package, use the following command: ```bash -pip install azure-ai-inferencing +pip install azure-ai-inference ``` To update an existing installation of the package, use: ```bash -pip install --upgrade azure-ai-inferencing +pip install --upgrade azure-ai-inference ``` ## Key concepts @@ -422,10 +422,10 @@ logger = logging.getLogger("azure") # Set the desired logging level. logging.INFO or logging.DEBUG are good options. logger.setLevel(logging.DEBUG) -# Direct logging output to stdout (the default): +# Direct logging output to stdout: handler = logging.StreamHandler(stream=sys.stdout) # Or direct logging output to a file: -# handler = logging.FileHandler(filename = 'sample.log') +# handler = logging.FileHandler(filename="sample.log") logger.addHandler(handler) # Optional: change the default logging format. Here we add a timestamp. diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 260980de4965..743338a01a65 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -60,9 +60,9 @@ def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter...
+ model_info = client1.get_model_info() + client1.close() _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ""): @@ -72,16 +72,16 @@ def load_client( # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): - client = ChatCompletionsClient(endpoint, credential, **kwargs) + client2 = ChatCompletionsClient(endpoint, credential, **kwargs) elif model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): - client = EmbeddingsClient(endpoint, credential, **kwargs) + client2 = EmbeddingsClient(endpoint, credential, **kwargs) elif model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - client = ImageEmbeddingsClient(endpoint, credential, **kwargs) + client2 = ImageEmbeddingsClient(endpoint, credential, **kwargs) else: raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") - client._model_info = model_info # pylint: disable=protected-access - return client + client2._model_info = model_info # pylint: disable=protected-access + return client2 class ChatCompletionsClient(ChatCompletionsClientGenerated): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 20ccc6fa7ded..0984c99f0a26 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -49,9 +49,9 @@ async def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... - model_info = await client.get_model_info() - await client.close() + client1 = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... 
+ model_info = await client1.get_model_info() + await client1.close() _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ""): @@ -61,16 +61,16 @@ async def load_client( # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): - client = ChatCompletionsClient(endpoint, credential, **kwargs) + client2 = ChatCompletionsClient(endpoint, credential, **kwargs) elif model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): - client = EmbeddingsClient(endpoint, credential, **kwargs) + client2 = EmbeddingsClient(endpoint, credential, **kwargs) elif model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - client = ImageEmbeddingsClient(endpoint, credential, **kwargs) + client2 = ImageEmbeddingsClient(endpoint, credential, **kwargs) else: raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") - client._model_info = model_info # pylint: disable=protected-access - return client + client2._model_info = model_info # pylint: disable=protected-access + return client2 class ChatCompletionsClient(ChatCompletionsClientGenerated): From 44e94d082a09f1e7b8677a74262c1f42e8904927 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 29 May 2024 14:15:12 -0700 Subject: [PATCH 086/112] Same work before re-emitting SDK --- sdk/ai/azure-ai-inference/README.md | 20 +- .../ai/inference/_operations/_operations.py | 16 +- .../azure/ai/inference/_patch.py | 190 ++---------------- .../inference/aio/_operations/_operations.py | 16 +- .../azure/ai/inference/aio/_patch.py | 187 ++--------------- .../azure/ai/inference/models/_models.py | 4 +- sdk/ai/azure-ai-inference/samples/README.md | 2 +- .../async_samples/sample_embeddings_async.py | 2 +- .../samples/sample_chat_completions.py | 10 +- .../sample_chat_completions_azure_openai.py | 71 +++++++ ...ompletions_streaming_with_entra_id_auth.py | 27 ++- ...mple_chat_completions_with_model_extras.py | 15 +- .../samples/sample_embeddings.py | 2 +- 13 files changed, 170 insertions(+), 392 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 539a6dd208b2..288fdb4b1529 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -287,16 +287,26 @@ response = client.complete( ], unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default) model_extras={ # Optional. Additional parameters to pass to the model. 
- "key1": 1, - "key2": True, - "key3": "Some value", - "key4": [1, 2, 3], - "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]}, + "key1": "value1", + "key2": "value2" }, ) ``` +In the above example, this will be the JSON payload in the HTTP request: + +```json +{ + "messages": + [ + {"role":"system","content":"You are a helpful assistant."}, + {"role":"user","content":"How many feet are in a mile?"} + ], + "key1": "value1", + "key2": "value2" +} +``` ### Text Embeddings example diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 8931cb55ecb7..99035cb603ed 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -584,7 +584,7 @@ def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - def _embedding( + def _embed( self, body: JSON, *, @@ -594,7 +594,7 @@ def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - def _embedding( + def _embed( self, *, input: List[str], @@ -607,7 +607,7 @@ def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - def _embedding( + def _embed( self, body: IO[bytes], *, @@ -618,7 +618,7 @@ def _embedding( ) -> _models.EmbeddingsResult: ... @distributed_trace - def _embedding( + def _embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -866,7 +866,7 @@ def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - def _embedding( + def _embed( self, body: JSON, *, @@ -876,7 +876,7 @@ def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - def _embedding( + def _embed( self, *, input: List[_models.EmbeddingInput], @@ -889,7 +889,7 @@ def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - def _embedding( + def _embed( self, body: IO[bytes], *, @@ -900,7 +900,7 @@ def _embedding( ) -> _models.EmbeddingsResult: ... @distributed_trace - def _embedding( + def _embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 743338a01a65..344a78398e5b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -91,8 +91,6 @@ def complete( self, *, messages: List[_models.ChatRequestMessage], - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", model_extras: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, @@ -123,17 +121,6 @@ def complete( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. 
- :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -207,8 +194,6 @@ def complete( self, body: JSON, *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: @@ -221,18 +206,6 @@ def complete( :param body: Required. :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to self-hosted endpoints (previously known as Model-as-a-Platform (MaaP) - or "real-time endpoints"). - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -246,8 +219,6 @@ def complete( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: @@ -260,17 +231,6 @@ def complete( :param body: Required. :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str @@ -285,8 +245,6 @@ def complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, model_extras: Optional[Dict[str, Any]] = None, frequency_penalty: Optional[float] = None, presence_penalty: Optional[float] = None, @@ -319,17 +277,6 @@ def complete( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the @@ -404,6 +351,7 @@ def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + _unknown_params = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -424,8 +372,9 @@ def complete( "tools": tools, "top_p": top_p, } - if model_extras is not None: + if model_extras is not None and bool(model_extras): body.update(model_extras) + _unknown_params="allow" body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -437,8 +386,7 @@ def complete( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_chat_completions_complete_request( - model_deployment=model_deployment, - unknown_params=unknown_params, + unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -492,13 +440,11 @@ def __str__(self) -> str: class EmbeddingsClient(EmbeddingsClientGenerated): @overload - def embedding( + def embed( self, *, model_extras: Optional[Dict[str, Any]] = None, input: List[str], - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -516,17 +462,6 @@ def embedding( To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. 
- Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -559,12 +494,10 @@ def embedding( """ @overload - def embedding( + def embed( self, body: JSON, *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -572,17 +505,6 @@ def embedding( :param body: Required. :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -592,12 +514,10 @@ def embedding( """ @overload - def embedding( + def embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -605,17 +525,6 @@ def embedding( :param body: Required. :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str @@ -625,14 +534,12 @@ def embedding( """ @distributed_trace - def embedding( + def embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, model_extras: Optional[Dict[str, Any]] = None, input: List[str] = _Unset, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -652,17 +559,6 @@ def embedding( To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -705,7 +601,7 @@ def embedding( "input": input, "input_type": input_type, } - if model_extras is not None: + if model_extras is not None and bool(model_extras): body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" @@ -716,8 +612,7 @@ def embedding( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_embeddings_embedding_request( - model_deployment=model_deployment, - unknown_params=unknown_params, + unknown_params="allow", content_type=content_type, api_version=self._config.api_version, content=_content, @@ -772,13 +667,11 @@ def __str__(self) -> str: class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload - def embedding( + def embed( self, *, model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput], - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -796,17 +689,6 @@ def embedding( array. The input must not exceed the max input tokens for the model. Required. :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". 
Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -839,12 +721,10 @@ def embedding( """ @overload - def embedding( + def embed( self, body: JSON, *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -852,17 +732,6 @@ def embedding( :param body: Required. :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -872,12 +741,10 @@ def embedding( """ @overload - def embedding( + def embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -885,17 +752,6 @@ def embedding( :param body: Required. :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -905,14 +761,12 @@ def embedding( """ @distributed_trace - def embedding( + def embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput] = _Unset, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -932,17 +786,6 @@ def embedding( array. The input must not exceed the max input tokens for the model. Required. 
:paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. @@ -985,7 +828,7 @@ def embedding( "input": input, "input_type": input_type, } - if model_extras is not None: + if model_extras is not None and bool(model_extras): body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" @@ -996,8 +839,7 @@ def embedding( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_image_embeddings_embedding_request( - model_deployment=model_deployment, - unknown_params=unknown_params, + unknown_params="allow", content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 9aca75cce1b0..e62f189bfcec 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -439,7 +439,7 @@ async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class EmbeddingsClientOperationsMixin(EmbeddingsClientMixinABC): @overload - async def _embedding( + async def _embed( self, body: JSON, *, @@ -449,7 +449,7 @@ async def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - async def _embedding( + async def _embed( self, *, input: List[str], @@ -462,7 +462,7 @@ async def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - async def _embedding( + async def _embed( self, body: IO[bytes], *, @@ -473,7 +473,7 @@ async def _embedding( ) -> _models.EmbeddingsResult: ... @distributed_trace_async - async def _embedding( + async def _embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -721,7 +721,7 @@ async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - async def _embedding( + async def _embed( self, body: JSON, *, @@ -731,7 +731,7 @@ async def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - async def _embedding( + async def _embed( self, *, input: List[_models.EmbeddingInput], @@ -744,7 +744,7 @@ async def _embedding( **kwargs: Any ) -> _models.EmbeddingsResult: ... @overload - async def _embedding( + async def _embed( self, body: IO[bytes], *, @@ -755,7 +755,7 @@ async def _embedding( ) -> _models.EmbeddingsResult: ... 
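Across the patched `complete` and `embed` methods above, `model_extras` handling follows one pattern: when the caller passes a non-empty dict, its entries are folded into the root of the JSON request body via `body.update(model_extras)`, and `None`-valued fields are then dropped. A minimal standalone sketch of that merge, in plain Python with hypothetical field names:

```python
from typing import Any, Dict, List, Optional


def build_payload(
    messages: List[Dict[str, str]],
    temperature: Optional[float] = None,
    model_extras: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    # Start from the documented REST fields.
    body: Dict[str, Any] = {"messages": messages, "temperature": temperature}
    # Fold model-specific extras into the root of the payload, mirroring
    # the `body.update(model_extras)` call in the hunks above.
    if model_extras is not None and bool(model_extras):
        body.update(model_extras)
    # Drop unset fields, as the patched clients do.
    return {k: v for k, v in body.items() if v is not None}


# `safe_mode` is a hypothetical model-specific parameter, not part of the REST API.
payload = build_payload(
    [{"role": "user", "content": "How many feet are in a mile?"}],
    model_extras={"safe_mode": True},
)
print(payload)  # {'messages': [...], 'safe_mode': True}
```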
@distributed_trace_async - async def _embedding( + async def _embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 0984c99f0a26..52394be6fac7 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -80,8 +80,6 @@ async def complete( self, *, messages: List[_models.ChatRequestMessage], - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", model_extras: Optional[Dict[str, Any]] = None, extras: Optional[Dict[str, str]] = None, @@ -113,12 +111,6 @@ async def complete( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -134,11 +126,6 @@ async def complete( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. @@ -204,8 +191,6 @@ async def complete( self, body: JSON, *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: @@ -218,17 +203,6 @@ async def complete( :param body: Required. :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -242,8 +216,6 @@ async def complete( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> Union[_models.AsyncStreamingChatCompletions, _models.ChatCompletions]: @@ -256,17 +228,6 @@ async def complete( :param body: Required. :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -281,8 +242,6 @@ async def complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, model_extras: Optional[Dict[str, Any]] = None, extras: Optional[Dict[str, str]] = None, frequency_penalty: Optional[float] = None, @@ -316,12 +275,6 @@ async def complete( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. How the service handles these hyper parameters depends on the value of the @@ -334,11 +287,6 @@ async def complete( ``extra-parameters`` HTTP request header. Default value is None. :paramtype extras: dict[str, str] - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. 
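The async `complete` overloads above return `Union[AsyncStreamingChatCompletions, ChatCompletions]`, so streaming callers iterate rather than index a finished result. A hedged sketch of the streaming path, assuming the public method accepts a `stream=True` keyword and that the stream yields updates exposing `choices[0].delta.content`, as the streaming samples elsewhere in this series suggest; treat both as assumptions, not a documented contract:

```python
import asyncio
import os

from azure.ai.inference.aio import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential


async def stream_chat() -> None:
    client = ChatCompletionsClient(
        endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
    )
    try:
        # stream=True (assumed) requests server-sent events instead of one JSON body.
        response = await client.complete(
            stream=True,
            messages=[
                SystemMessage(content="You are a helpful assistant."),
                UserMessage(content="Count to five."),
            ],
        )
        # AsyncStreamingChatCompletions consumed as an async iterator (assumed).
        async for update in response:
            if update.choices and update.choices[0].delta.content:
                print(update.choices[0].delta.content, end="")
    finally:
        await client.close()


asyncio.run(stream_chat())
```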
@@ -429,7 +377,7 @@ async def complete( "tools": tools, "top_p": top_p, } - if model_extras is not None: + if model_extras is not None and bool(model_extras): body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): @@ -442,8 +390,7 @@ async def complete( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_chat_completions_complete_request( - model_deployment=model_deployment, - unknown_params=unknown_params, + unknown_params="allow", content_type=content_type, api_version=self._config.api_version, content=_content, @@ -497,13 +444,11 @@ def __str__(self) -> str: class EmbeddingsClient(EmbeddingsClientGenerated): @overload - async def embedding( + async def embed( self, *, model_extras: Optional[Dict[str, Any]] = None, input: List[str], - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -521,17 +466,6 @@ async def embedding( To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -564,12 +498,10 @@ async def embedding( """ @overload - async def embedding( + async def embed( self, body: JSON, *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -577,17 +509,6 @@ async def embedding( :param body: Required. :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -597,12 +518,10 @@ async def embedding( """ @overload - async def embedding( + async def embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -610,17 +529,6 @@ async def embedding( :param body: Required. :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -630,14 +538,12 @@ async def embedding( """ @distributed_trace_async - async def embedding( + async def embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, model_extras: Optional[Dict[str, Any]] = None, input: List[str] = _Unset, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -657,17 +563,6 @@ async def embedding( To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. 
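For the embeddings counterpart, the rename above means async callers now use `embed`. A short sketch consistent with the updated `sample_embeddings_async.py` later in this patch; the environment variable names and the use of the client as an async context manager are assumptions here:

```python
import asyncio
import os

from azure.ai.inference.aio import EmbeddingsClient
from azure.core.credentials import AzureKeyCredential


async def embed_phrases() -> None:
    # Assumed: the async client supports `async with`, as Azure SDK clients generally do.
    async with EmbeddingsClient(
        endpoint=os.environ["EMBEDDINGS_ENDPOINT"],
        credential=AzureKeyCredential(os.environ["EMBEDDINGS_KEY"]),
    ) as client:
        # Note the method is `embed`, not the old `embedding`.
        response = await client.embed(input=["first phrase", "second phrase"])
        for i, item in enumerate(response.data):
            print(f"data[{i}]: vector of length {len(item.embedding)}")


asyncio.run(embed_phrases())
```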
@@ -710,7 +605,7 @@ async def embedding( "input": input, "input_type": input_type, } - if model_extras is not None: + if model_extras is not None and bool(model_extras): body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" @@ -721,8 +616,7 @@ async def embedding( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_embeddings_embedding_request( - model_deployment=model_deployment, - unknown_params=unknown_params, + unknown_params="allow", content_type=content_type, api_version=self._config.api_version, content=_content, @@ -778,13 +672,11 @@ def __str__(self) -> str: class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload - async def embedding( + async def embed( self, *, model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput], - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -802,17 +694,6 @@ async def embedding( array. The input must not exceed the max input tokens for the model. Required. :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -845,12 +726,10 @@ async def embedding( """ @overload - async def embedding( + async def embed( self, body: JSON, *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -858,17 +737,6 @@ async def embedding( :param body: Required. :type body: JSON - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -878,12 +746,10 @@ async def embedding( """ @overload - async def embedding( + async def embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: @@ -891,17 +757,6 @@ async def embedding( :param body: Required. :type body: IO[bytes] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -911,14 +766,12 @@ async def embedding( """ @distributed_trace_async - async def embedding( + async def embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, model_extras: Optional[Dict[str, Any]] = None, input: List[_models.EmbeddingInput] = _Unset, - model_deployment: Optional[str] = None, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -938,17 +791,6 @@ async def embedding( array. The input must not exceed the max input tokens for the model. Required. :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - This sets the HTTP request header `azureml-model-deployment`. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. This sets the HTTP request header `unknown-parameters`. - Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. Passing null causes the model to use its default value. 
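The image variant above takes `input: List[EmbeddingInput]`, but this patch never shows how an `EmbeddingInput` is built from a file. A hedged sketch, assuming the `image` field carries a base64-encoded string of the raw image bytes; the encoding step is an assumption, and only the `EmbeddingInput(image=...)` constructor is confirmed by the README example in this series:

```python
import base64

from azure.ai.inference.models import EmbeddingInput


def embedding_input_from_file(path: str) -> EmbeddingInput:
    # Assumption: the service expects base64 text in the `image` field,
    # not raw bytes.
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return EmbeddingInput(image=encoded)


inputs = [embedding_input_from_file("sample1.png"), embedding_input_from_file("sample2.png")]
# response = client.embed(input=inputs)
```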
@@ -991,7 +833,7 @@ async def embedding( "input": input, "input_type": input_type, } - if model_extras is not None: + if model_extras is not None and bool(model_extras): body.update(model_extras) body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" @@ -1002,8 +844,7 @@ async def embedding( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_image_embeddings_embedding_request( - model_deployment=model_deployment, - unknown_params=unknown_params, + unknown_params="allow", content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index a10584775238..5345b6424c53 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -82,7 +82,7 @@ class AssistantMessage(ChatRequestMessage, discriminator="assistant"): role: Literal[ChatRole.ASSISTANT] = rest_discriminator(name="role") # type: ignore """The chat role associated with this message, which is always 'assistant' for assistant messages. Required. The role that provides responses to system-instructed, user-prompted input.""" - content: str = rest_field() + content: Optional[str] = rest_field() """The content of the message. Required.""" tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field() """The tool calls that must be resolved and have their outputs appended to subsequent input @@ -93,7 +93,7 @@ class AssistantMessage(ChatRequestMessage, discriminator="assistant"): def __init__( self, *, - content: str, + content: Optional[str] = None, tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = None, ): ... diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index c2112160052a..011388f1d5aa 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -19,7 +19,7 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| |[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | -|[sample_chat_completions_streaming_with_entra_id_auth.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. This sample also shows setting the `model_deployment` parameter, often required for Selfhosted Endpoints. | +|[sample_chat_completions_streaming_with_entra_id_auth.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. This sample also shows setting the `azureml-model-deployment` HTTP request header, which may be required for Selfhosted Endpoints. 
|
|[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. |
|[sample_chat_completions_with_history.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, with the second completion using chat history from the first. |
|[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. |
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
index 79d70b7eb241..561f66c4d22e 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py
@@ -37,7 +37,7 @@ async def sample_embeddings_async():
     client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
     # Do a single embeddings operation.
-    response = await client.embedding(input=["first phrase", "second phrase", "third phrase"])
+    response = await client.embed(input=["first phrase", "second phrase", "third phrase"])
 
     print("Embeddings response:")
     for item in response.data:
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
index 17a6be128aa7..e59d3d7fe073 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py
@@ -21,7 +21,13 @@ def sample_chat_completions():
     import os
-
+
+    import sys
+    import logging
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(logging.StreamHandler(stream=sys.stdout))
+
     try:
         endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
         key = os.environ["CHAT_COMPLETIONS_KEY"]
@@ -35,7 +41,7 @@ def sample_chat_completions():
     from azure.ai.inference.models import SystemMessage, UserMessage
     from azure.core.credentials import AzureKeyCredential
 
-    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
 
     response = client.complete(
         messages=[
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py
new file mode 100644
index 000000000000..5cf35387b7e9
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py
@@ -0,0 +1,71 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get a chat completions response from
+    the service using a synchronous client and an Azure OpenAI endpoint.
+
+USAGE:
+    python sample_chat_completions_azure_openai.py
+
+    Set these two environment variables before running the sample:
+    1) AOAI_CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
+        https://<your-unique-resource-name>.openai.azure.com/openai/deployments/<your-deployment-name>
+        where `your-deployment-name` is your unique AI Model deployment name.
+        For example: https://my-unique-label.openai.azure.com/openai/deployments/gpt-4-turbo
+    2) AOAI_CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
+"""
+
+
+def sample_chat_completions_azure_openai():
+    import os
+
+    import sys
+    import logging
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(logging.StreamHandler(stream=sys.stdout))
+    # logger.addHandler(logging.FileHandler(filename="c:\\temp\\sample.log"))
+
+    try:
+        endpoint = os.environ["AOAI_CHAT_COMPLETIONS_ENDPOINT"]
+        key = os.environ["AOAI_CHAT_COMPLETIONS_KEY"]
+    except KeyError:
+        print("Missing environment variable 'AOAI_CHAT_COMPLETIONS_ENDPOINT' or 'AOAI_CHAT_COMPLETIONS_KEY'")
+        print("Set them before running this sample.")
+        exit()
+
+    from azure.ai.inference import ChatCompletionsClient
+    from azure.ai.inference.models import SystemMessage, UserMessage
+
+    # Using key auth
+    # from azure.core.credentials import AzureKeyCredential
+    # client = ChatCompletionsClient(
+    #     endpoint=endpoint,
+    #     credential=AzureKeyCredential(""),  # Pass in a dummy or empty value, as `credential` is a mandatory parameter
+    #     headers={"api-key": key},  # AOAI uses this header for key auth. MaaS/MaaP uses the "Authorization" header.
+    #     api_version="2024-02-15-preview", logging_enable=True)  # MaaS/MaaP uses "2024-05-01-preview"
+
+    # Using Entra ID auth
+    from azure.identity import DefaultAzureCredential
+    client = ChatCompletionsClient(
+        endpoint=endpoint,
+        credential=DefaultAzureCredential(),
+        credential_scopes=["https://cognitiveservices.azure.com/.default"],  # MaaS/MaaP uses https://ml.azure.com/.default
+        api_version="2024-02-15-preview",  # MaaS/MaaP uses "2024-05-01-preview"
+        logging_enable=True)
+
+    response = client.complete(
+        messages=[
+            SystemMessage(content="You are a helpful assistant."),
+            UserMessage(content="How many feet are in a mile?"),
+        ]
+    )
+
+    print(response.choices[0].message.content)
+
+
+if __name__ == "__main__":
+    sample_chat_completions_azure_openai()
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py
index d8e43610a3bc..3834e09822e2 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py
@@ -6,20 +6,21 @@
 DESCRIPTION:
     This sample demonstrates how to do chat completions with streaming, using
     a synchronous client, with Entra ID authentication.
-    It also shows how to set the model deployment name, which is supported
-    by Selfhosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints").
-    Model deployment name gets sent to the service as HTTP request
-    header `azureml-model-deployment`, and is optional.
-
+    It also shows how to set the optional HTTP request header `azureml-model-deployment`,
+    which is supported by Selfhosted Endpoints (aka "model as a platform" (MaaP)
+    or "real-time endpoints"). It can be used to target a test deployment
+    during staging, instead of the default production deployment.
+
 USAGE:
     python sample_chat_completions_streaming_with_entra_id_auth.py
 
-    Set these two environment variables before running the sample:
+    Set one or two of these environment variables before running the sample:
     1) CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form
         https://<your-deployment-name>.<your-azure-region>.inference.ai.azure.com
         where `your-deployment-name` is your unique AI Model deployment name, and
         `your-azure-region` is the Azure region where your model is deployed.
-    2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret.
+    2) CHAT_COMPLETIONS_DEPLOYMENT_NAME - Optional. The value for the HTTP
+        request header `azureml-model-deployment`.
 """
@@ -37,18 +38,22 @@ def sample_chat_completions_streaming_with_entra_id_auth():
         model_deployment = os.environ["CHAT_COMPLETIONS_DEPLOYMENT_NAME"]
     except KeyError:
         print("Could not read optional environment variable `CHAT_COMPLETIONS_DEPLOYMENT_NAME`.")
-        print("`model_deployment` will not be set.")
+        print("HTTP request header `azureml-model-deployment` will not be set.")
         model_deployment = None
 
     from azure.ai.inference import ChatCompletionsClient
     from azure.ai.inference.models import SystemMessage, UserMessage
     from azure.identity import DefaultAzureCredential
 
-    # See https://learn.microsoft.com/python/api/overview/azure/identity-readme#defaultazurecredential
-    client = ChatCompletionsClient(endpoint=endpoint, credential=DefaultAzureCredential())
+    # For details on DefaultAzureCredential, see
+    # https://learn.microsoft.com/python/api/overview/azure/identity-readme#defaultazurecredential
+
+    client = ChatCompletionsClient(
+        endpoint=endpoint,
+        credential=DefaultAzureCredential(),
+        headers={"azureml-model-deployment": model_deployment})
 
     response = client.complete(
-        model_deployment=model_deployment,
         messages=[
             SystemMessage(content="You are a helpful assistant."),
             UserMessage(content="Give me 5 good reasons why I should exercise every day."),
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py
index 7428be24e362..5b78b504b122 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py
@@ -26,6 +26,12 @@ def sample_chat_completions_with_model_extras():
     import os
 
+    import sys
+    import logging
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(logging.StreamHandler(stream=sys.stdout))
+
     try:
         endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"]
         key = os.environ["CHAT_COMPLETIONS_KEY"]
@@ -38,7 +44,7 @@ def sample_chat_completions_with_model_extras():
     from azure.ai.inference.models import SystemMessage, UserMessage, UnknownParams
     from azure.core.credentials import AzureKeyCredential
 
-    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
+    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
 
     # [START model_extras]
     response = client.complete(
         messages=[
             SystemMessage(content="You are a helpful assistant."),
             UserMessage(content="How many feet are in a mile?"),
         ],
         unknown_params=UnknownParams.ALLOW,  # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default)
         model_extras={  # Optional. Additional parameters to pass to the model.
-            "key1": 1,
-            "key2": True,
-            "key3": "Some value",
-            "key4": [1, 2, 3],
-            "key5": {"key6": 2, "key7": False, "key8": "Some other value", "key9": [4, 5, 6, 7]},
+            "key1": "value1",
+            "key2": "value2"
         },
     )
     # [END model_extras]
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index bd0c8770d86d..f8c97f5ba537 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -36,7 +36,7 @@ def sample_embeddings():
 
     client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
 
-    response = client.embedding(input=["first phrase", "second phrase", "third phrase"])
+    response = client.embed(input=["first phrase", "second phrase", "third phrase"])
 
     for item in response.data:
         length = len(item.embedding)

From a825db45d83b42170e62566457738d25fce8f1d4 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 30 May 2024 10:28:49 -0700
Subject: [PATCH 087/112] Use `embed` instead of `embedding`. Update Entra ID
 and AOAI samples

---
 sdk/ai/azure-ai-inference/README.md           | 14 +--
 sdk/ai/azure-ai-inference/assets.json         |  2 +-
 .../ai/inference/_operations/_operations.py   | 39 --------
 .../azure/ai/inference/_patch.py              | 16 ++--
 .../inference/aio/_operations/_operations.py  | 30 ------
 .../azure/ai/inference/aio/_patch.py          | 18 ++--
 .../azure/ai/inference/models/_enums.py       | 11 ++-
 .../sample_image_embeddings_async.py          |  2 +-
 .../async_samples/sample_load_client_async.py |  2 +-
 .../samples/sample_chat_completions.py        |  8 +-
 .../sample_chat_completions_azure_openai.py   | 89 +++++++++++--------
 ...ompletions_streaming_with_entra_id_auth.py |  2 +-
 ...mple_chat_completions_with_model_extras.py |  9 +-
 .../samples/sample_image_embeddings.py        |  2 +-
 .../test_model_inference_async_client.py      |  5 +-
 .../tests/test_model_inference_client.py      |  5 +-
 sdk/ai/azure-ai-inference/tsp-location.yaml   |  2 +-
 17 files changed, 101 insertions(+), 155 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index 288fdb4b1529..18093c202f1a 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -10,7 +10,7 @@ Use the model inference client library to:
 
 * Get text embeddings
 
-Note that for inference using OpenAI models hosted on Azure you should be using the [OpenAI Python client library](https://github.com/openai/openai-python) instead of this client.
+Note that for inference using OpenAI models hosted on Azure, you should be using the official [OpenAI Python client library](https://github.com/openai/openai-python) in production code instead of this client. However, for development and evaluation purposes (comparing OpenAI models to other models in the Azure AI Studio catalog), you can use the azure-ai-inference Python client library with Azure OpenAI endpoints, as shown [in this sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py).
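The new Azure OpenAI sample referenced in the paragraph above keeps its key-auth variant commented out. Assembled here for reference, mirroring the sample's own comments; this is a sketch of that commented-out path, not separately documented behavior:

```python
import os

from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

client = ChatCompletionsClient(
    endpoint=os.environ["AOAI_CHAT_COMPLETIONS_ENDPOINT"],
    credential=AzureKeyCredential(""),  # dummy value; `credential` is a mandatory parameter
    headers={"api-key": os.environ["AOAI_CHAT_COMPLETIONS_KEY"]},  # AOAI key-auth header
    api_version="2024-02-15-preview",  # per the sample; MaaS/MaaP uses "2024-05-01-preview"
)

response = client.complete(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="How many feet are in a mile?"),
    ]
)
print(response.choices[0].message.content)
```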
[Product documentation](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api)
| [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples)
@@ -81,7 +81,7 @@ client = ChatCompletionsClient(
 
 ### Create and authenticate a client directly, using Entra ID
 
-To use an Entra ID token credential, firs install the [azure-identity](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity) package:
+To use an Entra ID token credential, first install the [azure-identity](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity) package:
 
 ```bash
 pip install azure-identity
 ```
@@ -98,6 +98,7 @@ client = ChatCompletionsClient(
     credential=DefaultAzureCredential()
 )
 ```
+During application development, you would typically set up the environment for authentication using Entra ID by first [installing the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli), running `az login` in your console window, and then entering your credentials in the browser window that opens. The call to `DefaultAzureCredential()` will then succeed.
 
 ### Create and authenticate clients using `load_client`
 
@@ -273,9 +274,9 @@ To generate completions for additional messages, simply call `client.complete` m
 
 In this example, extra JSON elements are inserted at the root of the request body by setting `model_extras` when calling the `complete` method. These are intended for AI models that require extra parameters beyond what is defined in the REST API.
 
-Note that by default, the service will reject any request payload that includes unknown parameters (ones that are not defined in the REST API [Request Body table](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions#request-body)). In order to change that behaviour, the request must include an additional HTTP header that described the intended behaviour with regards to unknown parameters. This is done by setting `unknown_params` to allow passing the unknown parameers to the AI model, or by ingnoring them (dropping them), and only passing the known parameters to the model.
+Note that by default, the service will reject any request payload that includes unknown parameters (ones that are not defined in the REST API [Request Body table](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions#request-body)). In order to change the default service behaviour, when the `complete` method includes `model_extras`, the client library will automatically add the HTTP request header `unknown-parameters: pass_through`.
 
-The settings `model_extras` and `unknown_params` are suppored for all other clients as well.
+The input argument `model_extras` is not restricted to chat completions. It is supported on other client methods as well.
 
@@ -285,7 +286,6 @@ response = client.complete(
         SystemMessage(content="You are a helpful assistant."),
         UserMessage(content="How many feet are in a mile?"),
     ],
-    unknown_params=UnknownParams.ALLOW,  # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default)
     model_extras={  # Optional. Additional parameters to pass to the model.
"key1": "value1", "key2": "value2" @@ -320,7 +320,7 @@ from azure.core.credentials import AzureKeyCredential client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) +response = client.embed(input=["first phrase", "second phrase", "third phrase"]) for item in response.data: length = len(item.embedding) @@ -361,7 +361,7 @@ with open("sample2.png", "rb") as f: client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) -response = client.embedding(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) +response = client.embed(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-inference/assets.json b/sdk/ai/azure-ai-inference/assets.json index e9d32a1e01b9..ce5bacc60905 100644 --- a/sdk/ai/azure-ai-inference/assets.json +++ b/sdk/ai/azure-ai-inference/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-inference", - "Tag": "python/ai/azure-ai-inference_4a3ee6b285" + "Tag": "python/ai/azure-ai-inference_a50981dab0" } diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 99035cb603ed..80fa6cef0bf3 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -44,7 +44,6 @@ def build_chat_completions_complete_request( *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: @@ -62,8 +61,6 @@ def build_chat_completions_complete_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if model_deployment is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") if unknown_params is not None: _headers["unknown-parameters"] = _SERIALIZER.header("unknown_params", unknown_params, "str") if content_type is not None: @@ -94,7 +91,6 @@ def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: def build_embeddings_embedding_request( *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: @@ -112,8 +108,6 @@ def build_embeddings_embedding_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if model_deployment is not None: - _headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") if unknown_params is not None: _headers["unknown-parameters"] = _SERIALIZER.header("unknown_params", unknown_params, "str") if content_type is not None: @@ -144,7 +138,6 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: def build_image_embeddings_embedding_request( *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: @@ -162,8 +155,6 @@ def build_image_embeddings_embedding_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if model_deployment is not None: - 
_headers["azureml-model-deployment"] = _SERIALIZER.header("model_deployment", model_deployment, "str") if unknown_params is not None: _headers["unknown-parameters"] = _SERIALIZER.header("unknown_params", unknown_params, "str") if content_type is not None: @@ -199,7 +190,6 @@ def _complete( self, body: JSON, *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -209,7 +199,6 @@ def _complete( self, *, messages: List[_models.ChatRequestMessage], - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, @@ -232,7 +221,6 @@ def _complete( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -244,7 +232,6 @@ def _complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -276,11 +263,6 @@ def _complete( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON request payload. Known values are: "error", "ignore", and "allow". Default value is None. 
:paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams @@ -475,7 +457,6 @@ def _complete( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_chat_completions_complete_request( - model_deployment=model_deployment, unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -588,7 +569,6 @@ def _embed( self, body: JSON, *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -598,7 +578,6 @@ def _embed( self, *, input: List[str], - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, @@ -611,7 +590,6 @@ def _embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -623,7 +601,6 @@ def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -639,11 +616,6 @@ def _embed( To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. :paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON request payload. Known values are: "error", "ignore", and "allow". Default value is None. 
:paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams @@ -757,7 +729,6 @@ def _embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_embeddings_embedding_request( - model_deployment=model_deployment, unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -870,7 +841,6 @@ def _embed( self, body: JSON, *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -880,7 +850,6 @@ def _embed( self, *, input: List[_models.EmbeddingInput], - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, @@ -893,7 +862,6 @@ def _embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -905,7 +873,6 @@ def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -921,11 +888,6 @@ def _embed( array. The input must not exceed the max input tokens for the model. Required. :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON request payload. Known values are: "error", "ignore", and "allow". Default value is None. 
:paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams @@ -1042,7 +1004,6 @@ def _embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_image_embeddings_embedding_request( - model_deployment=model_deployment, unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 344a78398e5b..35c116b25262 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -351,7 +351,7 @@ def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params = None + _unknown_params:_models._enums.UnknownParams = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -374,7 +374,7 @@ def complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params="allow" + _unknown_params=_models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -589,6 +589,7 @@ def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + _unknown_params:_models._enums.UnknownParams = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -601,8 +602,9 @@ def embed( "input": input, "input_type": input_type, } - if model_extras is not None and bool(model_extras): + if model_extras is not None and bool(model_extras): body.update(model_extras) + _unknown_params=_models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -612,7 +614,7 @@ def embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_embeddings_embedding_request( - unknown_params="allow", + unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -816,6 +818,7 @@ def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + _unknown_params:_models._enums.UnknownParams = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -828,8 +831,9 @@ def embed( "input": input, "input_type": input_type, } - if model_extras is not None and bool(model_extras): + if model_extras is not None and bool(model_extras): body.update(model_extras) + _unknown_params=_models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -839,7 +843,7 @@ def embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_image_embeddings_embedding_request( - unknown_params="allow", + unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index e62f189bfcec..4d09671ec2a9 100644 --- 
a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -53,7 +53,6 @@ async def _complete( self, body: JSON, *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -63,7 +62,6 @@ async def _complete( self, *, messages: List[_models.ChatRequestMessage], - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, @@ -86,7 +84,6 @@ async def _complete( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -98,7 +95,6 @@ async def _complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -130,11 +126,6 @@ async def _complete( the behavior of the assistant, followed by alternating messages between the User and Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON request payload. Known values are: "error", "ignore", and "allow". Default value is None. The service defaults to "error" in this case. @@ -330,7 +321,6 @@ async def _complete( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_chat_completions_complete_request( - model_deployment=model_deployment, unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -443,7 +433,6 @@ async def _embed( self, body: JSON, *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -453,7 +442,6 @@ async def _embed( self, *, input: List[str], - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, @@ -466,7 +454,6 @@ async def _embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -478,7 +465,6 @@ async def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -494,11 +480,6 @@ async def _embed( To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Required. 
:paramtype input: list[str] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON request payload. Known values are: "error", "ignore", and "allow". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams @@ -612,7 +593,6 @@ async def _embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_embeddings_embedding_request( - model_deployment=model_deployment, unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -725,7 +705,6 @@ async def _embed( self, body: JSON, *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -735,7 +714,6 @@ async def _embed( self, *, input: List[_models.EmbeddingInput], - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, @@ -748,7 +726,6 @@ async def _embed( self, body: IO[bytes], *, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any @@ -760,7 +737,6 @@ async def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, - model_deployment: Optional[str] = None, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -776,11 +752,6 @@ async def _embed( array. The input must not exceed the max input tokens for the model. Required. :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword model_deployment: Name of the deployment to which you would like to route the request. - Relevant only to Model-as-a-Platform (MaaP) deployments. - Typically used when you want to target a test environment instead of production environment. - Default value is None. - :paramtype model_deployment: str :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON request payload. Known values are: "error", "ignore", and "allow". Default value is None. 
:paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams @@ -897,7 +868,6 @@ async def _embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_image_embeddings_embedding_request( - model_deployment=model_deployment, unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 52394be6fac7..27fe2d499bd5 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -356,6 +356,7 @@ async def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + _unknown_params:_models._enums.UnknownParams = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -377,8 +378,9 @@ async def complete( "tools": tools, "top_p": top_p, } - if model_extras is not None and bool(model_extras): + if model_extras is not None and bool(model_extras): body.update(model_extras) + _unknown_params=_models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -390,7 +392,7 @@ async def complete( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_chat_completions_complete_request( - unknown_params="allow", + unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -593,6 +595,7 @@ async def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + _unknown_params:_models._enums.UnknownParams = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -605,8 +608,9 @@ async def embed( "input": input, "input_type": input_type, } - if model_extras is not None and bool(model_extras): + if model_extras is not None and bool(model_extras): body.update(model_extras) + _unknown_params=_models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -616,7 +620,7 @@ async def embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_embeddings_embedding_request( - unknown_params="allow", + unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -821,6 +825,7 @@ async def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + _unknown_params:_models._enums.UnknownParams = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -833,8 +838,9 @@ async def embed( "input": input, "input_type": input_type, } - if model_extras is not None and bool(model_extras): + if model_extras is not None and bool(model_extras): body.update(model_extras) + _unknown_params=_models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -844,7 +850,7 @@ async def embed( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore 
_request = build_image_embeddings_embedding_request( - unknown_params="allow", + unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 49053a340001..6867b680bd61 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -126,8 +126,11 @@ class UnknownParams(str, Enum, metaclass=CaseInsensitiveEnumMeta): ERROR = "error" """The service will error if it detected unknown parameters in the request payload. This is the - default.""" - IGNORE = "ignore" - """The service will ignore unknown parameters in the request payload.""" - ALLOW = "allow" + service default.""" + DROP = "drop" + """ + The service will ignore (drop) unknown parameters in the request payload. + It will only pass the known parameters to the back-end AI model. + """ + PASS_THROUGH = "pass_through" """The service will pass unknown parameters to the back-end AI model.""" diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index 0a0b70c136a4..0a948480c56a 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -44,7 +44,7 @@ async def sample_image_embeddings_async(): client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Do a single image embeddings operation. Start the operation and get a Future object. - response = await client.embedding(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) + response = await client.embed(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) print("Embeddings response:") for item in response.data: diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py index 548a3fa6f43a..7820a1711cff 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py @@ -41,7 +41,7 @@ async def sample_load_client_async(): print(f"Created client of type `{type(client).__name__}`.") if isinstance(client, EmbeddingsClient): - response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) print("Embeddings response:") for item in response.data: diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index e59d3d7fe073..bd69deae3888 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -22,12 +22,6 @@ def sample_chat_completions(): import os - import sys - import logging - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(stream=sys.stdout)) - try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] key = os.environ["CHAT_COMPLETIONS_KEY"] @@ -41,7 +35,7 @@ def sample_chat_completions(): from azure.ai.inference.models import SystemMessage, UserMessage from 
azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable = True) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) response = client.complete( messages=[ diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py index 5cf35387b7e9..b9e2de6ddb78 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py @@ -5,57 +5,74 @@ """ DESCRIPTION: This sample demonstrates how to get a chat completions response from - the service using a synchronous client, using an Azure OpenAI endpoint. + the service using a synchronous client, with an Azure OpenAI (AOAI) endpoint. + Two types of authentication are shown: key authentication and Entra ID + authentication. -USAGE: - python sample_chat_completions_azure_openai.py + Note that all production code should use the official OpenAI Python client + library when using Azure OpenAI (AOAI) endpoints. This library can be found + here: https://github.com/openai/openai-python + + For development and evaluation purposes (comparing OpenAI models to other + models in the Azure AI Studio catalog), you can use the azure-ai-inference + Python client library with AOAI endpoints, as shown in this sample. - Set these two environment variables before running the sample: - 1) AOAI_CHAT_COMPLETIONS_ENDPOINT - Your endpoint URL, in the form - https://.openai.azure.com/openai/deployments/ - where `your-deployment-name` is your unique AI Model deployment name. - For example: https://my-unique-label.openai.azure.com/openai/deployments/gpt-4-turbo - 2) AOAI_CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. +USAGE: + 1. Update `key_auth` below to `True` for key authentication, or `False` for + Entra ID authentication. + 2. Update `api_version` (the AOAI REST API version) as needed. + 3. Set one or two environment variables, depending on your authentication method: + * AOAI_CHAT_COMPLETIONS_ENDPOINT - Your AOAI endpoint URL, with partial path, in the form + https://<your-unique-resource-name>.openai.azure.com/openai/deployments/<your-deployment-name> + where `your-unique-resource-name` is your globally unique AOAI resource name, + and `your-deployment-name` is your AI Model deployment name. + For example: https://your-unique-host.openai.azure.com/openai/deployments/gpt-4-turbo + * AOAI_CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. This + is only required for key authentication. + 4. Run the sample: + python sample_chat_completions_azure_openai.py """ +key_auth:bool = True # Set to True for key authentication, or False for Entra ID authentication. 
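For quick reference, the two authentication paths the reworked sample supports reduce to the sketch below, distilled from this hunk (same environment variables as the docstring; update `api_version` as needed for your AOAI deployment):

```python
import os
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from azure.identity import DefaultAzureCredential

endpoint = os.environ["AOAI_CHAT_COMPLETIONS_ENDPOINT"]

# Option 1, key authentication: AOAI expects the key in an "api-key" request
# header, so pass an empty AzureKeyCredential and set the header explicitly.
client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(""),  # Empty value; the real key goes in the header.
    headers={"api-key": os.environ["AOAI_CHAT_COMPLETIONS_KEY"]},
    api_version="2024-02-15-preview",  # AOAI api-version. Update as needed.
)

# Option 2, Entra ID authentication: let DefaultAzureCredential acquire a
# token for the Cognitive Services scope instead.
client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=DefaultAzureCredential(),
    credential_scopes=["https://cognitiveservices.azure.com/.default"],
    api_version="2024-02-15-preview",
)
```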
def sample_chat_completions_azure_openai(): import os - - import sys - import logging - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(stream=sys.stdout)) - #logger.addHandler(logging.FileHandler(filename = "c:\\temp\\sample.log")) + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage try: endpoint = os.environ["AOAI_CHAT_COMPLETIONS_ENDPOINT"] - key = os.environ["AOAI_CHAT_COMPLETIONS_KEY"] except KeyError: - print("Missing environment variable 'AOAI_CHAT_COMPLETIONS_ENDPOINT' or 'AOAI_CHAT_COMPLETIONS_KEY'") - print("Set them before running this sample.") + print("Missing environment variable 'AOAI_CHAT_COMPLETIONS_ENDPOINT'") + print("Set it before running this sample.") exit() - from azure.ai.inference import ChatCompletionsClient - from azure.ai.inference.models import SystemMessage, UserMessage + if key_auth: + from azure.core.credentials import AzureKeyCredential + + try: + key = os.environ["AOAI_CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'AOAI_CHAT_COMPLETIONS_KEY'") + print("Set it before running this sample.") + exit() + + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(""), # Pass in an empty value. + headers={"api-key": key}, + api_version="2024-02-15-preview" # AOAI api-version. Update as needed. + ) - # Using key auth - # from azure.core.credentials import AzureKeyCredential - # client = ChatCompletionsClient( - # endpoint=endpoint, - # credential=AzureKeyCredential(""), # Pass in a dummy or empty value, as `credential` is a mandatory parameter - # headers={"api-key": key}, # AOAI uses this header for key auth. MaaS/MaaP uses "Authorization" header. - # api_version="2024-02-15-preview", logging_enable = True) # MaaS/MaaP uses "2024-05-01-preview" + else: # Entra ID authentication + from azure.identity import DefaultAzureCredential - # Using Entra ID auth - from azure.identity import DefaultAzureCredential - client = ChatCompletionsClient( - endpoint=endpoint, - credential=DefaultAzureCredential(), - credential_scopes=["https://cognitiveservices.azure.com/.default"], # MaaS/MaaP uses https://ml.azure.com/.default - api_version="2024-02-15-preview", # MaaS/MaaP uses "2024-05-01-preview" - logging_enable = True) + client = ChatCompletionsClient( + endpoint=endpoint, + credential=DefaultAzureCredential(), + credential_scopes=["https://cognitiveservices.azure.com/.default"], + api_version="2024-02-15-preview" # AOAI api-version. Update as needed. + ) response = client.complete( messages=[ diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py index 3834e09822e2..652a7c756fb0 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py @@ -9,7 +9,7 @@ It also shows how to set the optional HTTP request header `azureml-model-deployment`, which is supported by Selfhosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). It can be used to target test deployment - during stating, instead of the default production deployment. + during staging, instead of the default production deployment. 
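As a compact illustration of the `azureml-model-deployment` header this docstring describes, the header can be supplied at client construction, the same way the AOAI sample above passes `api-key`, so it rides along on every request. The endpoint and deployment name below are hypothetical placeholders:

```python
from azure.ai.inference import ChatCompletionsClient
from azure.identity import DefaultAzureCredential

# "my-staging-deployment" is a hypothetical test deployment name; without the
# header, the endpoint routes requests to its default production deployment.
client = ChatCompletionsClient(
    endpoint="https://your-deployment-name.your-azure-region.inference.ai.azure.com",
    credential=DefaultAzureCredential(),
    credential_scopes=["https://ml.azure.com/.default"],  # Scope used by these endpoints.
    headers={"azureml-model-deployment": "my-staging-deployment"},
)
```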
USAGE: python sample_chat_completions_streaming_with_entra_id_auth.py diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py index 5b78b504b122..c269f9958ac5 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py @@ -26,12 +26,6 @@ def sample_chat_completions_with_model_extras(): import os - import sys - import logging - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(stream=sys.stdout)) - try: endpoint = os.environ["CHAT_COMPLETIONS_ENDPOINT"] key = os.environ["CHAT_COMPLETIONS_KEY"] @@ -44,7 +38,7 @@ def sample_chat_completions_with_model_extras(): from azure.ai.inference.models import SystemMessage, UserMessage, UnknownParams from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable = True) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # [START model_extras] response = client.complete( @@ -52,7 +46,6 @@ def sample_chat_completions_with_model_extras(): SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], - unknown_params=UnknownParams.ALLOW, # Optional. Supported values: "ALLOW", "IGNORE", "ERROR" (service default) model_extras={ # Optional. Additional parameters to pass to the model. "key1": "value1", "key2": "value2" diff --git a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py index 3ddc5a128daa..a7b7dc0e5b6b 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py +++ b/sdk/ai/azure-ai-inference/samples/sample_image_embeddings.py @@ -43,7 +43,7 @@ def sample_image_embeddings(): client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - response = client.embedding(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) + response = client.embed(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)]) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 02f6cad3d1a9..801cdb7887aa 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -52,7 +52,7 @@ async def test_async_get_model_info_on_embeddings_client(self, **kwargs): @recorded_by_proxy_async async def test_async_embeddings(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) - response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) self._print_embeddings_result(response) self._validate_embeddings_result(response) await client.close() @@ -114,7 +114,6 @@ async def test_async_chat_completions_with_model_extras(self, **kwargs): client = self._create_async_chat_client(**kwargs) response = await client.complete( messages=[sdk.models.UserMessage(content="How many feet are in a mile?")], - unknown_params=sdk.models.UnknownParams.IGNORE, model_extras={ "key1": 1, "key2": True, @@ -190,7 +189,7 @@ async 
def test_embeddings_with_auth_failure(self, **kwargs): client = self._create_async_embeddings_client(bad_key=True, **kwargs) exception_caught = False try: - response = await client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: exception_caught = True print(e) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 8c22de6878fe..2beae8e8d4d9 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -51,7 +51,7 @@ def test_get_model_info_on_embeddings_client(self, **kwargs): @recorded_by_proxy def test_embeddings(self, **kwargs): client = self._create_embeddings_client(**kwargs) - response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = client.embed(input=["first phrase", "second phrase", "third phrase"]) self._print_embeddings_result(response) self._validate_embeddings_result(response) client.close() @@ -114,7 +114,6 @@ def test_chat_completions_with_model_extras(self, **kwargs): client = self._create_chat_client(**kwargs) response = client.complete( messages=[sdk.models.UserMessage(content="How many feet are in a mile?")], - unknown_params=sdk.models.UnknownParams.IGNORE, model_extras={ "key1": 1, "key2": True, @@ -254,7 +253,7 @@ def test_embeddings_on_chat_completion_endpoint(self, **kwargs): client = self._create_embeddings_client_with_chat_completions_credentials(**kwargs) exception_caught = False try: - response = client.embedding(input=["first phrase", "second phrase", "third phrase"]) + response = client.embed(input=["first phrase", "second phrase", "third phrase"]) except AzureError as e: exception_caught = True print(e) diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index e87796de0772..2d126c5532d2 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: 556cb56981e1667fccdd303860d6d508af20e63d +commit: be191d796831bbdd495781dffb8a72443aee6a13 repo: Azure/azure-rest-api-specs additionalDirectories: From c4814d874cfdde7ae0318d7fed1f074f3276ec2a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 30 May 2024 17:51:39 -0700 Subject: [PATCH 088/112] Fix some mypy errors. Use different terms for MaaS/MaaP --- sdk/ai/azure-ai-inference/README.md | 2 +- .../azure/ai/inference/_patch.py | 36 +++++++++++-------- .../azure/ai/inference/aio/_patch.py | 35 ++++++++++-------- sdk/ai/azure-ai-inference/samples/README.md | 8 ++--- ...ompletions_streaming_with_entra_id_auth.py | 6 ++-- 5 files changed, 49 insertions(+), 38 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 18093c202f1a..5bef6316b01c 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,6 +1,6 @@ # Azure AI Inference client library for Python -The client Library (in preview) allows you to do inference using AI models you deployed to Azure. It supports both Serverless Endpoints (aka "model as a service" (MaaS) or "pay as you go") and Selfhosted Endpoints (aka "model as a platform" (MaaP) or "real-time endpoints"). 
The client library makes services calls using REST AP version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). +The client library (in preview) allows you to do inference using AI models you deployed to Azure. It supports both types of AI model deployments in Azure AI Studio: _Serverless API with Azure AI Content Safety_ and _Managed Compute without Azure AI Content Safety_. The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). Use the model inference client library to: diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 35c116b25262..ae5decbc55ed 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -60,9 +60,9 @@ def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: - client1 = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... - model_info = client1.get_model_info() - client1.close() + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... 
+ model_info = client.get_model_info() + client.close() _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ""): @@ -72,16 +72,22 @@ def load_client( # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): - client2 = ChatCompletionsClient(endpoint, credential, **kwargs) - elif model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): - client2 = EmbeddingsClient(endpoint, credential, **kwargs) - elif model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - client2 = ImageEmbeddingsClient(endpoint, credential, **kwargs) - else: - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + chat_completion_client = ChatCompletionsClient(endpoint, credential, **kwargs) + chat_completion_client._model_info = model_info # pylint: disable=protected-access + return chat_completion_client + + if model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): + embedding_client = EmbeddingsClient(endpoint, credential, **kwargs) + embedding_client._model_info = model_info # pylint: disable=protected-access + return embedding_client + + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + image_embedding_client = ImageEmbeddingsClient(endpoint, credential, **kwargs) + image_embedding_client._model_info = model_info # pylint: disable=protected-access + return image_embedding_client + + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") - client2._model_info = model_info # pylint: disable=protected-access - return client2 class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -351,7 +357,7 @@ def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:_models._enums.UnknownParams = None + _unknown_params:Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -589,7 +595,7 @@ def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:_models._enums.UnknownParams = None + _unknown_params:Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -818,7 +824,7 @@ def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:_models._enums.UnknownParams = None + _unknown_params:Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 27fe2d499bd5..5e0858b88c3a 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -49,9 +49,9 @@ async def load_client( endpoint: str, credential: AzureKeyCredential, **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: - client1 = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... 
- model_info = await client1.get_model_info() - await client1.close() + client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... + model_info = await client.get_model_info() + await client.close() _LOGGER.info("model_info=%s", model_info) if model_info.model_type in (None, ""): @@ -61,16 +61,21 @@ async def load_client( # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): - client2 = ChatCompletionsClient(endpoint, credential, **kwargs) - elif model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): - client2 = EmbeddingsClient(endpoint, credential, **kwargs) - elif model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: - client2 = ImageEmbeddingsClient(endpoint, credential, **kwargs) - else: - raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") + chat_completion_client = ChatCompletionsClient(endpoint, credential, **kwargs) + chat_completion_client._model_info = model_info # pylint: disable=protected-access + return chat_completion_client - client2._model_info = model_info # pylint: disable=protected-access - return client2 + if model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): + embedding_client = EmbeddingsClient(endpoint, credential, **kwargs) + embedding_client._model_info = model_info # pylint: disable=protected-access + return embedding_client + + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: + image_embedding_client = ImageEmbeddingsClient(endpoint, credential, **kwargs) + image_embedding_client._model_info = model_info # pylint: disable=protected-access + return image_embedding_client + + raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") class ChatCompletionsClient(ChatCompletionsClientGenerated): @@ -356,7 +361,7 @@ async def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:_models._enums.UnknownParams = None + _unknown_params:Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -595,7 +600,7 @@ async def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:_models._enums.UnknownParams = None + _unknown_params:Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -825,7 +830,7 @@ async def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:_models._enums.UnknownParams = None + _unknown_params:Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 011388f1d5aa..33cc3fe7056c 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -28,7 +28,7 @@ These are runnable console Python scripts that show how to do chat completion, t |[sample_load_client.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client.py) | Shows how to use the function 
`load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | |[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. Similarly can be done with all other clients. | |[sample_chat_completions_with_model_extras.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py) | Chat completions with additional model-specific parameters. | - +|[sample_chat_completions_azure_openai.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py) | Chat completions against Azure OpenAI endpoint. | ### Text embeddings @@ -89,7 +89,7 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ ## Set environment variables -To construct any of the clients, you will need to pass in the endpoint URL and key associated with your deployed AI model. +To construct any of the clients, you will need to pass in the endpoint URL. If you are using key authentication, you also need to pass in the key associated with your deployed AI model. * The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). @@ -108,13 +108,13 @@ Note that the client library does not directly read these environment variable a ## Running the samples To run the first sample, type: + ```bash python sample_chat_completions.py ``` + similarly for the other samples. ## Troubleshooting See [Troubleshooting](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#troubleshooting) here. - - diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py index 652a7c756fb0..04c3944f1d51 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py @@ -7,9 +7,9 @@ This sample demonstrates how to do chat completions with streaming, using a synchronous client, with an Entra ID authentication. It also shows how to set the optional HTTP request header `azureml-model-deployment`, - which is supported by Selfhosted Endpoints (aka "model as a platform" (MaaP) - or "real-time endpoints"). It can be used to target test deployment - during staging, instead of the default production deployment. + which is supported when you deploy a model using "Managed Compute without Azure + AI Content Safety". It can be used to target test deployment during staging, + instead of the default production deployment. 
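Pulling together the samples README rows above with the `embedding` to `embed` rename made throughout this series, a minimal synchronous counterpart of the patched async load-client sample would look roughly like this (the environment variable names are assumed, following the convention this README describes):

```python
import os
from azure.ai.inference import load_client, EmbeddingsClient
from azure.core.credentials import AzureKeyCredential

# load_client() calls get_model_info() once, then returns a ChatCompletionsClient,
# EmbeddingsClient, or ImageEmbeddingsClient matching the reported model type.
client = load_client(
    endpoint=os.environ["EMBEDDINGS_ENDPOINT"],  # Assumed variable names, in the
    credential=AzureKeyCredential(os.environ["EMBEDDINGS_KEY"]),  # style of the samples.
)
print(f"Created client of type `{type(client).__name__}`.")

if isinstance(client, EmbeddingsClient):
    # `embed` is the new name for the operation previously called `embedding`.
    response = client.embed(input=["first phrase", "second phrase", "third phrase"])
    for item in response.data:
        print(f"index={item.index}, embedding length={len(item.embedding)}")
```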
USAGE: python sample_chat_completions_streaming_with_entra_id_auth.py From 2df88dc81e2422d57702d324e95c1702550ae958 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 31 May 2024 08:45:24 -0700 Subject: [PATCH 089/112] Re-emit, now with pyproject.toml --- sdk/ai/azure-ai-inference/README.md | 3 +- .../azure/ai/inference/__init__.py | 16 +- .../azure/ai/inference/_configuration.py | 4 +- .../ai/inference/_operations/_operations.py | 279 +++++++++++++++--- .../azure/ai/inference/_patch.py | 26 +- .../azure/ai/inference/aio/__init__.py | 16 +- .../azure/ai/inference/aio/_configuration.py | 6 +- .../inference/aio/_operations/_operations.py | 268 ++++++++++++++--- .../azure/ai/inference/aio/_patch.py | 24 +- .../azure/ai/inference/models/__init__.py | 39 +-- .../azure/ai/inference/models/_enums.py | 6 +- .../azure/ai/inference/models/_models.py | 224 +++++++------- sdk/ai/azure-ai-inference/pyproject.toml | 2 + ...chat_completions_from_input_bytes_async.py | 4 +- ..._chat_completions_from_input_json_async.py | 4 +- .../sample_chat_completions_azure_openai.py | 19 +- ...ample_chat_completions_from_input_bytes.py | 4 +- ...sample_chat_completions_from_input_json.py | 10 +- ...ompletions_streaming_with_entra_id_auth.py | 12 +- ...mple_chat_completions_with_model_extras.py | 5 +- .../tests/model_inference_test_base.py | 6 +- .../test_model_inference_async_client.py | 16 +- .../tests/test_model_inference_client.py | 18 +- 23 files changed, 701 insertions(+), 310 deletions(-) create mode 100644 sdk/ai/azure-ai-inference/pyproject.toml diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 5bef6316b01c..d5dd92426d52 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,6 +1,7 @@ # Azure AI Inference client library for Python -The client library (in preview) allows you to do inference using AI models you deployed to Azure. It supports both types of AI model deployments in Azure AI Studio: _Serverless API with Azure AI Content Safety_ and _Managed Compute without Azure AI Content Safety_. The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). +The client library (in preview) allows you to generate predictions from foundational models deployed to Azure AI Studio and Azure Machine Learning. It supports +Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). 
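To make the rewritten overview concrete, a minimal chat completions call against a Serverless API endpoint, assembled from this repository's own samples, looks like the sketch below; the final line assumes the response carries the usual `choices[0].message.content` shape:

```python
import os
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

# Endpoint and key come from the environment variables the samples use.
client = ChatCompletionsClient(
    endpoint=os.environ["CHAT_COMPLETIONS_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["CHAT_COMPLETIONS_KEY"]),
)

response = client.complete(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="How many feet are in a mile?"),
    ]
)
print(response.choices[0].message.content)
```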
Use the model inference client library to: diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py index 898076e89409..ff62b276a309 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/__init__.py @@ -6,25 +6,23 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._client import ChatCompletionsClient -from ._client import EmbeddingsClient -from ._client import ImageEmbeddingsClient +from ._patch import ChatCompletionsClient +from ._patch import EmbeddingsClient +from ._patch import ImageEmbeddingsClient from ._version import VERSION __version__ = VERSION -try: - from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] + +from ._patch import load_client from ._patch import patch_sdk as _patch_sdk __all__ = [ + "load_client", "ChatCompletionsClient", "EmbeddingsClient", "ImageEmbeddingsClient", ] -__all__.extend([p for p in _patch_all if p not in __all__]) + _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py index 403b272dee2c..65f27adc9ec9 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_configuration.py @@ -102,7 +102,7 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCr self.endpoint = endpoint self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ml.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) @@ -157,7 +157,7 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCr self.endpoint = endpoint self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ml.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 80fa6cef0bf3..4a7deff4e26b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -43,9 +43,7 @@ def build_chat_completions_complete_request( - *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - **kwargs: Any + *, unknown_params: Optional[Union[str, _models.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -89,10 +87,8 @@ def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, 
headers=_headers, **kwargs) -def build_embeddings_embedding_request( - *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - **kwargs: Any +def build_embeddings_embed_request( + *, unknown_params: Optional[Union[str, _models.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -136,10 +132,8 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_image_embeddings_embedding_request( - *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, - **kwargs: Any +def build_image_embeddings_embed_request( + *, unknown_params: Optional[Union[str, _models.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -190,7 +184,7 @@ def _complete( self, body: JSON, *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -199,7 +193,7 @@ def _complete( self, *, messages: List[_models.ChatRequestMessage], - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -221,7 +215,7 @@ def _complete( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -232,7 +226,7 @@ def _complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, @@ -249,7 +243,6 @@ def _complete( **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long - # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" @@ -264,7 +257,9 @@ def _complete( Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative @@ -402,8 +397,6 @@ def _complete( "id": "str", # A unique identifier associated with this chat completions response. Required. 
"model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. "usage": { "capacity_type": "str", # Indicates whether your capacity has been affected by the usage amount (token count) reported here. Required. Known @@ -569,7 +562,7 @@ def _embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -578,7 +571,7 @@ def _embed( self, *, input: List[str], - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -590,7 +583,7 @@ def _embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -601,7 +594,7 @@ def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -617,7 +610,9 @@ def _embed( of strings or array of token arrays. Required. :paramtype input: list[str] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. @@ -672,16 +667,12 @@ def _embed( These represent a measurement of the vector-based relatedness of the provided input. Required. ], - "index": 0, # Index of the prompt to which the EmbeddingItem + "index": 0 # Index of the prompt to which the EmbeddingItem corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. } ], "id": "str", # Unique identifier for the embeddings result. Required. "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. "usage": { "capacity_type": "str", # Indicates whether your capacity has been affected by the usage amount (token count) reported here. Required. 
Known @@ -728,7 +719,7 @@ def _embed(
 else:
 _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore

- _request = build_embeddings_embedding_request(
+ _request = build_embeddings_embed_request(
 unknown_params=unknown_params,
 content_type=content_type,
 api_version=self._config.api_version,
@@ -837,43 +828,237 @@ def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo:
 class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC):

 @overload
- def _embed(
+ def embed(
 self,
 body: JSON,
 *,
- unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None,
+ unknown_params: Optional[Union[str, _models.UnknownParams]] = None,
 content_type: str = "application/json",
 **kwargs: Any
- ) -> _models.EmbeddingsResult: ...
+ ) -> _models.EmbeddingsResult:
+ # pylint: disable=line-too-long
+ """Return the embeddings for given images.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON
+ request payload.
+ This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop",
+ and "pass_through". Default value is None.
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
+ :rtype: ~azure.ai.inference.models.EmbeddingsResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "input": [
+ {
+ "image": "str", # The input image, in PNG format. Required.
+ "text": "str" # Optional. The text input to feed
+ into the model (like DINO, CLIP). Returns a 422 error if the model
+ doesn't support the value or parameter.
+ }
+ ],
+ "dimensions": 0, # Optional. The number of dimensions the
+ resulting output embeddings should have. Passing null causes the model to use its
+ default value. Returns a 422 error if the model doesn't support the value or
+ parameter.
+ "encoding_format": "str", # Optional. The format to return the
+ resulting output embeddings in. Passing null causes the model to use its
+ default value. Returns a 422 error if the model doesn't support the value or
+ parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and
+ "uint8".
+ "input_type": "str" # Optional. The type of the input. Returns a
+ 422 error if the model doesn't support the value or parameter. Known values are:
+ "text", "query", and "document".
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "data": [
+ {
+ "embedding": [
+ 0.0 # List of embeddings value for the input prompt.
+ These represent a measurement of the vector-based relatedness of the
+ provided input. Required.
+ ],
+ "index": 0 # Index of the prompt to which the EmbeddingItem
+ corresponds. Required.
+ }
+ ],
+ "id": "str", # Unique identifier for the embeddings result. Required.
+ "model": "str", # The model ID used to generate this result. Required.
+ "usage": {
+ "capacity_type": "str", # Indicates whether your capacity has been
+ affected by the usage amount (token count) reported here. Required. Known
+ values are: "usage" and "fixed".
+ "input_tokens": 0, # Number of tokens in the request prompt.
+ Required.
+ "prompt_tokens": 0, # Number of tokens used for the prompt sent to
+ the AI model. Typically identical to ``input_tokens``. However, certain AI
+ models may add extra tokens to the input hence the number can be higher. (for
+ example when input_type="query"). Required.
+ "total_tokens": 0 # Total number of tokens transacted in this
+ request/response. Required.
+ }
+ }
+ """
+
 @overload
- def _embed(
+ def embed(
 self,
 *,
 input: List[_models.EmbeddingInput],
- unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None,
+ unknown_params: Optional[Union[str, _models.UnknownParams]] = None,
 content_type: str = "application/json",
 dimensions: Optional[int] = None,
 encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None,
 input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
 **kwargs: Any
- ) -> _models.EmbeddingsResult: ...
+ ) -> _models.EmbeddingsResult:
+ # pylint: disable=line-too-long
+ """Return the embeddings for given images.
+
+ :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an
+ array.
+ The input must not exceed the max input tokens for the model. Required.
+ :paramtype input: list[~azure.ai.inference.models.EmbeddingInput]
+ :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON
+ request payload.
+ This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop",
+ and "pass_through". Default value is None.
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should
+ have.
+ Passing null causes the model to use its default value.
+ Returns a 422 error if the model doesn't support the value or parameter. Default value is
+ None.
+ :paramtype dimensions: int
+ :keyword encoding_format: Optional. The format to return the resulting output embeddings in.
+ Passing null causes the model to use its default value.
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None.
+ :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat
+ :keyword input_type: Optional. The type of the input.
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ "text", "query", and "document". Default value is None.
+ :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType
+ :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
+ :rtype: ~azure.ai.inference.models.EmbeddingsResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "data": [
+ {
+ "embedding": [
+ 0.0 # List of embeddings value for the input prompt.
+ These represent a measurement of the vector-based relatedness of the
+ provided input. Required.
+ ],
+ "index": 0 # Index of the prompt to which the EmbeddingItem
+ corresponds. Required.
+ }
+ ],
+ "id": "str", # Unique identifier for the embeddings result. Required.
+ "model": "str", # The model ID used to generate this result. Required.
+ "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ + @overload - def _embed( + def embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: ... + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. + :type body: IO[bytes] + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0 # Index of the prompt to which the EmbeddingItem + corresponds. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ @distributed_trace - def _embed( + def embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -889,7 +1074,9 @@ def _embed( The input must not exceed the max input tokens for the model. Required. 
:paramtype input: list[~azure.ai.inference.models.EmbeddingInput] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. @@ -947,16 +1134,12 @@ def _embed( These represent a measurement of the vector-based relatedness of the provided input. Required. ], - "index": 0, # Index of the prompt to which the EmbeddingItem + "index": 0 # Index of the prompt to which the EmbeddingItem corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. } ], "id": "str", # Unique identifier for the embeddings result. Required. "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. "usage": { "capacity_type": "str", # Indicates whether your capacity has been affected by the usage amount (token count) reported here. Required. Known @@ -1003,7 +1186,7 @@ def _embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_image_embeddings_embedding_request( + _request = build_image_embeddings_embed_request( unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index ae5decbc55ed..d1b78fcf1f09 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -36,8 +36,8 @@ from ._serialization import Serializer from ._operations._operations import ( build_chat_completions_complete_request, - build_embeddings_embedding_request, - build_image_embeddings_embedding_request, + build_embeddings_embed_request, + build_image_embeddings_embed_request, ) from ._client import ChatCompletionsClient as ChatCompletionsClientGenerated from ._client import EmbeddingsClient as EmbeddingsClientGenerated @@ -79,8 +79,8 @@ def load_client( if model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): embedding_client = EmbeddingsClient(endpoint, credential, **kwargs) embedding_client._model_info = model_info # pylint: disable=protected-access - return embedding_client - + return embedding_client + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: image_embedding_client = ImageEmbeddingsClient(endpoint, credential, **kwargs) image_embedding_client._model_info = model_info # pylint: disable=protected-access @@ -89,7 +89,6 @@ def load_client( raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") - class ChatCompletionsClient(ChatCompletionsClientGenerated): @overload @@ -357,7 +356,7 @@ def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:Union[_models._enums.UnknownParams, None] = None + _unknown_params: Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -380,7 +379,7 @@ def 
complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params=_models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -595,7 +594,7 @@ def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:Union[_models._enums.UnknownParams, None] = None + _unknown_params: Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -610,7 +609,7 @@ def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params=_models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -619,7 +618,7 @@ def embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_embeddings_embedding_request( + _request = build_embeddings_embed_request( unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -672,6 +671,7 @@ def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload @@ -824,7 +824,7 @@ def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:Union[_models._enums.UnknownParams, None] = None + _unknown_params: Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -839,7 +839,7 @@ def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params=_models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -848,7 +848,7 @@ def embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_image_embeddings_embedding_request( + _request = build_image_embeddings_embed_request( unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py index e9e1b0469645..c31764c00803 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/__init__.py @@ -6,22 +6,20 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._client import ChatCompletionsClient -from ._client import EmbeddingsClient -from ._client import ImageEmbeddingsClient +from ._patch import ChatCompletionsClient +from ._patch import EmbeddingsClient +from ._patch import ImageEmbeddingsClient -try: - from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] + +from ._patch import load_client from ._patch import patch_sdk as _patch_sdk __all__ = [ + "load_client", "ChatCompletionsClient", "EmbeddingsClient", "ImageEmbeddingsClient", ] -__all__.extend([p for p in _patch_all if p not in __all__]) + _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py index 34483b59956d..15f6105a1624 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_configuration.py @@ -49,7 +49,7 @@ def __init__( self.endpoint = endpoint self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ml.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) @@ -106,7 +106,7 @@ def __init__( self.endpoint = endpoint self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ml.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) @@ -163,7 +163,7 @@ def __init__( self.endpoint = endpoint self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ml.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-inference/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 4d09671ec2a9..e529655c4805 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -29,9 +29,9 @@ from ..._operations._operations import ( build_chat_completions_complete_request, build_chat_completions_get_model_info_request, - build_embeddings_embedding_request, + build_embeddings_embed_request, build_embeddings_get_model_info_request, - build_image_embeddings_embedding_request, + build_image_embeddings_embed_request, build_image_embeddings_get_model_info_request, ) from .._vendor import ChatCompletionsClientMixinABC, EmbeddingsClientMixinABC, ImageEmbeddingsClientMixinABC @@ -53,7 +53,7 @@ async def _complete( self, body: JSON, *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, 
_models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -62,7 +62,7 @@ async def _complete( self, *, messages: List[_models.ChatRequestMessage], - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -84,7 +84,7 @@ async def _complete( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -95,7 +95,7 @@ async def _complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, @@ -112,7 +112,6 @@ async def _complete( **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long - # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" @@ -127,8 +126,9 @@ async def _complete( Assistant roles. Required. :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. - The service defaults to "error" in this case. + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword frequency_penalty: A value that influences the probability of generated tokens appearing based on their cumulative @@ -266,8 +266,6 @@ async def _complete( "id": "str", # A unique identifier associated with this chat completions response. Required. "model": "str", # The model used for the chat completion. Required. - "object": "str", # The response object type, which is always - ``chat.completion``. Required. "usage": { "capacity_type": "str", # Indicates whether your capacity has been affected by the usage amount (token count) reported here. Required. Known @@ -433,7 +431,7 @@ async def _embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... 
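The ``unknown_params`` keyword threaded through the request builders and overloads above maps directly onto the ``unknown-parameters`` HTTP request header: ``error`` (the service default) rejects unknown JSON keys, ``drop`` discards them, and ``pass_through`` forwards them to the back-end model. A minimal sketch of how a caller exercises this through the patched public surface, assuming the ``complete`` method and ``model_extras`` keyword added in ``_patch.py``; the environment variable names and the ``extra_param`` key are placeholders, not part of this patch:

.. code-block:: python

    import os

    from azure.ai.inference import ChatCompletionsClient
    from azure.ai.inference.models import SystemMessage, UserMessage
    from azure.core.credentials import AzureKeyCredential

    client = ChatCompletionsClient(
        endpoint=os.environ["AZUREAI_ENDPOINT_URL"],  # placeholder
        credential=AzureKeyCredential(os.environ["AZUREAI_ENDPOINT_KEY"]),  # placeholder
    )

    # "extra_param" is a hypothetical model-specific setting that is not part of
    # the service schema. Because model_extras is non-empty, the patched
    # complete() merges it into the JSON request body and sets the request
    # header `unknown-parameters: pass_through`, so the service forwards the
    # extra key to the model instead of failing the call (the "error" default).
    response = client.complete(
        messages=[
            SystemMessage(content="You are a helpful assistant."),
            UserMessage(content="How many feet are in a mile?"),
        ],
        model_extras={"extra_param": 1},
    )
    print(response.choices[0].message.content)

The aio clients patched below mirror the same behavior behind ``await client.complete(...)``.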
@@ -442,7 +440,7 @@ async def _embed( self, *, input: List[str], - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -454,7 +452,7 @@ async def _embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -465,7 +463,7 @@ async def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -481,7 +479,9 @@ async def _embed( of strings or array of token arrays. Required. :paramtype input: list[str] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. @@ -536,16 +536,12 @@ async def _embed( These represent a measurement of the vector-based relatedness of the provided input. Required. ], - "index": 0, # Index of the prompt to which the EmbeddingItem + "index": 0 # Index of the prompt to which the EmbeddingItem corresponds. Required. - "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. } ], "id": "str", # Unique identifier for the embeddings result. Required. "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. "usage": { "capacity_type": "str", # Indicates whether your capacity has been affected by the usage amount (token count) reported here. Required. Known @@ -592,7 +588,7 @@ async def _embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_embeddings_embedding_request( + _request = build_embeddings_embed_request( unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -701,43 +697,237 @@ async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - async def _embed( + async def embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: ... + ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. 
+ :type body: JSON
+ :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON
+ request payload.
+ This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop",
+ and "pass_through". Default value is None.
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
+ :rtype: ~azure.ai.inference.models.EmbeddingsResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # JSON input template you can fill out and use as your body input.
+ body = {
+ "input": [
+ {
+ "image": "str", # The input image, in PNG format. Required.
+ "text": "str" # Optional. The text input to feed
+ into the model (like DINO, CLIP). Returns a 422 error if the model
+ doesn't support the value or parameter.
+ }
+ ],
+ "dimensions": 0, # Optional. The number of dimensions the
+ resulting output embeddings should have. Passing null causes the model to use its
+ default value. Returns a 422 error if the model doesn't support the value or
+ parameter.
+ "encoding_format": "str", # Optional. The format to return the
+ resulting output embeddings in. Passing null causes the model to use its
+ default value. Returns a 422 error if the model doesn't support the value or
+ parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and
+ "uint8".
+ "input_type": "str" # Optional. The type of the input. Returns a
+ 422 error if the model doesn't support the value or parameter. Known values are:
+ "text", "query", and "document".
+ }
+
+ # response body for status code(s): 200
+ response == {
+ "data": [
+ {
+ "embedding": [
+ 0.0 # List of embeddings value for the input prompt.
+ These represent a measurement of the vector-based relatedness of the
+ provided input. Required.
+ ],
+ "index": 0 # Index of the prompt to which the EmbeddingItem
+ corresponds. Required.
+ }
+ ],
+ "id": "str", # Unique identifier for the embeddings result. Required.
+ "model": "str", # The model ID used to generate this result. Required.
+ "usage": {
+ "capacity_type": "str", # Indicates whether your capacity has been
+ affected by the usage amount (token count) reported here. Required. Known
+ values are: "usage" and "fixed".
+ "input_tokens": 0, # Number of tokens in the request prompt.
+ Required.
+ "prompt_tokens": 0, # Number of tokens used for the prompt sent to
+ the AI model. Typically identical to ``input_tokens``. However, certain AI
+ models may add extra tokens to the input hence the number can be higher. (for
+ example when input_type="query"). Required.
+ "total_tokens": 0 # Total number of tokens transacted in this
+ request/response. Required.
+ }
+ }
+ """
+
 @overload
- async def _embed(
+ async def embed(
 self,
 *,
 input: List[_models.EmbeddingInput],
- unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None,
+ unknown_params: Optional[Union[str, _models.UnknownParams]] = None,
 content_type: str = "application/json",
 dimensions: Optional[int] = None,
 encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None,
 input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
 **kwargs: Any
- ) -> _models.EmbeddingsResult: ...
+ ) -> _models.EmbeddingsResult:
+ # pylint: disable=line-too-long
+ """Return the embeddings for given images.
+
+ :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an
+ array.
+ The input must not exceed the max input tokens for the model. Required.
+ :paramtype input: list[~azure.ai.inference.models.EmbeddingInput]
+ :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON
+ request payload.
+ This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop",
+ and "pass_through". Default value is None.
+ :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should
+ have.
+ Passing null causes the model to use its default value.
+ Returns a 422 error if the model doesn't support the value or parameter. Default value is
+ None.
+ :paramtype dimensions: int
+ :keyword encoding_format: Optional. The format to return the resulting output embeddings in.
+ Passing null causes the model to use its default value.
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None.
+ :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat
+ :keyword input_type: Optional. The type of the input.
+ Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ "text", "query", and "document". Default value is None.
+ :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType
+ :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping
+ :rtype: ~azure.ai.inference.models.EmbeddingsResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+
+ Example:
+ .. code-block:: python
+
+ # response body for status code(s): 200
+ response == {
+ "data": [
+ {
+ "embedding": [
+ 0.0 # List of embeddings value for the input prompt.
+ These represent a measurement of the vector-based relatedness of the
+ provided input. Required.
+ ],
+ "index": 0 # Index of the prompt to which the EmbeddingItem
+ corresponds. Required.
+ }
+ ],
+ "id": "str", # Unique identifier for the embeddings result. Required.
+ "model": "str", # The model ID used to generate this result. Required.
+ "usage": {
+ "capacity_type": "str", # Indicates whether your capacity has been
+ affected by the usage amount (token count) reported here. Required. Known
+ values are: "usage" and "fixed".
+ "input_tokens": 0, # Number of tokens in the request prompt.
+ Required.
+ "prompt_tokens": 0, # Number of tokens used for the prompt sent to
+ the AI model. Typically identical to ``input_tokens``. However, certain AI
+ models may add extra tokens to the input hence the number can be higher. (for
+ example when input_type="query"). Required.
+ "total_tokens": 0 # Total number of tokens transacted in this
+ request/response. Required.
+ }
+ }
+ """
+
 @overload
- async def _embed(
+ async def embed(
 self,
 body: IO[bytes],
 *,
- unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None,
+ unknown_params: Optional[Union[str, _models.UnknownParams]] = None,
 content_type: str = "application/json",
 **kwargs: Any
- ) -> _models.EmbeddingsResult: ...
+ ) -> _models.EmbeddingsResult: + # pylint: disable=line-too-long + """Return the embeddings for given images. + + :param body: Required. + :type body: IO[bytes] + :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. + :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping + :rtype: ~azure.ai.inference.models.EmbeddingsResult + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "data": [ + { + "embedding": [ + 0.0 # List of embeddings value for the input prompt. + These represent a measurement of the vector-based relatedness of the + provided input. Required. + ], + "index": 0 # Index of the prompt to which the EmbeddingItem + corresponds. Required. + } + ], + "id": "str", # Unique identifier for the embeddings result. Required. + "model": "str", # The model ID used to generate this result. Required. + "usage": { + "capacity_type": "str", # Indicates whether your capacity has been + affected by the usage amount (token count) reported here. Required. Known + values are: "usage" and "fixed". + "input_tokens": 0, # Number of tokens in the request prompt. + Required. + "prompt_tokens": 0, # Number of tokens used for the prompt sent to + the AI model. Typically identical to ``input_tokens``. However, certain AI + models may add extra tokens to the input hence the number can be higher. (for + example when input_type="query"). Required. + "total_tokens": 0 # Total number of tokens transacted in this + request/response. Required. + } + } + """ @distributed_trace_async - async def _embed( + async def embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, - unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, @@ -753,7 +943,9 @@ async def _embed( The input must not exceed the max input tokens for the model. Required. :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. Known values are: "error", "ignore", and "allow". Default value is None. + request payload. + This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", + and "pass_through". Default value is None. :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should have. @@ -811,16 +1003,12 @@ async def _embed( These represent a measurement of the vector-based relatedness of the provided input. Required. ], - "index": 0, # Index of the prompt to which the EmbeddingItem + "index": 0 # Index of the prompt to which the EmbeddingItem corresponds. Required. 
- "object": "str" # The object type of this embeddings item. - Will always be ``embedding``. Required. } ], "id": "str", # Unique identifier for the embeddings result. Required. "model": "str", # The model ID used to generate this result. Required. - "object": "str", # The object type of the embeddings result. Will always be - ``list``. Required. "usage": { "capacity_type": "str", # Indicates whether your capacity has been affected by the usage amount (token count) reported here. Required. Known @@ -867,7 +1055,7 @@ async def _embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_image_embeddings_embedding_request( + _request = build_image_embeddings_embed_request( unknown_params=unknown_params, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 5e0858b88c3a..ff991c091183 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -32,8 +32,8 @@ from ._client import ImageEmbeddingsClient as ImageEmbeddingsClientGenerated from .._operations._operations import ( build_chat_completions_complete_request, - build_embeddings_embedding_request, - build_image_embeddings_embedding_request, + build_embeddings_embed_request, + build_image_embeddings_embed_request, ) if sys.version_info >= (3, 9): @@ -68,8 +68,8 @@ async def load_client( if model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): embedding_client = EmbeddingsClient(endpoint, credential, **kwargs) embedding_client._model_info = model_info # pylint: disable=protected-access - return embedding_client - + return embedding_client + if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: image_embedding_client = ImageEmbeddingsClient(endpoint, credential, **kwargs) image_embedding_client._model_info = model_info # pylint: disable=protected-access @@ -361,7 +361,7 @@ async def complete( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:Union[_models._enums.UnknownParams, None] = None + _unknown_params: Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -385,7 +385,7 @@ async def complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params=_models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -600,7 +600,7 @@ async def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:Union[_models._enums.UnknownParams, None] = None + _unknown_params: Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -615,7 +615,7 @@ async def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params=_models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = 
None @@ -624,7 +624,7 @@ async def embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_embeddings_embedding_request( + _request = build_embeddings_embed_request( unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, @@ -830,7 +830,7 @@ async def embed( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - _unknown_params:Union[_models._enums.UnknownParams, None] = None + _unknown_params: Union[_models._enums.UnknownParams, None] = None content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) @@ -845,7 +845,7 @@ async def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params=_models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -854,7 +854,7 @@ async def embed( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_image_embeddings_embedding_request( + _request = build_image_embeddings_embed_request( unknown_params=_unknown_params, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index 6cb58ad49a4f..aff95c6a3c99 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -8,14 +8,12 @@ from ._models import AssistantMessage from ._models import ChatChoice -from ._models import StreamingChatChoiceUpdate from ._models import ChatCompletions from ._models import ChatCompletionsFunctionToolCall from ._models import ChatCompletionsFunctionToolDefinition from ._models import ChatCompletionsNamedToolSelection from ._models import ChatCompletionsToolCall from ._models import ChatCompletionsToolDefinition -from ._models import StreamingChatCompletionsUpdate from ._models import ChatRequestMessage from ._models import ChatResponseMessage from ._models import CompletionsUsage @@ -26,6 +24,8 @@ from ._models import FunctionCall from ._models import FunctionDefinition from ._models import ModelInfo +from ._models import StreamingChatChoiceUpdate +from ._models import StreamingChatCompletionsUpdate from ._models import SystemMessage from ._models import ToolMessage from ._models import UserMessage @@ -33,49 +33,52 @@ from ._enums import CapacityType from ._enums import ChatCompletionsResponseFormat from ._enums import ChatCompletionsToolSelectionPreset -from ._enums import CompletionsFinishReason from ._enums import ChatRole from ._enums import EmbeddingEncodingFormat from ._enums import EmbeddingInputType -from ._enums import ModelType from ._enums import UnknownParams -from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import +from ._enums import CompletionsFinishReason +from ._enums import ModelType + +from ._patch import StreamingChatCompletions +from ._patch import AsyncStreamingChatCompletions from ._patch import patch_sdk as _patch_sdk __all__ = [ + "StreamingChatCompletions", + "AsyncStreamingChatCompletions", "AssistantMessage", - "CapacityType", "ChatChoice", - "StreamingChatChoiceUpdate", "ChatCompletions", 
"ChatCompletionsFunctionToolCall", "ChatCompletionsFunctionToolDefinition", "ChatCompletionsNamedToolSelection", - "ChatCompletionsResponseFormat", "ChatCompletionsToolCall", "ChatCompletionsToolDefinition", - "ChatCompletionsToolSelectionPreset", - "StreamingChatCompletionsUpdate", "ChatRequestMessage", "ChatResponseMessage", - "ChatRole", - "CompletionsFinishReason", "CompletionsUsage", - "EmbeddingEncodingFormat", "EmbeddingInput", - "EmbeddingInputType", "EmbeddingItem", "EmbeddingsResult", "EmbeddingsUsage", "FunctionCall", "FunctionDefinition", "ModelInfo", - "ModelType", + "StreamingChatChoiceUpdate", + "StreamingChatCompletionsUpdate", "SystemMessage", "ToolMessage", - "UnknownParams", "UserMessage", + "CapacityType", + "ChatCompletionsResponseFormat", + "ChatCompletionsToolSelectionPreset", + "ChatRole", + "EmbeddingEncodingFormat", + "EmbeddingInputType", + "UnknownParams", + "CompletionsFinishReason", + "ModelType" ] -__all__.extend([p for p in _patch_all if p not in __all__]) + _patch_sdk() diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index 6867b680bd61..a74712006c24 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -128,9 +128,7 @@ class UnknownParams(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The service will error if it detected unknown parameters in the request payload. This is the service default.""" DROP = "drop" - """ - The service will ignore (drop) unknown parameters in the request payload. - It will only pass the known parameters to the back-end AI model. - """ + """The service will ignore (drop) unknown parameters in the request payload. It will only pass the + known parameters to the back-end AI model.""" PASS_THROUGH = "pass_through" """The service will pass unknown parameters to the back-end AI model.""" diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 5345b6424c53..32670c1b32fa 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -71,7 +71,7 @@ class AssistantMessage(ChatRequestMessage, discriminator="assistant"): assistant messages. Required. The role that provides responses to system-instructed, user-prompted input. :vartype role: str or ~azure.ai.inference.models.ASSISTANT - :ivar content: The content of the message. Required. + :ivar content: The content of the message. :vartype content: str :ivar tool_calls: The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat @@ -83,7 +83,7 @@ class AssistantMessage(ChatRequestMessage, discriminator="assistant"): """The chat role associated with this message, which is always 'assistant' for assistant messages. Required. The role that provides responses to system-instructed, user-prompted input.""" content: Optional[str] = rest_field() - """The content of the message. 
Required.""" + """The content of the message.""" tool_calls: Optional[List["_models.ChatCompletionsToolCall"]] = rest_field() """The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat @@ -153,51 +153,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class StreamingChatChoiceUpdate(_model_base.Model): - """Represents an update to a single prompt completion when the service is streaming updates - using Server Sent Events (SSE). - Generally, ``n`` choices are generated per provided prompt with a default value of 1. - Token limits and other settings may limit the number of choices generated. - - All required parameters must be populated in order to send to server. - - :ivar index: The ordered index associated with this chat completions choice. Required. - :vartype index: int - :ivar finish_reason: The reason that this chat completions choice completed its generated. - Required. Known values are: "stop", "length", "content_filter", and "tool_calls". - :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason - :ivar delta: An update to the chat message for a given chat completions prompt. Required. - :vartype delta: ~azure.ai.inference.models.ChatResponseMessage - """ - - index: int = rest_field() - """The ordered index associated with this chat completions choice. Required.""" - finish_reason: Union[str, "_models._enums.CompletionsFinishReason"] = rest_field() - """The reason that this chat completions choice completed its generated. Required. Known values - are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\".""" - delta: "_models.ChatResponseMessage" = rest_field() - """An update to the chat message for a given chat completions prompt. Required.""" - - @overload - def __init__( - self, - *, - index: int, - finish_reason: Union[str, "_models._enums.CompletionsFinishReason"], - delta: "_models.ChatResponseMessage", - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class ChatCompletions(_model_base.Model): """Representation of the response data from a chat completions request. Completions support a wide variety of tasks and generate text that continues from or @@ -425,71 +380,6 @@ class ChatCompletionsNamedToolSelection(_model_base.Model): """The object type. Required.""" -class StreamingChatCompletionsUpdate(_model_base.Model): - """Represents a response update to a chat completions request, when the service is streaming - updates - using Server Sent Events (SSE). - Completions support a wide variety of tasks and generate text that continues from or - "completes" - provided prompt data. - - All required parameters must be populated in order to send to server. - - :ivar id: A unique identifier associated with this chat completions response. Required. - :vartype id: str - :ivar created: The first timestamp associated with generation activity for this completions - response, - represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. - :vartype created: ~datetime.datetime - :ivar model: The model used for the chat completion. Required. 
- :vartype model: str
- :ivar usage: Usage information for tokens processed and generated as part of this completions
- operation. Required.
- :vartype usage: ~azure.ai.inference.models.CompletionsUsage
- :ivar choices: An update to the collection of completion choices associated with this
- completions response.
- Generally, ``n`` choices are generated per provided prompt with a default value of 1.
- Token limits and other settings may limit the number of choices generated. Required.
- :vartype choices: list[~azure.ai.inference.models.StreamingChatChoiceUpdate]
- """
-
- id: str = rest_field()
- """A unique identifier associated with this chat completions response. Required."""
- created: datetime.datetime = rest_field(format="unix-timestamp")
- """The first timestamp associated with generation activity for this completions response,
- represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required."""
- model: str = rest_field()
- """The model used for the chat completion. Required."""
- usage: "_models.CompletionsUsage" = rest_field()
- """Usage information for tokens processed and generated as part of this completions operation.
- Required."""
- choices: List["_models.StreamingChatChoiceUpdate"] = rest_field()
- """An update to the collection of completion choices associated with this completions response.
- Generally, ``n`` choices are generated per provided prompt with a default value of 1.
- Token limits and other settings may limit the number of choices generated. Required."""
-
- @overload
- def __init__(
- self,
- *,
- id: str, # pylint: disable=redefined-builtin
- created: datetime.datetime,
- model: str,
- usage: "_models.CompletionsUsage",
- choices: List["_models.StreamingChatChoiceUpdate"],
- ): ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]):
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
 class ChatResponseMessage(_model_base.Model):
 """A representation of a chat message as received in a response.

@@ -896,6 +786,116 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles
 super().__init__(*args, **kwargs)


+class StreamingChatChoiceUpdate(_model_base.Model):
+ """Represents an update to a single prompt completion when the service is streaming updates
+ using Server Sent Events (SSE).
+ Generally, ``n`` choices are generated per provided prompt with a default value of 1.
+ Token limits and other settings may limit the number of choices generated.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar index: The ordered index associated with this chat completions choice. Required.
+ :vartype index: int
+ :ivar finish_reason: The reason that this chat completions choice completed its generation.
+ Required. Known values are: "stop", "length", "content_filter", and "tool_calls".
+ :vartype finish_reason: str or ~azure.ai.inference.models.CompletionsFinishReason
+ :ivar delta: An update to the chat message for a given chat completions prompt. Required.
+ :vartype delta: ~azure.ai.inference.models.ChatResponseMessage
+ """
+
+ index: int = rest_field()
+ """The ordered index associated with this chat completions choice. Required."""
+ finish_reason: Union[str, "_models._enums.CompletionsFinishReason"] = rest_field()
+ """The reason that this chat completions choice completed its generation.
Required. Known values + are: \"stop\", \"length\", \"content_filter\", and \"tool_calls\".""" + delta: "_models.ChatResponseMessage" = rest_field() + """An update to the chat message for a given chat completions prompt. Required.""" + + @overload + def __init__( + self, + *, + index: int, + finish_reason: Union[str, "_models._enums.CompletionsFinishReason"], + delta: "_models.ChatResponseMessage", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class StreamingChatCompletionsUpdate(_model_base.Model): + """Represents a response update to a chat completions request, when the service is streaming + updates + using Server Sent Events (SSE). + Completions support a wide variety of tasks and generate text that continues from or + "completes" + provided prompt data. + + All required parameters must be populated in order to send to server. + + :ivar id: A unique identifier associated with this chat completions response. Required. + :vartype id: str + :ivar created: The first timestamp associated with generation activity for this completions + response, + represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required. + :vartype created: ~datetime.datetime + :ivar model: The model used for the chat completion. Required. + :vartype model: str + :ivar usage: Usage information for tokens processed and generated as part of this completions + operation. Required. + :vartype usage: ~azure.ai.inference.models.CompletionsUsage + :ivar choices: An update to the collection of completion choices associated with this + completions response. + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. Required. + :vartype choices: list[~azure.ai.inference.models.StreamingChatChoiceUpdate] + """ + + id: str = rest_field() + """A unique identifier associated with this chat completions response. Required.""" + created: datetime.datetime = rest_field(format="unix-timestamp") + """The first timestamp associated with generation activity for this completions response, + represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. Required.""" + model: str = rest_field() + """The model used for the chat completion. Required.""" + usage: "_models.CompletionsUsage" = rest_field() + """Usage information for tokens processed and generated as part of this completions operation. + Required.""" + choices: List["_models.StreamingChatChoiceUpdate"] = rest_field() + """An update to the collection of completion choices associated with this completions response. + Generally, ``n`` choices are generated per provided prompt with a default value of 1. + Token limits and other settings may limit the number of choices generated. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created: datetime.datetime, + model: str, + usage: "_models.CompletionsUsage", + choices: List["_models.StreamingChatChoiceUpdate"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class SystemMessage(ChatRequestMessage, discriminator="system"): """A request chat message containing system instructions that influence how the model will generate a chat completions diff --git a/sdk/ai/azure-ai-inference/pyproject.toml b/sdk/ai/azure-ai-inference/pyproject.toml new file mode 100644 index 000000000000..0817f7c7a6c2 --- /dev/null +++ b/sdk/ai/azure-ai-inference/pyproject.toml @@ -0,0 +1,2 @@ +[tool.generate] +autorest-post-process = true diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py index 903f843dbcae..5a0f3a6408cd 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py @@ -21,6 +21,7 @@ import asyncio import io + async def sample_chat_completions_from_input_bytes_async(): import os @@ -34,7 +35,7 @@ async def sample_chat_completions_from_input_bytes_async(): from azure.ai.inference.aio import ChatCompletionsClient from azure.core.credentials import AzureKeyCredential - + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Make a chat completion call, by directly providing the @@ -52,6 +53,7 @@ def read_text_file(file_name: str) -> io.BytesIO: The file is expected to be in the same directory as this Python script. """ from pathlib import Path + with Path(__file__).with_name(file_name).open("r") as f: return io.BytesIO(f.read().encode("utf-8")) diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py index c9ebda426c80..69222d5146e3 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py @@ -20,6 +20,7 @@ """ import asyncio + async def sample_chat_completions_from_input_json_async(): import os from azure.ai.inference.aio import ChatCompletionsClient @@ -53,9 +54,10 @@ async def sample_chat_completions_from_input_json_async(): response = await client.complete(request_body) print(response.choices[0].message.content) - + await client.close() + async def main(): await sample_chat_completions_from_input_json_async() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py index b9e2de6ddb78..086144976137 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py @@ -33,7 +33,8 @@ python sample_chat_completions_azure_openai.py """ -key_auth:bool = True # Set to True for key authentication, or False for Entra ID authentication. +key_auth: bool = True # Set to True for key authentication, or False for Entra ID authentication. 
+ def sample_chat_completions_azure_openai(): import os @@ -58,20 +59,20 @@ def sample_chat_completions_azure_openai(): exit() client = ChatCompletionsClient( - endpoint=endpoint, - credential=AzureKeyCredential(""), # Pass in an empty value. - headers={"api-key": key}, - api_version="2024-02-15-preview" # AOAI api-version. Update as needed. - ) + endpoint=endpoint, + credential=AzureKeyCredential(""), # Pass in an empty value. + headers={"api-key": key}, + api_version="2024-02-15-preview", # AOAI api-version. Update as needed. + ) - else: # Entra ID authentication + else: # Entra ID authentication from azure.identity import DefaultAzureCredential client = ChatCompletionsClient( - endpoint=endpoint, + endpoint=endpoint, credential=DefaultAzureCredential(), credential_scopes=["https://cognitiveservices.azure.com/.default"], - api_version="2024-02-15-preview" # AOAI api-version. Update as needed. + api_version="2024-02-15-preview", # AOAI api-version. Update as needed. ) response = client.complete( diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index e11d34edca44..f3d681826407 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -20,6 +20,7 @@ """ import io + def sample_chat_completions_from_input_bytes(): import os @@ -33,7 +34,7 @@ def sample_chat_completions_from_input_bytes(): from azure.ai.inference import ChatCompletionsClient from azure.core.credentials import AzureKeyCredential - + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) # Make a chat completion call, by directly providing the @@ -49,6 +50,7 @@ def read_text_file(file_name: str) -> io.BytesIO: The file is expected to be in the same directory as this Python script. """ from pathlib import Path + with Path(__file__).with_name(file_name).open("r") as f: return io.BytesIO(f.read().encode("utf-8")) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index 4daa7bfa95b1..d47328e0f872 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -45,22 +45,20 @@ def sample_chat_completions_from_input_json(): }, { "role": "user", - "content": "What year was construction of the International Space Station mostly done?"}, + "content": "What year was construction of the International Space Station mostly done?", + }, { "role": "assistant", "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", }, - { - "role": "user", - "content": "And what was the estimated cost to build it?" 
- } + {"role": "user", "content": "And what was the estimated cost to build it?"}, ] } ) # [END chat_completions] print(response.choices[0].message.content) - + client.close() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py index 04c3944f1d51..9849f6cc623e 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py @@ -7,8 +7,8 @@ This sample demonstrates how to do chat completions with streaming, using a synchronous client, with an Entra ID authentication. It also shows how to set the optional HTTP request header `azureml-model-deployment`, - which is supported when you deploy a model using "Managed Compute without Azure - AI Content Safety". It can be used to target test deployment during staging, + which is supported when you deploy a model using "Managed Compute Endpoints". + It can be used to target test deployment during staging, instead of the default production deployment. USAGE: @@ -51,14 +51,15 @@ def sample_chat_completions_streaming_with_entra_id_auth(): client = ChatCompletionsClient( endpoint=endpoint, credential=DefaultAzureCredential(), - headers={"azureml-model-deployment": model_deployment}) + headers={"azureml-model-deployment": model_deployment} + ) response = client.complete( + stream=True, messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ], - stream=True + ] ) for update in response: @@ -66,5 +67,6 @@ def sample_chat_completions_streaming_with_entra_id_auth(): client.close() + if __name__ == "__main__": sample_chat_completions_streaming_with_entra_id_auth() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py index c269f9958ac5..b849109a9efc 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py @@ -46,10 +46,7 @@ def sample_chat_completions_with_model_extras(): SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], - model_extras={ # Optional. Additional parameters to pass to the model. - "key1": "value1", - "key2": "value2" - }, + model_extras={"key1": "value1", "key2": "value2"}, # Optional. Additional parameters to pass to the model. 
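+        # These extras are merged into the JSON request body alongside the
+        # standard chat completions parameters before the request is sent.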
) # [END chat_completions] diff --git a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py index 58a9e0990fd9..404ccef53ff9 100644 --- a/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py +++ b/sdk/ai/azure-ai-inference/tests/model_inference_test_base.py @@ -121,7 +121,7 @@ def read_text_file(file_name: str) -> io.BytesIO: """ with Path(__file__).with_name(file_name).open("r") as f: return io.BytesIO(f.read().encode("utf-8")) - + @staticmethod def _print_model_info_result(model_info: sdk.models.ModelInfo): if ModelClientTestBase.PRINT_RESULT: @@ -131,7 +131,9 @@ def _print_model_info_result(model_info: sdk.models.ModelInfo): print("\tmodel_provider_name: {}".format(model_info.model_provider_name)) @staticmethod - def _validate_model_info_result(model_info: sdk.models.ModelInfo, expected_model_type: Union[str, sdk.models.ModelType]): + def _validate_model_info_result( + model_info: sdk.models.ModelInfo, expected_model_type: Union[str, sdk.models.ModelType] + ): assert model_info.model_name is not None assert len(model_info.model_name) > 0 assert model_info.model_provider_name is not None diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 801cdb7887aa..32d777686d7e 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -19,7 +19,7 @@ class TestModelAsyncClient(ModelClientTestBase): # HAPPY PATH TESTS - TEXT EMBEDDINGS # # ********************************************************************************** - """ live test with recording fails for this... why? + """live test with recording fails for this... why? @ServicePreparerEmbeddings() @recorded_by_proxy_async async def test_async_load_embeddings_client(self, **kwargs): @@ -38,7 +38,9 @@ async def test_async_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) response1 = await client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + self._validate_model_info_result( + response1, "embedding" + ) # TODO: This should be ModelType.EMBEDDINGS once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. 
@@ -71,7 +73,9 @@ async def test_async_load_chat_completions_client(self, **kwargs): assert isinstance(client, async_sdk.ChatCompletionsClient) response1 = await client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + self._validate_model_info_result( + response1, "completion" + ) # TODO: This should be ModelType.CHAT once the model is fixed await client.close() @ServicePreparerChatCompletions() @@ -80,7 +84,9 @@ async def test_async_get_model_info_on_chat_client(self, **kwargs): client = self._create_async_chat_client(**kwargs) response1 = await client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + self._validate_model_info_result( + response1, "completion" + ) # TODO: This should be ModelType.CHAT once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. @@ -171,7 +177,7 @@ async def test_async_chat_completions_streaming_with_json_input(self, **kwargs): {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Give me 3 good reasons why I should exercise every day."}, ], - "stream": True + "stream": True, } response = await client.complete(request_body) await self._validate_async_chat_completions_streaming_result(response) diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 2beae8e8d4d9..7d3aaf9237ed 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -27,7 +27,9 @@ def test_load_embeddings_client(self, **kwargs): assert isinstance(client, sdk.EmbeddingsClient) response1 = client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + self._validate_model_info_result( + response1, "embedding" + ) # TODO: This should be ModelType.EMBEDDINGS once the model is fixed client.close() @ServicePreparerEmbeddings() @@ -37,7 +39,9 @@ def test_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_embeddings_client(**kwargs) response1 = client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed + self._validate_model_info_result( + response1, "embedding" + ) # TODO: This should be ModelType.EMBEDDINGS once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. 
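The caching these tests assert is implemented in the patched clients: the first `get_model_info()` call performs the REST request and stores the result, and subsequent calls return it without network I/O. A minimal sketch of that pattern (not the SDK source; `fetch` is a hypothetical stand-in for the client's internal REST call):

```python
# Sketch of the model-info caching pattern the tests above exercise.
class CachingClient:
    def __init__(self, fetch):
        self._fetch = fetch
        self._model_info = None  # populated on first use

    def get_model_info(self):
        if self._model_info is None:  # only the first call hits the service
            self._model_info = self._fetch()
        return self._model_info
```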
@@ -70,7 +74,9 @@ def test_load_chat_completions_client(self, **kwargs): assert isinstance(client, sdk.ChatCompletionsClient) response1 = client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + self._validate_model_info_result( + response1, "completion" + ) # TODO: This should be ModelType.CHAT once the model is fixed client.close() @ServicePreparerChatCompletions() @@ -80,7 +86,9 @@ def test_get_model_info_on_chat_client(self, **kwargs): client = self._create_chat_client(**kwargs) response1 = client.get_model_info() self._print_model_info_result(response1) - self._validate_model_info_result(response1, "completion") # TODO: This should be ModelType.CHAT once the model is fixed + self._validate_model_info_result( + response1, "completion" + ) # TODO: This should be ModelType.CHAT once the model is fixed # Get the model info again. No network calls should be made here, # as the response is cached in the client. @@ -171,7 +179,7 @@ def test_chat_completions_streaming_with_json_input(self, **kwargs): {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Give me 3 good reasons why I should exercise every day."}, ], - "stream": True + "stream": True, } response = client.complete(request_body) self._validate_chat_completions_streaming_result(response) From 89d92d31f646f80dc787a243768763840d59b732 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 31 May 2024 11:04:21 -0700 Subject: [PATCH 090/112] Fix/supress mypy & pyright errors --- sdk/ai/azure-ai-inference/README.md | 15 +++------ .../ai/inference/_operations/_operations.py | 9 +++--- .../azure/ai/inference/_patch.py | 7 ++-- .../inference/aio/_operations/_operations.py | 9 +++--- .../azure/ai/inference/aio/_patch.py | 6 ++-- .../sample_chat_completions_async.py | 3 ++ ...chat_completions_from_input_bytes_async.py | 3 ++ ..._chat_completions_from_input_json_async.py | 3 ++ ...sample_chat_completions_streaming_async.py | 3 ++ .../samples/sample_chat_completions.py | 3 +- .../sample_chat_completions_azure_openai.py | 6 ++-- ...ample_chat_completions_from_input_bytes.py | 5 ++- ...sample_chat_completions_from_input_json.py | 2 ++ .../sample_chat_completions_with_history.py | 2 ++ ...mple_chat_completions_with_model_extras.py | 2 ++ .../sample_chat_completions_with_tools.py | 32 +++++++++++-------- .../samples/sample_load_client.py | 2 ++ 17 files changed, 71 insertions(+), 41 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index d5dd92426d52..900c356d9327 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -204,7 +204,7 @@ print(response.choices[0].message.content) -The following types or messages are supported: `SystemMessage`,`UserMessage`, `AssistantMessage`, `ToolMessage`. +The following types or messages are supported: `SystemMessage`,`UserMessage`, `AssistantMessage`, `ToolMessage` (See sample [sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) for usage of `ToolMessage`). 
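For orientation, a minimal `complete` call using the typed classes might look like the following sketch (it assumes a `client` constructed as shown earlier in this README):

```python
from azure.ai.inference.models import SystemMessage, UserMessage

# Typed equivalent of the dictionary-based request shown next.
response = client.complete(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="How many feet are in a mile?"),
    ]
)
print(response.choices[0].message.content)
```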
Alternativley you can provide the messages as dictionary instead of using the strongly typed classes like `SystemMessage` and `UserMessage`: @@ -220,15 +220,13 @@ response = client.complete( }, { "role": "user", - "content": "What year was construction of the International Space Station mostly done?"}, + "content": "What year was construction of the International Space Station mostly done?", + }, { "role": "assistant", "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", }, - { - "role": "user", - "content": "And what was the estimated cost to build it?" - } + {"role": "user", "content": "And what was the estimated cost to build it?"}, ] } ) @@ -287,10 +285,7 @@ response = client.complete( SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], - model_extras={ # Optional. Additional parameters to pass to the model. - "key1": "value1", - "key2": "value2" - }, + model_extras={"key1": "value1", "key2": "value2"}, # Optional. Additional parameters to pass to the model. ) ``` diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 4a7deff4e26b..952b50430aae 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -243,6 +243,7 @@ def _complete( **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" @@ -828,7 +829,7 @@ def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - def embed( + def _embed( self, body: JSON, *, @@ -912,7 +913,7 @@ def embed( """ @overload - def embed( + def _embed( self, *, input: List[_models.EmbeddingInput], @@ -993,7 +994,7 @@ def embed( """ @overload - def embed( + def _embed( self, body: IO[bytes], *, @@ -1053,7 +1054,7 @@ def embed( """ @distributed_trace - def embed( + def _embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index d1b78fcf1f09..ed6cf110d142 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -228,6 +228,7 @@ def complete( **kwargs: Any, ) -> Union[_models.StreamingChatCompletions, _models.ChatCompletions]: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. 
When using this method with `stream=True`, the response is streamed @@ -379,7 +380,7 @@ def complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -609,7 +610,7 @@ def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -839,7 +840,7 @@ def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index e529655c4805..06c872161cf4 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -112,6 +112,7 @@ async def _complete( **kwargs: Any ) -> _models.ChatCompletions: # pylint: disable=line-too-long + # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. 
Completions support a wide variety of tasks and generate text that continues from or "completes" @@ -697,7 +698,7 @@ async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: class ImageEmbeddingsClientOperationsMixin(ImageEmbeddingsClientMixinABC): @overload - async def embed( + async def _embed( self, body: JSON, *, @@ -781,7 +782,7 @@ async def embed( """ @overload - async def embed( + async def _embed( self, *, input: List[_models.EmbeddingInput], @@ -862,7 +863,7 @@ async def embed( """ @overload - async def embed( + async def _embed( self, body: IO[bytes], *, @@ -922,7 +923,7 @@ async def embed( """ @distributed_trace_async - async def embed( + async def _embed( self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index ff991c091183..79bf8d299b35 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -385,7 +385,7 @@ async def complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -615,7 +615,7 @@ async def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -845,7 +845,7 @@ async def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 881ee89cc716..d7637a030a65 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -17,6 +17,9 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false + import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py index 5a0f3a6408cd..6aaeb654ebae 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py @@ -18,6 +18,9 @@ `your-azure-region` is the Azure region where your model is deployed. 
2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false + import asyncio import io diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py index 69222d5146e3..cb3e6bf40de7 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py @@ -18,6 +18,9 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false + import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index 2b71226ad17c..b866724baa93 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -17,6 +17,9 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false, reportGeneralTypeIssues=false + import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index bd69deae3888..54ef4131994a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -17,7 +17,8 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ - +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_chat_completions(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py index 086144976137..b3318ea725c4 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py @@ -32,8 +32,8 @@ 4. Run the sample: python sample_chat_completions_azure_openai.py """ - -key_auth: bool = True # Set to True for key authentication, or False for Entra ID authentication. +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_chat_completions_azure_openai(): @@ -48,6 +48,8 @@ def sample_chat_completions_azure_openai(): print("Set it before running this sample.") exit() + key_auth = True # Set to True for key authentication, or False for Entra ID authentication. 
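+    # Two auth paths follow: key auth sends an "api-key" request header with an
+    # AzureKeyCredential, while Entra ID auth uses DefaultAzureCredential with
+    # the Cognitive Services scope.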
+ if key_auth: from azure.core.credentials import AzureKeyCredential diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index f3d681826407..ff15f0affe80 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -18,8 +18,11 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -import io +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false + +import io def sample_chat_completions_from_input_bytes(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index d47328e0f872..5e93135c7b09 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -18,6 +18,8 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_chat_completions_from_input_json(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py index 7d3e8d7a74cf..8de268226fab 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -18,6 +18,8 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_chat_completions_with_history(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py index b849109a9efc..3176b2a9cf47 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py @@ -21,6 +21,8 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_chat_completions_with_model_extras(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 03dcc6abb750..9359a87f7c28 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -19,6 +19,8 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_chat_completions_with_tools(): @@ -36,6 +38,7 @@ def sample_chat_completions_with_tools(): from azure.ai.inference import ChatCompletionsClient from azure.ai.inference.models import ( AssistantMessage, + ChatCompletionsFunctionToolCall, ChatCompletionsFunctionToolDefinition, CompletionsFinishReason, FunctionDefinition, @@ -105,24 +108,27 @@ def get_flight_info(origin_city: str, destination_city: str): # Append the previous model response to the chat history messages.append(AssistantMessage(tool_calls=response.choices[0].message.tool_calls)) - # The tools call should be a function call - tool_call = response.choices[0].message.tool_calls[0] - if hasattr(tool_call, "function"): + # The tool should be of type function call. He we assume only one function call is required. + if response.choices[0].message.tool_calls is not None and len(response.choices[0].message.tool_calls) == 1: - function_args = json.loads(tool_call.function.arguments.replace("'", '"')) - print(f"Calling function `{tool_call.function.name}` with arguments {function_args}") - callable_func = locals()[tool_call.function.name] + tool_call = response.choices[0].message.tool_calls[0] - function_response = callable_func(**function_args) - print(f"Function response = {function_response}") + if isinstance(tool_call, ChatCompletionsFunctionToolCall): - # Provide the tool response to the model, by appending it to the chat history - messages.append(ToolMessage(tool_call_id=tool_call.id, content=function_response)) + function_args = json.loads(tool_call.function.arguments.replace("'", '"')) + print(f"Calling function `{tool_call.function.name}` with arguments {function_args}") + callable_func = locals()[tool_call.function.name] - # With the additional tools information on hand, get another response from the model - response = client.complete(messages=messages, tools=[flight_info]) + function_response = callable_func(**function_args) + print(f"Function response = {function_response}") - print(f"Model response = {response.choices[0].message.content}") + # Provide the tool response to the model, by appending it to the chat history + messages.append(ToolMessage(tool_call_id=tool_call.id, content=function_response)) + + # With the additional tools information on hand, get another response from the model + response = client.complete(messages=messages, tools=[flight_info]) + + print(f"Model response = {response.choices[0].message.content}") if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index 683a05cb9c9d..57f0ebc0e305 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -19,6 +19,8 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false def sample_load_client(): From e20d00337d6d5a1ac4b790797a379949277fd79a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 31 May 2024 15:52:03 -0700 Subject: [PATCH 091/112] Fix missing Etra ID auth in load_client --- sdk/ai/azure-ai-inference/CHANGELOG.md | 4 +- .../azure/ai/inference/_patch.py | 58 +++++++++++++------ .../azure/ai/inference/aio/_patch.py | 57 ++++++++++++------ 3 files changed, 81 insertions(+), 38 deletions(-) diff --git a/sdk/ai/azure-ai-inference/CHANGELOG.md b/sdk/ai/azure-ai-inference/CHANGELOG.md index 628743d283a9..c2206f094797 100644 --- a/sdk/ai/azure-ai-inference/CHANGELOG.md +++ b/sdk/ai/azure-ai-inference/CHANGELOG.md @@ -1,5 +1,5 @@ # Release History -## 1.0.0b1 (1970-01-01) +## 1.0.0b1 (2024-06-07) -- Initial version +- Initial beta version diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index ed6cf110d142..3ccc73a150fb 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -18,7 +18,7 @@ import sys from io import IOBase -from typing import Any, Dict, Union, IO, List, Optional, overload, Type +from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator import distributed_trace @@ -26,10 +26,10 @@ from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, + map_error, ResourceExistsError, ResourceNotFoundError, ResourceNotModifiedError, - map_error, ) from . import models as _models from ._model_base import SdkJSONEncoder, _deserialize @@ -47,6 +47,11 @@ from collections.abc import MutableMapping else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() @@ -57,8 +62,25 @@ def load_client( - endpoint: str, credential: AzureKeyCredential, **kwargs: Any + endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: + """ + Load a client from a given endpoint URL. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + :return: ChatCompletionsClient or EmbeddingsClient or ImageEmbeddingsClient + :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient or ~azure.ai.inference.ImageEmbeddingsClient + :raises ~azure.core.exceptions.HttpResponseError + """ client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... 
model_info = client.get_model_info() @@ -191,7 +213,7 @@ def complete( :paramtype seed: int :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -216,7 +238,7 @@ def complete( :paramtype content_type: str :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -242,7 +264,7 @@ def complete( :paramtype content_type: str :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @distributed_trace @@ -345,7 +367,7 @@ def complete( :paramtype seed: int :return: ChatCompletions for non-streaming, or StreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.StreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, @@ -432,7 +454,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ if self._model_info is None: self._model_info = self._get_model_info(**kwargs) @@ -496,7 +518,7 @@ def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -516,7 +538,7 @@ def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -536,7 +558,7 @@ def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @distributed_trace @@ -583,7 +605,7 @@ def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, @@ -662,7 +684,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ if self._model_info is None: self._model_info = self._get_model_info(**kwargs) @@ -726,7 +748,7 @@ def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -746,7 +768,7 @@ def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -766,7 +788,7 @@ def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @distributed_trace @@ -813,7 +835,7 @@ def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, @@ -891,7 +913,7 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ if self._model_info is None: self._model_info = self._get_model_info(**kwargs) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 79bf8d299b35..8ab673684d32 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -12,7 +12,7 @@ import sys from io import IOBase -from typing import Any, Dict, Union, IO, List, Optional, overload, Type +from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator_async import distributed_trace_async @@ -20,10 +20,10 @@ from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, + map_error, ResourceExistsError, ResourceNotFoundError, ResourceNotModifiedError, - map_error, ) from .. 
import models as _models from .._model_base import SdkJSONEncoder, _deserialize @@ -36,6 +36,10 @@ build_image_embeddings_embed_request, ) +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: @@ -46,8 +50,25 @@ async def load_client( - endpoint: str, credential: AzureKeyCredential, **kwargs: Any + endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: + """ + Load a client from a given endpoint URL. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + :return: ChatCompletionsClient or EmbeddingsClient or ImageEmbeddingsClient + :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient or ~azure.ai.inference.ImageEmbeddingsClient + :raises ~azure.core.exceptions.HttpResponseError + """ client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... model_info = await client.get_model_info() @@ -188,7 +209,7 @@ async def complete( :paramtype seed: int :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -213,7 +234,7 @@ async def complete( :paramtype content_type: str :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -238,7 +259,7 @@ async def complete( :paramtype content_type: str :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @distributed_trace_async @@ -349,7 +370,7 @@ async def complete( :paramtype seed: int :return: ChatCompletions for non-streaming, or AsyncStreamingChatCompletions for streaming. :rtype: ~azure.ai.inference.models.ChatCompletions or ~azure.ai.inference.models.AsyncStreamingChatCompletions - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ error_map = { 401: ClientAuthenticationError, @@ -437,7 +458,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :return: ModelInfo. 
The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ if self._model_info is None: self._model_info = await self._get_model_info(**kwargs) @@ -501,7 +522,7 @@ async def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -521,7 +542,7 @@ async def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -541,7 +562,7 @@ async def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @distributed_trace_async @@ -588,7 +609,7 @@ async def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, @@ -667,7 +688,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ if self._model_info is None: self._model_info = await self._get_model_info(**kwargs) @@ -731,7 +752,7 @@ async def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -751,7 +772,7 @@ async def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @overload @@ -771,7 +792,7 @@ async def embed( :paramtype content_type: str :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ @distributed_trace_async @@ -818,7 +839,7 @@ async def embed( :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType :return: EmbeddingsResult. 
The EmbeddingsResult is compatible with MutableMapping :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ error_map: MutableMapping[int, Type[HttpResponseError]] = { 401: ClientAuthenticationError, @@ -897,7 +918,7 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo - :raises ~azure.core.exceptions.HttpResponseError: + :raises ~azure.core.exceptions.HttpResponseError """ if self._model_info is None: self._model_info = await self._get_model_info(**kwargs) From d7ef3b77985b439fe62cf87c10e7da5158278bf1 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 06:58:56 -0700 Subject: [PATCH 092/112] Use vanity link for samples folder --- sdk/ai/azure-ai-inference/README.md | 14 ++++---- sdk/ai/azure-ai-inference/samples/README.md | 40 ++++++++++----------- sdk/ai/azure-ai-inference/tests/README.md | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 900c356d9327..6de0a7de4b14 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -11,10 +11,10 @@ Use the model inference client library to: * Get text embeddings -Note that for inference using OpenAI models hosted on Azure, you should be using the official [OpenAI Python client library](https://github.com/openai/openai-python) in product code instead of this client. However, for development and evaluation purposes (comparing OpenAI models to other models in the Azure AI Studio catalog), you can use the azure-ai-inference Python client library with Azure OpenAI endpoints, as shown [in this sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py). +Note that for inference using OpenAI models hosted on Azure, you should be using the official [OpenAI Python client library](https://github.com/openai/openai-python) in product code instead of this client. However, for development and evaluation purposes (comparing OpenAI models to other models in the Azure AI Studio catalog), you can use the azure-ai-inference Python client library with Azure OpenAI endpoints, as shown [in this sample](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_azure_openai.py). [Product documentation](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api) -| [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) +| [Samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) | [API reference documentation](https://aka.ms/azsdk/azure-ai-inference/python/reference) | [Package (Pypi)](https://aka.ms/azsdk/azure-ai-inference/python/package) | [SDK source code](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/azure/ai/inference) @@ -145,13 +145,13 @@ AI model information is displayed (if available) when you `print(client)`. The `ChatCompletionsClient` has a method named `complete`. 
The method makes a REST API call to the `/chat/completions` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions). -See simple chat completion examples below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder. +See simple chat completion examples below. More can be found in the [samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) folder. ### Text Embeddings The `EmbeddingsClient` has a method named `embedding`. The method makes a REST API call to the `/embeddings` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-embeddings). -See simple text embedding example below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder. +See simple text embedding example below. More can be found in the [samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) folder. -The following types or messages are supported: `SystemMessage`,`UserMessage`, `AssistantMessage`, `ToolMessage` (See sample [sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) for usage of `ToolMessage`). +The following types or messages are supported: `SystemMessage`,`UserMessage`, `AssistantMessage`, `ToolMessage` (See sample [sample_chat_completions_with_tools.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_tools.py) for usage of `ToolMessage`). Alternativley you can provide the messages as dictionary instead of using the strongly typed classes like `SystemMessage` and `UserMessage`: @@ -454,7 +454,7 @@ None redacted logs are generated for log level `logging.DEBUG` only. Be sure to ## Next steps -* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder, containing fully runnable Python code for doing inference using synchronous and asynchronous clients. +* Have a look at the [Samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) folder, containing fully runnable Python code for doing inference using synchronous and asynchronous clients. ## Contributing diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 33cc3fe7056c..4ed84a451395 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -18,30 +18,30 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| -|[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | -|[sample_chat_completions_streaming_with_entra_id_auth.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. 
This sample also shows setting the `azureml-model-deployment` HTTP request header, which may be required for self-hosted endpoints. | -|[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. | -|[sample_chat_completions_with_history.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, with the second completion using chat history from the first. | -|[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. | -|[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | -|[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools | -|[sample_load_client.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | -|[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. This can similarly be done with all other clients. | -|[sample_chat_completions_with_model_extras.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py) | Chat completions with additional model-specific parameters. | -|[sample_chat_completions_azure_openai.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py) | Chat completions against Azure OpenAI endpoint. | +|[sample_chat_completions_streaming.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | +|[sample_chat_completions_streaming_with_entra_id_auth.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. This sample also shows setting the `azureml-model-deployment` HTTP request header, which may be required for self-hosted endpoints. | +|[sample_chat_completions.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client.
| +|[sample_chat_completions_with_history.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, with the second completion using chat history from the first. | +|[sample_chat_completions_from_input_bytes.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. | +|[sample_chat_completions_from_input_json.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | +|[sample_chat_completions_with_tools.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools | +|[sample_load_client.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | +|[sample_get_model_info.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. This can similarly be done with all other clients. | +|[sample_chat_completions_with_model_extras.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_model_extras.py) | Chat completions with additional model-specific parameters. | +|[sample_chat_completions_azure_openai.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_azure_openai.py) | Chat completions against Azure OpenAI endpoint. | ### Text embeddings |**File Name**|**Description**| |----------------|-------------| -|[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | +|[sample_embeddings.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | ## Asynchronous client samples @@ -50,24 +50,24 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| -|[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | -|[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | -|[sample_load_client_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py) | Shows how to use the function `load_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`.
| -|[sample_chat_completions_from_input_bytes_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `IO[bytes]`. | -|[sample_chat_completions_from_input_json_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `MutableMapping[str, Any]` | +|[sample_chat_completions_streaming_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | +|[sample_chat_completions_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | +|[sample_load_client_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_load_client_async.py) | Shows how to use the function `load_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`. | +|[sample_chat_completions_from_input_bytes_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_from_input_bytes_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `IO[bytes]`. | +|[sample_chat_completions_from_input_json_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_from_input_json_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `MutableMapping[str, Any]` | ### Text embeddings |**File Name**|**Description**| |----------------|-------------| -|[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | +|[sample_embeddings_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | ## Prerequisites diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index 712d5411c35e..ec32562700d3 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -32,7 +32,7 @@ The live tests were written against the AI models mentioned below. You will need ## Set environment variables -The tests read endpoints and keys from environment variables. See the [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/README.md#set-environment-variables) section in the samples README.md file for the full list of environment variables that need to be set for all tests to pass. +The tests read endpoints and keys from environment variables.
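For illustration only (the variable names in this sketch are invented, not the official list), a test might read them like this:

```python
import os

# Hypothetical variable names for this sketch; the samples README linked
# below lists the real ones.
endpoint = os.environ["AZURE_AI_CHAT_ENDPOINT"]
key = os.environ["AZURE_AI_CHAT_KEY"]
```
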
See the [Set environment variables](https://aka.ms/azsdk/azure-ai-inference/python/samples/README.md#set-environment-variables) section in the samples README.md file for the full list of environment variables that need to be set for all tests to pass. In addition, the following environment values **must be** defined, although not used. Assign any value to them: From 63fefdfd6a557a42afd0eb4b31da101b96fc12cc Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 08:06:37 -0700 Subject: [PATCH 093/112] Revert "Use vanity link for samples folder" This reverts commit d7ef3b77985b439fe62cf87c10e7da5158278bf1. --- sdk/ai/azure-ai-inference/README.md | 14 ++++---- sdk/ai/azure-ai-inference/samples/README.md | 40 ++++++++++----------- sdk/ai/azure-ai-inference/tests/README.md | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 6de0a7de4b14..900c356d9327 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -11,10 +11,10 @@ Use the model inference client library to: * Get text embeddings -Note that for inference using OpenAI models hosted on Azure, you should be using the official [OpenAI Python client library](https://github.com/openai/openai-python) in product code instead of this client. However, for development and evaluation purposes (comparing OpenAI models to other models in the Azure AI Studio catalog), you can use the azure-ai-inference Python client library with Azure OpenAI endpoints, as shown [in this sample](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_azure_openai.py). +Note that for inference using OpenAI models hosted on Azure, you should be using the official [OpenAI Python client library](https://github.com/openai/openai-python) in product code instead of this client. However, for development and evaluation purposes (comparing OpenAI models to other models in the Azure AI Studio catalog), you can use the azure-ai-inference Python client library with Azure OpenAI endpoints, as shown [in this sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py). [Product documentation](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api) -| [Samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) +| [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) | [API reference documentation](https://aka.ms/azsdk/azure-ai-inference/python/reference) | [Package (Pypi)](https://aka.ms/azsdk/azure-ai-inference/python/package) | [SDK source code](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/azure/ai/inference) @@ -145,13 +145,13 @@ AI model information is displayed (if available) when you `print(client)`. The `ChatCompletionsClient` has a method named `complete`. The method makes a REST API call to the `/chat/completions` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions). -See simple chat completion examples below. More can be found in the [samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) folder. +See simple chat completion examples below. 
More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder. ### Text Embeddings The `EmbeddingsClient` has a method named `embed`. The method makes a REST API call to the `/embeddings` route on the provided endpoint, as documented in [the REST API reference](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-embeddings). -See simple text embedding example below. More can be found in the [samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) folder. +See simple text embedding example below. More can be found in the [samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder. -The following types of messages are supported: `SystemMessage`, `UserMessage`, `AssistantMessage`, `ToolMessage` (see sample [sample_chat_completions_with_tools.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_tools.py) for usage of `ToolMessage`). +The following types of messages are supported: `SystemMessage`, `UserMessage`, `AssistantMessage`, `ToolMessage` (see sample [sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) for usage of `ToolMessage`). Alternatively, you can provide the messages as dictionaries instead of using the strongly typed classes like `SystemMessage` and `UserMessage`: @@ -454,7 +454,7 @@ Non-redacted logs are generated for log level `logging.DEBUG` only. Be sure to ## Next steps -* Have a look at the [Samples](https://aka.ms/azsdk/azure-ai-inference/python/samples) folder, containing fully runnable Python code for doing inference using synchronous and asynchronous clients. +* Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples) folder, containing fully runnable Python code for doing inference using synchronous and asynchronous clients. ## Contributing diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 4ed84a451395..33cc3fe7056c 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -18,30 +18,30 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| -|[sample_chat_completions_streaming.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | -|[sample_chat_completions_streaming_with_entra_id_auth.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. This sample also shows setting the `azureml-model-deployment` HTTP request header, which may be required for self-hosted endpoints. | -|[sample_chat_completions.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client.
| -|[sample_chat_completions_with_history.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, with the second completion using chat history from the first. | -|[sample_chat_completions_from_input_bytes.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`. | -|[sample_chat_completions_from_input_json.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | -|[sample_chat_completions_with_tools.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools | -|[sample_load_client.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | -|[sample_get_model_info.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. This can similarly be done with all other clients. | -|[sample_chat_completions_with_model_extras.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_with_model_extras.py) | Chat completions with additional model-specific parameters. | -|[sample_chat_completions_azure_openai.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_chat_completions_azure_openai.py) | Chat completions against Azure OpenAI endpoint. | +|[sample_chat_completions_streaming.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming.py) | One chat completion operation using a synchronous client and streaming response. | +|[sample_chat_completions_streaming_with_entra_id_auth.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py) | One chat completion operation using a synchronous client and streaming response, using Entra ID authentication. This sample also shows setting the `azureml-model-deployment` HTTP request header, which may be required for self-hosted endpoints. | +|[sample_chat_completions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py) | One chat completion operation using a synchronous client. | +|[sample_chat_completions_with_history.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py) | Two chat completion operations using a synchronous client, with the second completion using chat history from the first. | +|[sample_chat_completions_from_input_bytes.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py) | One chat completion operation using a synchronous client, with input messages provided as `IO[bytes]`.
| +|[sample_chat_completions_from_input_json.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py) | One chat completion operation using a synchronous client, with input messages provided as `MutableMapping[str, Any]` | +|[sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) | Shows how to use a tool (function) in chat completions, for an AI model that supports tools | +|[sample_load_client.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_load_client.py) | Shows how to use the function `load_client` to create the appropriate synchronous client based on the provided endpoint URL. In this example, it creates a synchronous `ChatCompletionsClient`. | +|[sample_get_model_info.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_get_model_info.py) | Get AI model information using the chat completions client. This can similarly be done with all other clients. | +|[sample_chat_completions_with_model_extras.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py) | Chat completions with additional model-specific parameters. | +|[sample_chat_completions_azure_openai.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py) | Chat completions against Azure OpenAI endpoint. | ### Text embeddings |**File Name**|**Description**| |----------------|-------------| -|[sample_embeddings.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | +|[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | ## Asynchronous client samples @@ -50,24 +50,24 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| -|[sample_chat_completions_streaming_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | -|[sample_chat_completions_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | -|[sample_load_client_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_load_client_async.py) | Shows how to use the function `load_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`. | -|[sample_chat_completions_from_input_bytes_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_from_input_bytes_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `IO[bytes]`. | -|[sample_chat_completions_from_input_json_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_chat_completions_from_input_json_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `MutableMapping[str, Any]` | +|[sample_chat_completions_streaming_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py) | One chat completion operation using an asynchronous client and streaming response. | +|[sample_chat_completions_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py) | One chat completion operation using an asynchronous client. | +|[sample_load_client_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py) | Shows how to use the function `load_client` to create the appropriate asynchronous client based on the provided endpoint URL. In this example, it creates an asynchronous `ChatCompletionsClient`. | +|[sample_chat_completions_from_input_bytes_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `IO[bytes]`.
| +|[sample_chat_completions_from_input_json_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py) | One chat completion operation using an asynchronous client, with input messages provided as `MutableMapping[str, Any]` | ### Text embeddings |**File Name**|**Description**| |----------------|-------------| -|[sample_embeddings_async.py](https://aka.ms/azsdk/azure-ai-inference/python/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | +|[sample_embeddings_async.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py) | One embeddings operation using an asynchronous client. | ## Prerequisites diff --git a/sdk/ai/azure-ai-inference/tests/README.md b/sdk/ai/azure-ai-inference/tests/README.md index ec32562700d3..712d5411c35e 100644 --- a/sdk/ai/azure-ai-inference/tests/README.md +++ b/sdk/ai/azure-ai-inference/tests/README.md @@ -32,7 +32,7 @@ The live tests were written against the AI models mentioned below. You will need ## Set environment variables -The tests read endpoints and keys from environment variables. See the [Set environment variables](https://aka.ms/azsdk/azure-ai-inference/python/samples/README.md#set-environment-variables) section in the samples README.md file for the full list of environment variables that need to be set for all tests to pass. +The tests read endpoints and keys from environment variables. See the [Set environment variables](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/README.md#set-environment-variables) section in the samples README.md file for the full list of environment variables that need to be set for all tests to pass. In addition, the following environment variables **must be** defined, although not used.
Assign any value to them: From ab6973fbb0973a320642c7d1b48195510ad8e7d2 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 08:23:54 -0700 Subject: [PATCH 094/112] Fix mypy and pyright errors --- sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py | 3 ++- sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 3ccc73a150fb..b158cc0e485f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -78,7 +78,8 @@ def load_client( behavior. :paramtype api_version: str :return: ChatCompletionsClient or EmbeddingsClient or ImageEmbeddingsClient - :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient or ~azure.ai.inference.ImageEmbeddingsClient + :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient + or ~azure.ai.inference.ImageEmbeddingsClient :raises ~azure.core.exceptions.HttpResponseError """ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 8ab673684d32..d2ec9c2a6503 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -38,7 +38,7 @@ if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from azure.core.credentials import TokenCredential + from azure.core.credentials_async import AsyncTokenCredential if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -50,7 +50,7 @@ async def load_client( - endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any + endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: """ Load a client from a given endpoint URL. @@ -66,7 +66,8 @@ async def load_client( behavior. :paramtype api_version: str :return: ChatCompletionsClient or EmbeddingsClient or ImageEmbeddingsClient - :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient or ~azure.ai.inference.ImageEmbeddingsClient + :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient + or ~azure.ai.inference.ImageEmbeddingsClient :raises ~azure.core.exceptions.HttpResponseError """ From efbcf5cd047ce11207b1acd5b81b456c5153b2cd Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 11:24:12 -0700 Subject: [PATCH 095/112] Update root README.md. Update operator ref doc comments --- sdk/ai/azure-ai-inference/README.md | 27 ++-- .../azure/ai/inference/_patch.py | 127 ++++++++++-------- .../azure/ai/inference/aio/_patch.py | 125 +++++++++-------- 3 files changed, 149 insertions(+), 130 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index 900c356d9327..a842fb2f3917 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -27,7 +27,7 @@ Note that for inference using OpenAI models hosted on Azure, you should be using * An [Azure subscription](https://azure.microsoft.com/free). 
* An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. * To construct the client library, you will need to pass in the endpoint URL. The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). -* Depending on your model deployment, you either need a key to authenticate against the service, or Entra ID credentials. The key is a 32-character string. +* Depending on your model deployment and authentication preference, you either need a key to authenticate against the service, or Entra ID credentials. The key is a 32-character string. ### Install the package @@ -67,7 +67,7 @@ To create an asynchronous client, Install the additional package [aiohttp](https pip install aiohttp ``` -and update the code above to import `ChatCompletionsClient` from the `aio` namespace: +and update the code above to import `asyncio`, and import `ChatCompletionsClient` from the `azure.ai.inference.aio` namespace instead of `azure.ai.inference`: ```python import asyncio @@ -181,7 +181,7 @@ See the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai ### Chat completions example -This example demonstrates how to generate a single chat completions. +This example demonstrates how to generate a single chat completions, with key authentication, assuming `endpoint` and `key` are already defined. @@ -238,7 +238,7 @@ To generate completions for additional messages, simply call `client.create` mul ### Streaming chat completions example -This example demonstrates how to generate a single chat completions with streaming response. You need to add `stream=True` to the `complete` call to enable streaming. +This example demonstrates how to generate a single chat completions with streaming response, with key authentication, assuming `endpoint` and `key` are already defined. You need to add `stream=True` to the `complete` call to enable streaming. @@ -271,7 +271,7 @@ To generate completions for additional messages, simply call `client.complete` m ### Chat completions with additional model-specific parameters -In this example, extra JSON elements are inserted at the root of the request body by setting `model_extras` when calling the `complete` method. These are indended for AI models that require extra parameters beyond what is defined in the REST API. +In this example, extra JSON elements are inserted at the root of the request body by setting `model_extras` when calling the `complete` method. These are intended for AI models that require extra parameters beyond what is defined in the REST API. Note that by default, the service will reject any request payload that includes unknown parameters (ones that are not defined in the REST API [Request Body table](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-chat-completions#request-body)). In order to change the default service behaviour, when the `complete` method includes `model_extras`, the client library will automatically add the HTTP request header `"unknown_params": "pass_through"`. @@ -306,7 +306,7 @@ In the above example, this will be the JSON payload in the HTTP request: ### Text Embeddings example -This example demonstrates how to get text embeddings. 
+This example demonstrates how to get text embeddings, with key authentication, assuming `endpoint` and `key` are already defined. @@ -382,7 +382,7 @@ To generate embeddings for additional phrases, simply call `client.create` multi ### Exceptions -The `create` and `get_model_info` methods on the clients raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue: +The `create`, `embed` and `get_model_info` methods on the clients raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue: ```python from azure.core.exceptions import HttpResponseError @@ -393,7 +393,6 @@ try: result = client.create( ... ) except HttpResponseError as e: print(f"Status code: {e.status_code} ({e.reason})") - print(f"{e}") ``` For example, when you provide a wrong authentication key: @@ -401,16 +400,14 @@ For example, when you provide a wrong authentication key: ```text Status code: 401 (Unauthorized) Operation returned an invalid status 'Unauthorized' -Content: {"status": "Invalid auth token"} -```v +``` -Or for example when you created an `EmbeddingsClient` and called `create` on the client, but the endpoint does not +Or for example when you created an `EmbeddingsClient` and called `embed` on the client, but the endpoint does not support the `/embeddings` route: ```text -Status code: 424 (Failed Dependency) -Operation returned an invalid status 'Failed Dependency' -Content: {"detail":"Not Found"} +Status code: 405 (Method Not Allowed) +Operation returned an invalid status 'Method Not Allowed' ``` ### Logging @@ -439,7 +436,7 @@ formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") handler.setFormatter(formatter) ``` -By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct the client library, or when you call any of the client's `create` methods. +By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct the client library, or when you call any of the client's `create` methods. 
```python # Create a Model Client with none redacted log diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index b158cc0e485f..c52ad6d7734d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -65,7 +65,8 @@ def load_client( endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: """ - Load a client from a given endpoint URL. + Load a client from a given endpoint URL. The method makes a REST API call to the `/info` route + on the given endpoint, to determine the model type and therefore which client to instantiate. :param endpoint: Service host. Required. :type endpoint: str @@ -77,7 +78,7 @@ def load_client( "2024-05-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str - :return: ChatCompletionsClient or EmbeddingsClient or ImageEmbeddingsClient + :return: The appropriate client associated with the given endpoint :rtype: ~azure.ai.inference.ChatCompletionsClient or ~azure.ai.inference.EmbeddingsClient or ~azure.ai.inference.ImageEmbeddingsClient :raises ~azure.core.exceptions.HttpResponseError @@ -139,9 +140,12 @@ def complete( # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions - object to get content updates as they arrive. + "completes" provided prompt data. The method makes a REST API call to the `/chat/completions` route + on the given endpoint. + When using this method with `stream=True`, the response is streamed + back to the client. Iterate over the resulting StreamingChatCompletions + object to get content updates as they arrive. By default, the response is a ChatCompletions object + (non-streaming). :keyword messages: The collection of context messages associated with this chat completions request. @@ -154,29 +158,32 @@ def complete( :paramtype content_type: str :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens - appearing based on their cumulative - frequency in generated text. + appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. + decrease the likelihood of the model repeating the same statements verbatim. + Supported range is [-2, 2]. + Default value is None. :paramtype frequency_penalty: float :keyword presence_penalty: A value that influences the probability of generated tokens appearing based on their existing presence in generated text. 
Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. Default value is None. + the model's likelihood to output new topics. + Supported range is [-2, 2]. + Default value is None. :paramtype presence_penalty: float :keyword temperature: The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. + Default value is None. :paramtype temperature: float :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value causes the @@ -228,11 +235,10 @@ def complete( # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions - object to get content updates as they arrive. + "completes" provided prompt data. - :param body: Required. + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -254,11 +260,9 @@ def complete( # pylint: disable=too-many-locals """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions - object to get content updates as they arrive. + "completes" provided prompt data. - :param body: Required. + :param body: Specifies the full request payload. Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -298,7 +302,8 @@ def complete( back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions object to get content updates as they arrive. - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param body: Is either a MutableMapping[str, Any] type (like a dictionary) or a IO[bytes] type + that specifies the full request payload. Required. :type body: JSON or IO[bytes] :keyword messages: The collection of context messages associated with this chat completions request. @@ -308,7 +313,7 @@ def complete( :paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. 
:paramtype model_extras: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens @@ -338,7 +343,9 @@ def complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. + Default value is None. :paramtype top_p: float :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int @@ -480,11 +487,12 @@ def embed( input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -507,10 +515,8 @@ def embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. @@ -530,9 +536,11 @@ def embed( content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: Required. + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -550,9 +558,10 @@ def embed( content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: Required. + :param body: Specifies the full request payload. Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
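A rough sketch of the dictionary-body `embed` overload documented above (assuming `endpoint` and `key` are already defined, following the convention used in the README examples):

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.inference import EmbeddingsClient

client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

# The full request payload is passed as a MutableMapping (a plain dict here)
# instead of individual keyword arguments.
result = client.embed({"input": ["first phrase", "second phrase"]})
```
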
@@ -575,13 +584,15 @@ def embed( **kwargs: Any, ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param body: Is either a MutableMapping[str, Any] type (like a dictionary) or a IO[bytes] type + that specifies the full request payload. Required. :type body: JSON or IO[bytes] :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -594,10 +605,8 @@ def embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. @@ -710,11 +719,12 @@ def embed( input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for given images. + """Return the embedding vectors for given images. + The method makes a REST API call to the `/images/embeddings` route on the given endpoint. :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an @@ -737,10 +747,8 @@ def embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. 
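A minimal keyword-style call matching the `embed` docstring above; it assumes `endpoint` and `key` are defined, and whether `input_type` is honored depends on the target model:

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.inference import EmbeddingsClient
from azure.ai.inference.models import EmbeddingInputType

client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

result = client.embed(
    input=["first phrase", "second phrase", "third phrase"],
    input_type=EmbeddingInputType.TEXT,
)

# Each item in `result.data` carries the index of the input phrase it
# corresponds to, plus the embedding vector itself.
for item in result.data:
    print(f"index {item.index}: vector of length {len(item.embedding)}")
```
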
@@ -760,9 +768,11 @@ def embed( content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for given images. + """Return the embedding vectors for given images. + The method makes a REST API call to the `/images/embeddings` route on the given endpoint. - :param body: Required. + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -780,9 +790,10 @@ def embed( content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for given images. + """Return the embedding vectors for given images. + The method makes a REST API call to the `/images/embeddings` route on the given endpoint. - :param body: Required. + :param body: Specifies the full request payload. Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -805,13 +816,15 @@ def embed( **kwargs: Any, ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for given images. + """Return the embedding vectors for given images. + The method makes a REST API call to the `/images/embeddings` route on the given endpoint. - :param body: Is either a JSON type or a IO[bytes] type. Required. + :param body: Is either a MutableMapping[str, Any] type (like a dictionary) or a IO[bytes] type + that specifies the full request payload. Required. :type body: JSON or IO[bytes] :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an @@ -824,10 +837,8 @@ def embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. 
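A hedged sketch of the image variant, using the dictionary form of the payload so that no model-class names need to be assumed; the file name `sample_image.png` and the `{"image": ...}` item shape are illustrative, following the REST reference for the `/images/embeddings` route:

```python
import base64

from azure.core.credentials import AzureKeyCredential
from azure.ai.inference import ImageEmbeddingsClient

# `endpoint` and `key` are assumed to be defined, as in the README examples.
client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

# Base64-encode the image file; `sample_image.png` is a placeholder, and the
# exact accepted image encoding depends on the target model.
with open("sample_image.png", "rb") as f:
    image_data = base64.b64encode(f.read()).decode("ascii")

result = client.embed({"input": [{"image": image_data}]})
```
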
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index d2ec9c2a6503..ff7e4a956a99 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -53,7 +53,8 @@ async def load_client( endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: """ - Load a client from a given endpoint URL. + Load a client from a given endpoint URL. The method makes a REST API call to the `/info` route + on the given endpoint, to determine the model type and therefore which client to instantiate. :param endpoint: Service host. Required. :type endpoint: str @@ -128,9 +129,12 @@ async def complete( # pylint: disable=line-too-long """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or - "completes" provided prompt data. When using this method with `stream=True`, the response is streamed - back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions - object to get content updates as they arrive. + "completes" provided prompt data. The method makes a REST API call to the `/chat/completions` route + on the given endpoint. + When using this method with `stream=True`, the response is streamed + back to the client. Iterate over the resulting StreamingChatCompletions + object to get content updates as they arrive. By default, the response is a ChatCompletions object + (non-streaming). :keyword messages: The collection of context messages associated with this chat completions request. @@ -143,7 +147,7 @@ async def complete( :paramtype content_type: str :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the @@ -154,25 +158,28 @@ async def complete( HTTP request header. Default value is None. :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens - appearing based on their cumulative - frequency in generated text. + appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. + decrease the likelihood of the model repeating the same statements verbatim. + Supported range is [-2, 2]. + Default value is None. :paramtype frequency_penalty: float :keyword presence_penalty: A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. Default value is None. + the model's likelihood to output new topics. + Supported range is [-2, 2]. + Default value is None. 
:paramtype presence_penalty: float
:keyword temperature: The sampling temperature to use that controls the apparent creativity of
generated completions.
Higher values will make output more random while lower values will make results more focused
and deterministic.
It is not recommended to modify temperature and top_p for the same completions request as the
- interaction of these two settings is difficult to predict. Default value is None.
+ interaction of these two settings is difficult to predict.
+ Supported range is [0, 1].
+ Default value is None.
:paramtype temperature: float
:keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value
causes the
@@ -224,11 +231,10 @@ async def complete(
# pylint: disable=line-too-long
"""Gets chat completions for the provided chat messages. Completions support a wide variety of
tasks and generate text that continues from or
- "completes" provided prompt data. When using this method with `stream=True`, the response is streamed
- back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions
- object to get content updates as they arrive.
+ "completes" provided prompt data.
- :param body: Required.
+ :param body: An object of type MutableMapping[str, Any], such as a dictionary, that
+ specifies the full request payload. Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -249,11 +255,9 @@ async def complete(
# pylint: disable=line-too-long
"""Gets chat completions for the provided chat messages. Completions support a wide variety of
tasks and generate text that continues from or
- "completes" provided prompt data. When using this method with `stream=True`, the response is streamed
- back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions
- object to get content updates as they arrive.
+ "completes" provided prompt data.
- :param body: Required.
+ :param body: Specifies the full request payload. Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -294,7 +298,8 @@ async def complete(
back to the client. Iterate over the resulting ~azure.ai.inference.models.StreamingChatCompletions
object to get content updates as they arrive.
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param body: Is either a MutableMapping[str, Any] type (like a dictionary) or an IO[bytes] type
+ that specifies the full request payload. Required.
:type body: JSON or IO[bytes]
:keyword messages: The collection of context messages associated with this chat completions
request.
@@ -304,7 +309,7 @@ async def complete(
:paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage]
:keyword model_extras: Additional, model-specific parameters that are not in the standard
request payload. They will be added as-is to the root of the JSON in the request body.
- How the service handles these hyper parameters depends on the value of the
+ How the service handles these extra parameters depends on the value of the
``unknown-parameters`` request header. Default value is None.
:paramtype model_extras: dict[str, Any] :keyword extras: Extra parameters (in the form of string key-value pairs) that are not in the @@ -341,7 +346,9 @@ async def complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. + Default value is None. :paramtype top_p: float :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int @@ -484,11 +491,12 @@ async def embed( input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. :keyword model_extras: Additional, model-specific parameters that are not in the standard request payload. They will be added as-is to the root of the JSON in the request body. - How the service handles these hyper parameters depends on the value of the + How the service handles these extra parameters depends on the value of the ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword input: Input text to embed, encoded as a string or array of tokens. @@ -511,10 +519,8 @@ async def embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: + :keyword encoding_format: Optional. The desired format for the returned embeddings. + Known values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. @@ -534,9 +540,11 @@ async def embed( content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: Required. + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -554,9 +562,10 @@ async def embed( content_type: str = "application/json", **kwargs: Any, ) -> _models.EmbeddingsResult: - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: Required. + :param body: Specifies the full request payload. Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
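For the text-embeddings route documented in these hunks, the following is a usage sketch of the async client (endpoint and key are placeholders; the client name and `embed` signature are taken from the overloads above):

    import asyncio

    from azure.ai.inference.aio import EmbeddingsClient
    from azure.core.credentials import AzureKeyCredential

    async def main() -> None:
        # "async with" closes the underlying HTTP pipeline when the block exits.
        async with EmbeddingsClient(
            endpoint="https://<your-endpoint>.inference.ai.azure.com",  # placeholder
            credential=AzureKeyCredential("<your-key>"),  # placeholder
        ) as client:
            result = await client.embed(input=["first phrase", "second phrase"])
            for item in result.data:
                print(f"prompt {item.index}: {len(item.embedding)} dimensions")

    asyncio.run(main())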
@@ -579,13 +588,15 @@ async def embed(
**kwargs: Any,
) -> _models.EmbeddingsResult:
# pylint: disable=line-too-long
- """Return the embeddings for a given text prompt.
+ """Return the embedding vectors for given text prompts.
+ The method makes a REST API call to the `/embeddings` route on the given endpoint.
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param body: Is either a MutableMapping[str, Any] type (like a dictionary) or an IO[bytes] type
+ that specifies the full request payload. Required.
:type body: JSON or IO[bytes]
:keyword model_extras: Additional, model-specific parameters that are not in the standard
request payload. They will be added as-is to the root of the JSON in the request body.
- How the service handles these hyper parameters depends on the value of the
+ How the service handles these extra parameters depends on the value of the
``unknown-parameters`` request header. Default value is None.
:paramtype model_extras: dict[str, Any]
:keyword input: Input text to embed, encoded as a string or array of tokens.
@@ -598,10 +609,8 @@ async def embed(
Returns a 422 error if the model doesn't support the value or parameter. Default value is
None.
:paramtype dimensions: int
- :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings
- should have.
- Passing null causes the model to use its default value.
- Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ :keyword encoding_format: Optional. The desired format for the returned embeddings.
+ Known values are:
"base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None.
:paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat
:keyword input_type: Optional. The type of the input.
@@ -714,11 +723,12 @@ async def embed(
input_type: Optional[Union[str, _models.EmbeddingInputType]] = None,
**kwargs: Any,
) -> _models.EmbeddingsResult:
- """Return the embeddings for given images.
+ """Return the embedding vectors for given images.
+ The method makes a REST API call to the `/images/embeddings` route on the given endpoint.
:keyword model_extras: Additional, model-specific parameters that are not in the standard
request payload. They will be added as-is to the root of the JSON in the request body.
- How the service handles these hyper parameters depends on the value of the
+ How the service handles these extra parameters depends on the value of the
``unknown-parameters`` request header. Default value is None.
:paramtype model_extras: dict[str, Any]
:keyword input: Input image to embed. To embed multiple inputs in a single request, pass an
@@ -741,10 +751,8 @@ async def embed(
Returns a 422 error if the model doesn't support the value or parameter. Default value is
None.
:paramtype dimensions: int
- :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings
- should have.
- Passing null causes the model to use its default value.
- Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ :keyword encoding_format: Optional. The desired format for the returned embeddings.
+ Known values are:
"base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None.
:paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat
:keyword input_type: Optional. The type of the input.
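The `model_extras` keyword described above can be exercised from the same async client; in this sketch `custom_knob` is a hypothetical, model-specific parameter (not part of the standard payload):

    # Inside the main() coroutine of the previous sketch:
    # "custom_knob" is hypothetical; the client adds it as-is to the root of
    # the request JSON, per the model_extras documentation above.
    result = await client.embed(
        input=["sample text"],
        model_extras={"custom_knob": 7},
    )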
@@ -764,9 +772,11 @@ async def embed(
content_type: str = "application/json",
**kwargs: Any,
) -> _models.EmbeddingsResult:
- """Return the embeddings for given images.
+ """Return the embedding vectors for given images.
+ The method makes a REST API call to the `/images/embeddings` route on the given endpoint.
- :param body: Required.
+ :param body: An object of type MutableMapping[str, Any], such as a dictionary, that
+ specifies the full request payload. Required.
:type body: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
@@ -784,9 +794,10 @@ async def embed(
content_type: str = "application/json",
**kwargs: Any,
) -> _models.EmbeddingsResult:
- """Return the embeddings for given images.
+ """Return the embedding vectors for given images.
+ The method makes a REST API call to the `/images/embeddings` route on the given endpoint.
- :param body: Required.
+ :param body: Specifies the full request payload. Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
@@ -809,13 +820,15 @@ async def embed(
**kwargs: Any,
) -> _models.EmbeddingsResult:
# pylint: disable=line-too-long
- """Return the embeddings for given images.
+ """Return the embedding vectors for given images.
+ The method makes a REST API call to the `/images/embeddings` route on the given endpoint.
- :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :param body: Is either a MutableMapping[str, Any] type (like a dictionary) or an IO[bytes] type
+ that specifies the full request payload. Required.
:type body: JSON or IO[bytes]
:keyword model_extras: Additional, model-specific parameters that are not in the standard
request payload. They will be added as-is to the root of the JSON in the request body.
- How the service handles these hyper parameters depends on the value of the
+ How the service handles these extra parameters depends on the value of the
``unknown-parameters`` request header. Default value is None.
:paramtype model_extras: dict[str, Any]
:keyword input: Input image to embed. To embed multiple inputs in a single request, pass an
@@ -828,10 +841,8 @@ async def embed(
Returns a 422 error if the model doesn't support the value or parameter. Default value is
None.
:paramtype dimensions: int
- :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings
- should have.
- Passing null causes the model to use its default value.
- Returns a 422 error if the model doesn't support the value or parameter. Known values are:
+ :keyword encoding_format: Optional. The desired format for the returned embeddings.
+ Known values are:
"base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None.
:paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat
:keyword input_type: Optional. The type of the input.
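The IO[bytes] overloads documented above accept the same payload pre-serialized to bytes; a sketch, assuming an aio ImageEmbeddingsClient named `client` as in the earlier sketches:

    import io
    import json

    # Serialize the request once and hand the raw bytes to the client; this
    # takes the IO[bytes] overload, with content_type left at the documented
    # default of "application/json".
    raw = json.dumps({"input": [{"image": "<base64-encoded-png>"}]}).encode("utf-8")
    result = await client.embed(io.BytesIO(raw))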
From 6f69018da8d1520cba86fec1a5b4ad7b17da4a05 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 11:39:13 -0700 Subject: [PATCH 096/112] Re-emit --- .../ai/inference/_operations/_operations.py | 278 +++--------------- .../azure/ai/inference/_patch.py | 16 +- .../inference/aio/_operations/_operations.py | 272 +++-------------- .../azure/ai/inference/aio/_patch.py | 16 +- .../azure/ai/inference/models/__init__.py | 6 +- .../azure/ai/inference/models/_enums.py | 2 + .../azure/ai/inference/models/_models.py | 93 +++++- .../samples/sample_chat_completions.py | 1 + ...ample_chat_completions_from_input_bytes.py | 1 + ...ompletions_streaming_with_entra_id_auth.py | 6 +- sdk/ai/azure-ai-inference/tsp-location.yaml | 2 +- 11 files changed, 205 insertions(+), 488 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py index 952b50430aae..48a52c3b763f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_operations/_operations.py @@ -43,7 +43,7 @@ def build_chat_completions_complete_request( - *, unknown_params: Optional[Union[str, _models.UnknownParams]] = None, **kwargs: Any + *, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -88,7 +88,7 @@ def build_chat_completions_get_model_info_request(**kwargs: Any) -> HttpRequest: def build_embeddings_embed_request( - *, unknown_params: Optional[Union[str, _models.UnknownParams]] = None, **kwargs: Any + *, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -133,7 +133,7 @@ def build_embeddings_get_model_info_request(**kwargs: Any) -> HttpRequest: def build_image_embeddings_embed_request( - *, unknown_params: Optional[Union[str, _models.UnknownParams]] = None, **kwargs: Any + *, unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -184,7 +184,7 @@ def _complete( self, body: JSON, *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -193,7 +193,7 @@ def _complete( self, *, messages: List[_models.ChatRequestMessage], - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -215,7 +215,7 @@ def _complete( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... 
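Although the `_complete` overloads re-emitted in this patch are private, they back the public `complete` method; the following is a usage sketch, under the assumption that `SystemMessage` and `UserMessage` helpers are exported from `azure.ai.inference.models` (endpoint and key are placeholders):

    from azure.ai.inference import ChatCompletionsClient
    from azure.ai.inference.models import SystemMessage, UserMessage
    from azure.core.credentials import AzureKeyCredential

    client = ChatCompletionsClient(
        endpoint="https://<your-endpoint>.inference.ai.azure.com",  # placeholder
        credential=AzureKeyCredential("<your-key>"),  # placeholder
    )
    response = client.complete(
        messages=[
            SystemMessage(content="You are a helpful assistant."),
            UserMessage(content="How many feet are in a mile?"),
        ],
        temperature=0.7,        # within the documented supported range [0, 1]
        frequency_penalty=0.5,  # within the documented supported range [-2, 2]
    )
    print(response.choices[0].message.content)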
@@ -226,7 +226,7 @@ def _complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, @@ -247,7 +247,8 @@ def _complete( """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" - provided prompt data. + provided prompt data. The method makes a REST API call to the ``/chat/completions`` route + on the given endpoint. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -266,8 +267,8 @@ def _complete( appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. + decrease the likelihood of the model repeating the same statements verbatim. + Supported range is [-2, 2]. Default value is None. :paramtype frequency_penalty: float :keyword stream_parameter: A value indicating whether chat completions should be streamed for this request. Default value is None. @@ -277,14 +278,16 @@ def _complete( presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase the - model's likelihood to output new topics. Default value is None. + model's likelihood to output new topics. + Supported range is [-2, 2]. Default value is None. :paramtype presence_penalty: float :keyword temperature: The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. Default value is None. :paramtype temperature: float :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value causes the @@ -292,7 +295,8 @@ def _complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. Default value is None. :paramtype top_p: float :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int @@ -332,12 +336,13 @@ def _complete( probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and decrease the likelihood of the model repeating the same - statements verbatim. + statements verbatim. Supported range is [-2, 2]. "max_tokens": 0, # Optional. The maximum number of tokens to generate. "presence_penalty": 0.0, # Optional. 
A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist - and increase the model's likelihood to output new topics. + and increase the model's likelihood to output new topics. Supported range is [-2, + 2]. "response_format": "str", # Optional. An object specifying the format that the model must output. Used to enable JSON mode. Known values are: "text" and "json_object". @@ -355,7 +360,7 @@ def _complete( make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to - predict. + predict. Supported range is [0, 1]. "tool_choice": "str", # Optional. If specified, the model will configure which of the provided tools it can use for the chat completions response. Is either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a @@ -369,6 +374,7 @@ def _complete( only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. + Supported range is [0, 1]. } # response body for status code(s): 200 @@ -490,6 +496,7 @@ def _complete( def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. + The method makes a REST API call to the ``/info`` route on the given endpoint. :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo @@ -563,7 +570,7 @@ def _embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -572,7 +579,7 @@ def _embed( self, *, input: List[str], - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -584,7 +591,7 @@ def _embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -595,14 +602,15 @@ def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the ``/embeddings`` route on the given endpoint. :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] @@ -621,11 +629,8 @@ def _embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :keyword encoding_format: Optional. The desired format for the returned embeddings. Known + values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: @@ -649,11 +654,9 @@ def _embed( resulting output embeddings should have. Passing null causes the model to use its default value. Returns a 422 error if the model doesn't support the value or parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". + "encoding_format": "str", # Optional. Optional. The desired format for the + returned embeddings. Known values are: "base64", "binary", "float", "int8", + "ubinary", and "uint8". "input_type": "str" # Optional. Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". @@ -760,6 +763,7 @@ def _embed( def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. + The method makes a REST API call to the ``/info`` route on the given endpoint. :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo @@ -833,225 +837,31 @@ def _embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. - :type body: JSON - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. - This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", - and "pass_through". Default value is None. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "input": [ - { - "image": "str", # The input image, in PNG format. Required. - "text": "str" # Optional. 
Optional. The text input to feed - into the model (like DINO, CLIP). Returns a 422 error if the model - doesn't support the value or parameter. - } - ], - "dimensions": 0, # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". - "input_type": "str" # Optional. Optional. The type of the input. Returns a - 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". - } - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0 # Index of the prompt to which the EmbeddingItem - corresponds. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload def _embed( self, *, input: List[_models.EmbeddingInput], - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an - array. - The input must not exceed the max input tokens for the model. Required. - :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. - This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", - and "pass_through". Default value is None. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should - have. 
- Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Default value is - None. - :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. - :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat - :keyword input_type: Optional. The type of the input. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". Default value is None. - :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0 # Index of the prompt to which the EmbeddingItem - corresponds. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload def _embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. - :type body: IO[bytes] - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. - This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", - and "pass_through". Default value is None. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. 
code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0 # Index of the prompt to which the EmbeddingItem - corresponds. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ + ) -> _models.EmbeddingsResult: ... @distributed_trace def _embed( @@ -1059,14 +869,15 @@ def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for given images. + """Return the embedding vectors for given images. + The method makes a REST API call to the ``/images/embeddings`` route on the given endpoint. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -1227,6 +1038,7 @@ def _embed( def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. + The method makes a REST API call to the ``/info`` route on the given endpoint. :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index c52ad6d7734d..cab2ddb1187c 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -181,7 +181,7 @@ def complete( Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. + interaction of these two settings is difficult to predict. Supported range is [0, 1]. Default value is None. :paramtype temperature: float @@ -237,7 +237,7 @@ def complete( Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. - :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that specifies the full request payload. Required. 
:type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -343,7 +343,7 @@ def complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. + interaction of these two settings is difficult to predict. Supported range is [0, 1]. Default value is None. :paramtype top_p: float @@ -410,7 +410,7 @@ def complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -539,7 +539,7 @@ def embed( """Return the embedding vectors for given text prompts. The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -642,7 +642,7 @@ def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -771,7 +771,7 @@ def embed( """Return the embedding vectors for given images. The method makes a REST API call to the `/images/embeddings` route on the given endpoint. - :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -874,7 +874,7 @@ def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py index 06c872161cf4..33344e718128 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_operations/_operations.py @@ -53,7 +53,7 @@ async def _complete( self, body: JSON, *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... 
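The `_patch.py` hunks above show the observable contract of `model_extras`: the extras are merged into the request body, None-valued entries are dropped, and the `unknown-parameters` header is set to `pass_through`. A sketch, reusing the sync chat client from the earlier example; `safe_mode` is a hypothetical model-specific flag:

    # "safe_mode" is hypothetical. Per the _patch.py logic above, the client
    # merges it into the JSON root, filters out None-valued body entries, and
    # sends the HTTP header "unknown-parameters: pass_through".
    response = client.complete(
        messages=[UserMessage(content="Hello")],
        model_extras={"safe_mode": True},
    )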
@@ -62,7 +62,7 @@ async def _complete( self, *, messages: List[_models.ChatRequestMessage], - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, @@ -84,7 +84,7 @@ async def _complete( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.ChatCompletions: ... @@ -95,7 +95,7 @@ async def _complete( body: Union[JSON, IO[bytes]] = _Unset, *, messages: List[_models.ChatRequestMessage] = _Unset, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, frequency_penalty: Optional[float] = None, stream_parameter: Optional[bool] = None, presence_penalty: Optional[float] = None, @@ -116,7 +116,8 @@ async def _complete( """Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" - provided prompt data. + provided prompt data. The method makes a REST API call to the ``/chat/completions`` route + on the given endpoint. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -135,8 +136,8 @@ async def _complete( appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. + decrease the likelihood of the model repeating the same statements verbatim. + Supported range is [-2, 2]. Default value is None. :paramtype frequency_penalty: float :keyword stream_parameter: A value indicating whether chat completions should be streamed for this request. Default value is None. @@ -146,14 +147,16 @@ async def _complete( presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase the - model's likelihood to output new topics. Default value is None. + model's likelihood to output new topics. + Supported range is [-2, 2]. Default value is None. :paramtype presence_penalty: float :keyword temperature: The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. Default value is None. :paramtype temperature: float :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value causes the @@ -161,7 +164,8 @@ async def _complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. Default value is None. 
:paramtype top_p: float :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int @@ -201,12 +205,13 @@ async def _complete( probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and decrease the likelihood of the model repeating the same - statements verbatim. + statements verbatim. Supported range is [-2, 2]. "max_tokens": 0, # Optional. The maximum number of tokens to generate. "presence_penalty": 0.0, # Optional. A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist - and increase the model's likelihood to output new topics. + and increase the model's likelihood to output new topics. Supported range is [-2, + 2]. "response_format": "str", # Optional. An object specifying the format that the model must output. Used to enable JSON mode. Known values are: "text" and "json_object". @@ -224,7 +229,7 @@ async def _complete( make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to - predict. + predict. Supported range is [0, 1]. "tool_choice": "str", # Optional. If specified, the model will configure which of the provided tools it can use for the chat completions response. Is either a Union[str, "_models.ChatCompletionsToolSelectionPreset"] type or a @@ -238,6 +243,7 @@ async def _complete( only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. + Supported range is [0, 1]. } # response body for status code(s): 200 @@ -359,6 +365,7 @@ async def _complete( async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. + The method makes a REST API call to the ``/info`` route on the given endpoint. :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo @@ -432,7 +439,7 @@ async def _embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... @@ -441,7 +448,7 @@ async def _embed( self, *, input: List[str], - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, @@ -453,7 +460,7 @@ async def _embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any ) -> _models.EmbeddingsResult: ... 
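For the streaming path described in these async docstrings, the following sketch iterates the StreamingChatCompletions result (the `delta.content` attribute path follows the package's streaming models and is an assumption where not shown in this diff; endpoint and key are placeholders):

    import asyncio

    from azure.ai.inference.aio import ChatCompletionsClient
    from azure.ai.inference.models import UserMessage
    from azure.core.credentials import AzureKeyCredential

    async def main() -> None:
        async with ChatCompletionsClient(
            endpoint="https://<your-endpoint>.inference.ai.azure.com",  # placeholder
            credential=AzureKeyCredential("<your-key>"),  # placeholder
        ) as client:
            # stream=True switches the response to a StreamingChatCompletions
            # object that yields incremental updates as they arrive.
            response = await client.complete(
                messages=[UserMessage(content="Tell me a short story.")],
                stream=True,
            )
            async for update in response:
                if update.choices and update.choices[0].delta.content:
                    print(update.choices[0].delta.content, end="")

    asyncio.run(main())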
@@ -464,14 +471,15 @@ async def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[str] = _Unset, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for a given text prompt. + """Return the embedding vectors for given text prompts. + The method makes a REST API call to the ``/embeddings`` route on the given endpoint. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -490,11 +498,8 @@ async def _embed( Returns a 422 error if the model doesn't support the value or parameter. Default value is None. :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. + :keyword encoding_format: Optional. The desired format for the returned embeddings. Known + values are: "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat :keyword input_type: Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: @@ -518,11 +523,9 @@ async def _embed( resulting output embeddings should have. Passing null causes the model to use its default value. Returns a 422 error if the model doesn't support the value or parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". + "encoding_format": "str", # Optional. Optional. The desired format for the + returned embeddings. Known values are: "base64", "binary", "float", "int8", + "ubinary", and "uint8". "input_type": "str" # Optional. Optional. The type of the input. Returns a 422 error if the model doesn't support the value or parameter. Known values are: "text", "query", and "document". @@ -629,6 +632,7 @@ async def _embed( async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. + The method makes a REST API call to the ``/info`` route on the given endpoint. :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo @@ -702,225 +706,31 @@ async def _embed( self, body: JSON, *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. 
- :type body: JSON - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. - This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", - and "pass_through". Default value is None. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "input": [ - { - "image": "str", # The input image, in PNG format. Required. - "text": "str" # Optional. Optional. The text input to feed - into the model (like DINO, CLIP). Returns a 422 error if the model - doesn't support the value or parameter. - } - ], - "dimensions": 0, # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. - "encoding_format": "str", # Optional. Optional. The number of dimensions the - resulting output embeddings should have. Passing null causes the model to use its - default value. Returns a 422 error if the model doesn't support the value or - parameter. Known values are: "base64", "binary", "float", "int8", "ubinary", and - "uint8". - "input_type": "str" # Optional. Optional. The type of the input. Returns a - 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". - } - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0 # Index of the prompt to which the EmbeddingItem - corresponds. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... 
@overload async def _embed( self, *, input: List[_models.EmbeddingInput], - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :keyword input: Input image to embed. To embed multiple inputs in a single request, pass an - array. - The input must not exceed the max input tokens for the model. Required. - :paramtype input: list[~azure.ai.inference.models.EmbeddingInput] - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. - This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", - and "pass_through". Default value is None. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword dimensions: Optional. The number of dimensions the resulting output embeddings should - have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Default value is - None. - :paramtype dimensions: int - :keyword encoding_format: Optional. The number of dimensions the resulting output embeddings - should have. - Passing null causes the model to use its default value. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "base64", "binary", "float", "int8", "ubinary", and "uint8". Default value is None. - :paramtype encoding_format: str or ~azure.ai.inference.models.EmbeddingEncodingFormat - :keyword input_type: Optional. The type of the input. - Returns a 422 error if the model doesn't support the value or parameter. Known values are: - "text", "query", and "document". Default value is None. - :paramtype input_type: str or ~azure.ai.inference.models.EmbeddingInputType - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0 # Index of the prompt to which the EmbeddingItem - corresponds. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. 
(for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ - + ) -> _models.EmbeddingsResult: ... @overload async def _embed( self, body: IO[bytes], *, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, content_type: str = "application/json", **kwargs: Any - ) -> _models.EmbeddingsResult: - # pylint: disable=line-too-long - """Return the embeddings for given images. - - :param body: Required. - :type body: IO[bytes] - :keyword unknown_params: Controls what happens if unknown parameters are passed in the JSON - request payload. - This sets the HTTP request header ``unknown-parameters``. Known values are: "error", "drop", - and "pass_through". Default value is None. - :paramtype unknown_params: str or ~azure.ai.inference.models.UnknownParams - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EmbeddingsResult. The EmbeddingsResult is compatible with MutableMapping - :rtype: ~azure.ai.inference.models.EmbeddingsResult - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "data": [ - { - "embedding": [ - 0.0 # List of embeddings value for the input prompt. - These represent a measurement of the vector-based relatedness of the - provided input. Required. - ], - "index": 0 # Index of the prompt to which the EmbeddingItem - corresponds. Required. - } - ], - "id": "str", # Unique identifier for the embeddings result. Required. - "model": "str", # The model ID used to generate this result. Required. - "usage": { - "capacity_type": "str", # Indicates whether your capacity has been - affected by the usage amount (token count) reported here. Required. Known - values are: "usage" and "fixed". - "input_tokens": 0, # Number of tokens in the request prompt. - Required. - "prompt_tokens": 0, # Number of tokens used for the prompt sent to - the AI model. Typically identical to ``input_tokens``. However, certain AI - models may add extra tokens to the input hence the number can be higher. (for - example when input_type="query"). Required. - "total_tokens": 0 # Total number of tokens transacted in this - request/response. Required. - } - } - """ + ) -> _models.EmbeddingsResult: ... @distributed_trace_async async def _embed( @@ -928,14 +738,15 @@ async def _embed( body: Union[JSON, IO[bytes]] = _Unset, *, input: List[_models.EmbeddingInput] = _Unset, - unknown_params: Optional[Union[str, _models.UnknownParams]] = None, + unknown_params: Optional[Union[str, _models._enums.UnknownParams]] = None, dimensions: Optional[int] = None, encoding_format: Optional[Union[str, _models.EmbeddingEncodingFormat]] = None, input_type: Optional[Union[str, _models.EmbeddingInputType]] = None, **kwargs: Any ) -> _models.EmbeddingsResult: # pylint: disable=line-too-long - """Return the embeddings for given images. + """Return the embedding vectors for given images. + The method makes a REST API call to the ``/images/embeddings`` route on the given endpoint. :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] @@ -1096,6 +907,7 @@ async def _embed( async def _get_model_info(self, **kwargs: Any) -> _models.ModelInfo: # pylint: disable=line-too-long """Returns information about the AI model. + The method makes a REST API call to the ``/info`` route on the given endpoint. :return: ModelInfo. The ModelInfo is compatible with MutableMapping :rtype: ~azure.ai.inference.models.ModelInfo diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index ff7e4a956a99..811e0af277cf 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -177,7 +177,7 @@ async def complete( Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. + interaction of these two settings is difficult to predict. Supported range is [0, 1]. Default value is None. :paramtype temperature: float @@ -233,7 +233,7 @@ async def complete( Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data. - :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -346,7 +346,7 @@ async def complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. + interaction of these two settings is difficult to predict. Supported range is [0, 1]. Default value is None. :paramtype top_p: float @@ -414,7 +414,7 @@ async def complete( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} elif isinstance(body, dict) and "stream" in body and isinstance(body["stream"], bool): stream = body["stream"] @@ -543,7 +543,7 @@ async def embed( """Return the embedding vectors for given text prompts. The method makes a REST API call to the `/embeddings` route on the given endpoint. - :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
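(The hunks in this region preserve the `model_extras` behavior: extra keys are merged into the JSON request body, and the `unknown-parameters` header is set so the service passes them through to the model. A minimal sketch of that from the caller's side, assuming the synchronous `embed` mirrors the async code changed here; the environment-variable names and the extra key are assumptions.)

```python
# A sketch of passing provider-specific parameters via model_extras: the
# client merges the extra keys into the JSON request body and sends the
# `unknown-parameters: pass_through` header, so the service forwards them
# to the model instead of rejecting the request.
import os

from azure.ai.inference import EmbeddingsClient
from azure.core.credentials import AzureKeyCredential

client = EmbeddingsClient(
    endpoint=os.environ["EMBEDDINGS_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["EMBEDDINGS_KEY"]),
)

response = client.embed(
    input=["first phrase", "second phrase", "third phrase"],
    # Hypothetical model-specific option; whether it is accepted depends
    # on the deployed model.
    model_extras={"custom_option": True},
)

for item in response.data:
    print(f"data[{item.index}]: length={len(item.embedding)}")
```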
@@ -646,7 +646,7 @@ async def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -775,7 +775,7 @@ async def embed( """Return the embedding vectors for given images. The method makes a REST API call to the `/images/embeddings` route on the given endpoint. - :param body: An object of type MutableMapping[str, Any], such as a dictionary, that + :param body: An object of type MutableMapping[str, Any], such as a dictionary, that specifies the full request payload. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -878,7 +878,7 @@ async def embed( } if model_extras is not None and bool(model_extras): body.update(model_extras) - _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access + _unknown_params = _models._enums.UnknownParams.PASS_THROUGH # pylint: disable=protected-access body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py index aff95c6a3c99..a89a3d78800e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py @@ -11,6 +11,8 @@ from ._models import ChatCompletions from ._models import ChatCompletionsFunctionToolCall from ._models import ChatCompletionsFunctionToolDefinition +from ._models import ChatCompletionsFunctionToolSelection +from ._models import ChatCompletionsNamedFunctionToolSelection from ._models import ChatCompletionsNamedToolSelection from ._models import ChatCompletionsToolCall from ._models import ChatCompletionsToolDefinition @@ -36,7 +38,6 @@ from ._enums import ChatRole from ._enums import EmbeddingEncodingFormat from ._enums import EmbeddingInputType -from ._enums import UnknownParams from ._enums import CompletionsFinishReason from ._enums import ModelType @@ -52,6 +53,8 @@ "ChatCompletions", "ChatCompletionsFunctionToolCall", "ChatCompletionsFunctionToolDefinition", + "ChatCompletionsFunctionToolSelection", + "ChatCompletionsNamedFunctionToolSelection", "ChatCompletionsNamedToolSelection", "ChatCompletionsToolCall", "ChatCompletionsToolDefinition", @@ -76,7 +79,6 @@ "ChatRole", "EmbeddingEncodingFormat", "EmbeddingInputType", - "UnknownParams", "CompletionsFinishReason", "ModelType" ] diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py index a74712006c24..59baa8783ca5 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py @@ -45,6 +45,8 @@ class ChatCompletionsToolSelectionPreset(str, Enum, metaclass=CaseInsensitiveEnu standard chat completions response. 
Response content may still be influenced by the provided tool definitions.""" + REQUIRED = "required" + """Specifies that the model should respond with a call to one or more tools.""" class ChatRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py index 32670c1b32fa..b9904106583f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py @@ -366,18 +366,107 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="function", **kwargs) +class ChatCompletionsFunctionToolSelection(_model_base.Model): + """A tool selection of a specific, named function tool that will limit chat completions to using + the named function. + + All required parameters must be populated in order to send to server. + + :ivar name: The name of the function that should be called. Required. + :vartype name: str + """ + + name: str = rest_field() + """The name of the function that should be called. Required.""" + + @overload + def __init__( + self, + *, + name: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class ChatCompletionsNamedToolSelection(_model_base.Model): """An abstract representation of an explicit, named tool selection to use for a chat completions request. + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatCompletionsNamedFunctionToolSelection + All required parameters must be populated in order to send to server. - :ivar type: The object type. Required. + :ivar type: The object type. Required. Default value is None. :vartype type: str """ + __mapping__: Dict[str, _model_base.Model] = {} type: str = rest_discriminator(name="type") - """The object type. Required.""" + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ChatCompletionsNamedFunctionToolSelection( + ChatCompletionsNamedToolSelection, discriminator="function" +): # pylint: disable=name-too-long + """A tool selection of a specific, named function tool that will limit chat completions to using + the named function. + + All required parameters must be populated in order to send to server. + + :ivar type: The object type, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The function that should be called. Required. + :vartype function: ~azure.ai.inference.models.ChatCompletionsFunctionToolSelection + """ + + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.ChatCompletionsFunctionToolSelection" = rest_field() + """The function that should be called. 
Required.""" + + @overload + def __init__( + self, + *, + function: "_models.ChatCompletionsFunctionToolSelection", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="function", **kwargs) class ChatResponseMessage(_model_base.Model): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index 54ef4131994a..3de024ef3643 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -20,6 +20,7 @@ # mypy: disable-error-code="union-attr" # pyright: reportAttributeAccessIssue=false + def sample_chat_completions(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py index ff15f0affe80..89fbd606ae27 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_bytes.py @@ -24,6 +24,7 @@ import io + def sample_chat_completions_from_input_bytes(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py index 9849f6cc623e..01ee44ef1cd2 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py @@ -49,9 +49,7 @@ def sample_chat_completions_streaming_with_entra_id_auth(): # https://learn.microsoft.com/python/api/overview/azure/identity-readme#defaultazurecredential client = ChatCompletionsClient( - endpoint=endpoint, - credential=DefaultAzureCredential(), - headers={"azureml-model-deployment": model_deployment} + endpoint=endpoint, credential=DefaultAzureCredential(), headers={"azureml-model-deployment": model_deployment} ) response = client.complete( @@ -59,7 +57,7 @@ def sample_chat_completions_streaming_with_entra_id_auth(): messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ] + ], ) for update in response: diff --git a/sdk/ai/azure-ai-inference/tsp-location.yaml b/sdk/ai/azure-ai-inference/tsp-location.yaml index 2d126c5532d2..e107572a4177 100644 --- a/sdk/ai/azure-ai-inference/tsp-location.yaml +++ b/sdk/ai/azure-ai-inference/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/ModelClient -commit: be191d796831bbdd495781dffb8a72443aee6a13 +commit: 907e4e19cf76132ea281e060fedcfee0eb671e92 repo: Azure/azure-rest-api-specs additionalDirectories: From aed30bf922c232ed60c2e47ee9f009cbd07ff9bf Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 12:24:02 -0700 Subject: [PATCH 097/112] Fix mypy error in sample --- .../samples/sample_chat_completions_with_model_extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py index 
3176b2a9cf47..b32a44bf1367 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py @@ -37,7 +37,7 @@ def sample_chat_completions_with_model_extras(): exit() from azure.ai.inference import ChatCompletionsClient - from azure.ai.inference.models import SystemMessage, UserMessage, UnknownParams + from azure.ai.inference.models import SystemMessage, UserMessage from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) From c32740314cba2b5b255c3ac27d550422d06ffd35 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 12:39:02 -0700 Subject: [PATCH 098/112] Fix missing ranges in ref-doc comments --- .../azure/ai/inference/_patch.py | 21 ++++++++++++------- .../azure/ai/inference/aio/_patch.py | 21 ++++++++++++------- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index cab2ddb1187c..a34ca1932c6f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -191,7 +191,9 @@ def complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. + Default value is None. :paramtype top_p: float :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int @@ -317,25 +319,28 @@ def complete( ``unknown-parameters`` request header. Default value is None. :paramtype model_extras: dict[str, Any] :keyword frequency_penalty: A value that influences the probability of generated tokens - appearing based on their cumulative - frequency in generated text. + appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. + decrease the likelihood of the model repeating the same statements verbatim. + Supported range is [-2, 2]. + Default value is None. :paramtype frequency_penalty: float :keyword presence_penalty: A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. Default value is None. + the model's likelihood to output new topics. + Supported range is [-2, 2]. + Default value is None. :paramtype presence_penalty: float :keyword temperature: The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. 
+ Supported range is [0, 1]. + Default value is None. :paramtype temperature: float :keyword top_p: An alternative to sampling with temperature called nucleus sampling. This value causes the diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 811e0af277cf..0ea5eede4fe4 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -187,7 +187,9 @@ async def complete( value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. + Default value is None. :paramtype top_p: float :keyword max_tokens: The maximum number of tokens to generate. Default value is None. :paramtype max_tokens: int @@ -320,25 +322,28 @@ async def complete( HTTP request header. Default value is None. :paramtype extras: dict[str, str] :keyword frequency_penalty: A value that influences the probability of generated tokens - appearing based on their cumulative - frequency in generated text. + appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and - decrease the likelihood of the model repeating the same statements verbatim. Default value is - None. + decrease the likelihood of the model repeating the same statements verbatim. + Supported range is [-2, 2]. + Default value is None. :paramtype frequency_penalty: float :keyword presence_penalty: A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase - the - model's likelihood to output new topics. Default value is None. + the model's likelihood to output new topics. + Supported range is [-2, 2]. + Default value is None. :paramtype presence_penalty: float :keyword temperature: The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the - interaction of these two settings is difficult to predict. Default value is None. + interaction of these two settings is difficult to predict. + Supported range is [0, 1]. + Default value is None. :paramtype temperature: float :keyword top_p: An alternative to sampling with temperature called nucleus sampling. 
This value causes the From 27f2ed6c1fd2725ce782c6012bb6afef4efb174e Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 13:19:44 -0700 Subject: [PATCH 099/112] Remove unneeded cast --- .../azure-ai-inference/azure/ai/inference/models/_patch.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 77cbdfd60673..203a5360d48b 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -117,8 +117,7 @@ def _read_next_block(self) -> bool: if self._ENABLE_CLASS_LOGS: logger.debug("[Reading next block]") try: - # Use 'cast' to make 'pyright' error go away - element = cast(Iterator[bytes], self._bytes_iterator).__next__() + element = self._bytes_iterator.__next__() except StopIteration: self.close() return True @@ -160,8 +159,7 @@ async def _read_next_block_async(self) -> bool: if self._ENABLE_CLASS_LOGS: logger.debug("[Reading next block]") try: - # Use 'cast' to make 'pyright' error go away - element = await cast(AsyncIterator[bytes], self._bytes_iterator).__anext__() + element = await self._bytes_iterator.__anext__() except StopAsyncIteration: await self.aclose() return True From 2c92e9dd8e82b769d1d0c8f0e9ba6c100913d6ca Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Jun 2024 14:26:50 -0700 Subject: [PATCH 100/112] Fix pylint error --- sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index 203a5360d48b..c9186e266444 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -12,7 +12,7 @@ import queue import re -from typing import List, AsyncIterator, Iterator, cast +from typing import List, AsyncIterator, Iterator from azure.core.rest import HttpResponse, AsyncHttpResponse from .. import models as _models From 98cd4fd1b0b91599b89f3ab5c511a2986c636dfe Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 4 Jun 2024 05:55:28 -0700 Subject: [PATCH 101/112] Fix typos & method names. Thanks Jarno! --- sdk/ai/azure-ai-inference/CHANGELOG.md | 2 +- sdk/ai/azure-ai-inference/README.md | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-inference/CHANGELOG.md b/sdk/ai/azure-ai-inference/CHANGELOG.md index c2206f094797..402c31bca81e 100644 --- a/sdk/ai/azure-ai-inference/CHANGELOG.md +++ b/sdk/ai/azure-ai-inference/CHANGELOG.md @@ -1,5 +1,5 @@ # Release History -## 1.0.0b1 (2024-06-07) +## 1.0.0b1 (2024-06-11) - Initial beta version diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index a842fb2f3917..b185e58c4cfc 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -1,7 +1,7 @@ # Azure AI Inference client library for Python The client Library (in preview) allows you to generate predictions from foundational models deployed to Azure AI Studio and Azure Machine Learning. 
It supports -Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes services calls using REST AP version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). +Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview). Use the model inference client library to: @@ -206,7 +206,7 @@ print(response.choices[0].message.content) The following types of messages are supported: `SystemMessage`,`UserMessage`, `AssistantMessage`, `ToolMessage` (See sample [sample_chat_completions_with_tools.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py) for usage of `ToolMessage`). -Alternativley you can provide the messages as dictionary instead of using the strongly typed classes like `SystemMessage` and `UserMessage`: + +Alternatively, you can provide the messages as a dictionary instead of using the strongly typed classes like `SystemMessage` and `UserMessage`: @@ -234,7 +234,7 @@ response = client.complete( -To generate completions for additional messages, simply call `client.create` multiple times using the same `client`. +To generate completions for additional messages, simply call `client.complete` multiple times using the same `client`. ### Streaming chat completions example @@ -336,7 +336,7 @@ data[1]: length=1024, [0.036590576, -0.0059547424, ..., 0.011405945, 0.004863739 data[2]: length=1024, [0.04196167, 0.029083252, ..., -0.0027484894, 0.0073127747] ``` -To generate embeddings for additional phrases, simply call `client.create` multiple times using the same `client`. +To generate embeddings for additional phrases, simply call `client.embed` multiple times using the same `client`. ## Troubleshooting ### Exceptions -The `create`, `embed` and `get_model_info` methods on the clients raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue: +The `complete`, `embed` and `get_model_info` methods on the clients raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will be the HTTP response status code. The exception's `error.message` contains a detailed message that will allow you to diagnose the issue: ```python from azure.core.exceptions import HttpResponseError @@ -390,7 +390,7 @@ from azure.core.exceptions import HttpResponseError ... try: - result = client.create( ...
) + result = client.complete( ... ) except HttpResponseError as e: print(f"Status code: {e.status_code} ({e.reason})") ``` From a44df4e1c698dc8c7c30f006d47c853332435cfe Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 5 Jun 2024 13:10:41 -0700 Subject: [PATCH 102/112] Address Johan's code review comments. Thanks Johan! --- sdk/ai/azure-ai-inference/README.md | 12 ++++-- .../azure/ai/inference/_patch.py | 36 ++++++++---------- .../azure/ai/inference/aio/_patch.py | 38 ++++++++----------- .../azure/ai/inference/models/_patch.py | 6 --- sdk/ai/azure-ai-inference/samples/README.md | 2 +- .../sample_chat_completions_async.py | 26 ++++++------- ...chat_completions_from_input_bytes_async.py | 14 +++---- ..._chat_completions_from_input_json_async.py | 38 +++++++++---------- ...sample_chat_completions_streaming_async.py | 33 ++++++++-------- .../async_samples/sample_embeddings_async.py | 25 ++++++------ .../sample_image_embeddings_async.py | 25 ++++++------ .../async_samples/sample_load_client_async.py | 24 ++++++------ .../sample_chat_completions_azure_openai.py | 2 +- ...sample_chat_completions_from_input_json.py | 2 - ...ompletions_streaming_with_entra_id_auth.py | 4 +- .../sample_chat_completions_with_tools.py | 2 +- .../test_model_inference_async_client.py | 9 +++++ .../tests/test_model_inference_client.py | 11 ++++++ 18 files changed, 153 insertions(+), 156 deletions(-) diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md index b185e58c4cfc..ba60f40cdb6d 100644 --- a/sdk/ai/azure-ai-inference/README.md +++ b/sdk/ai/azure-ai-inference/README.md @@ -26,7 +26,7 @@ Note that for inference using OpenAI models hosted on Azure, you should be using * [Python 3.8](https://www.python.org/) or later installed, including [pip](https://pip.pypa.io/en/stable/). * An [Azure subscription](https://azure.microsoft.com/free). * An [AI Model from the catalog](https://ai.azure.com/explore/models) deployed through Azure AI Studio. -* To construct the client library, you will need to pass in the endpoint URL. The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). +* To construct the client library, you will need to pass in the endpoint URL. The endpoint URL has the form `https://your-host-name.your-azure-region.inference.ai.azure.com`, where `your-host-name` is your unique model deployment host name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). * Depending on your model deployment and authentication preference, you either need a key to authenticate against the service, or Entra ID credentials. The key is a 32-character string. ### Install the package @@ -82,6 +82,8 @@ client = ChatCompletionsClient( ### Create and authenticate a client directly, using Entra ID +_Note: At the time of this package release, not all deployments support Entra ID authentication. 
For those who do, follow the instructions below._ + To use an Entra ID token credential, first install the [azure-identity](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity) package: ```python @@ -96,10 +98,12 @@ from azure.identity import DefaultAzureCredential client = ChatCompletionsClient( endpoint=endpoint, - credential=DefaultAzureCredential() + credential=DefaultAzureCredential(exclude_interactive_browser_credential=False) ) ``` -During application development, you would typically set up the environment for authentication using Entra ID by first [Installing the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli), running `az login` in your console window, then entering your credentials in the browser window that was opened. The call to `DefaultAzureCredential()` will then succeed. + +During application development, you would typically set up the environment for authentication using Entra ID by first [Installing the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli), running `az login` in your console window, then entering your credentials in the browser window that was opened. The call to `DefaultAzureCredential()` will then succeed. Setting `exclude_interactive_browser_credential=False` in that call will enable launching a browser window if the user isn't already logged in. + ### Create and authenticate clients using `load_client` @@ -439,7 +443,7 @@ handler.setFormatter(formatter) By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct the client library, or when you call any of the client's `create` methods. ```python -# Create a Model Client with none redacted log +# Create a chat completions client with non-redacted logs client = ChatCompletionsClient( endpoint=endpoint, credential=AzureKeyCredential(key), diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index cab2ddb1187c..8f33014a42ae 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -84,12 +84,11 @@ def load_client( :raises ~azure.core.exceptions.HttpResponseError """ - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... - model_info = client.get_model_info() - client.close() + with ChatCompletionsClient(endpoint, credential, **kwargs) as client: # Pick any of the clients, it does not matter. + model_info = client.get_model_info() # mypy: disable-attr-defined # pyright: ignore _LOGGER.info("model_info=%s", model_info) - if model_info.model_type in (None, ""): + if not model_info.model_type: raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client."
) @@ -97,17 +96,17 @@ def load_client( # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): chat_completion_client = ChatCompletionsClient(endpoint, credential, **kwargs) - chat_completion_client._model_info = model_info # pylint: disable=protected-access + chat_completion_client._model_info = model_info # pylint: disable=protected-access,attribute-defined-outside-init return chat_completion_client if model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): embedding_client = EmbeddingsClient(endpoint, credential, **kwargs) - embedding_client._model_info = model_info # pylint: disable=protected-access + embedding_client._model_info = model_info # pylint: disable=protected-access,attribute-defined-outside-init return embedding_client if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: image_embedding_client = ImageEmbeddingsClient(endpoint, credential, **kwargs) - image_embedding_client._model_info = model_info # pylint: disable=protected-access + image_embedding_client._model_info = model_info # pylint: disable=protected-access,attribute-defined-outside-init return image_embedding_client raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") @@ -457,8 +456,6 @@ def complete( return _deserialize(_models._models.ChatCompletions, response.json()) # pylint: disable=protected-access - # Cache here the results of get_model_info call - _model_info: Optional[_models.ModelInfo] = None @distributed_trace def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: @@ -469,13 +466,13 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if self._model_info is None: - self._model_info = self._get_model_info(**kwargs) + if not hasattr(self, "_model_info"): + self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" + return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() class EmbeddingsClient(EmbeddingsClientGenerated): @@ -689,8 +686,6 @@ def embed( return deserialized # type: ignore - # Cache here the results of get_model_info call - _model_info: Optional[_models.ModelInfo] = None @distributed_trace def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: @@ -701,13 +696,13 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if self._model_info is None: - self._model_info = self._get_model_info(**kwargs) + if not hasattr(self, "_model_info"): + self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" + return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @@ -921,7 +916,6 @@ def embed( return deserialized # type: ignore - _model_info: Optional[_models.ModelInfo] = None @distributed_trace def get_model_info(self, **kwargs: Any) -> 
_models.ModelInfo: @@ -932,13 +926,13 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if self._model_info is None: - self._model_info = self._get_model_info(**kwargs) + if not hasattr(self, "_model_info"): + self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" + return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 0ea5eede4fe4..af3b0f8bfbd6 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -52,6 +52,7 @@ async def load_client( endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> Union[ChatCompletionsClientGenerated, EmbeddingsClientGenerated, ImageEmbeddingsClientGenerated]: + # pylint: disable=line-too-long """ Load a client from a given endpoint URL. The method makes a REST API call to the `/info` route on the given endpoint, to determine the model type and therefore which client to instantiate. @@ -72,12 +73,11 @@ async def load_client( :raises ~azure.core.exceptions.HttpResponseError """ - client = ChatCompletionsClient(endpoint, credential, **kwargs) # Pick any of the clients, it does not matter... - model_info = await client.get_model_info() - await client.close() + async with ChatCompletionsClient(endpoint, credential, **kwargs) as client: # Pick any of the clients, it does not matter. + model_info = await client.get_model_info() # mypy: disable-attr-defined # pyright: ignore _LOGGER.info("model_info=%s", model_info) - if model_info.model_type in (None, ""): + if not model_info.model_type: raise ValueError( "The AI model information is missing a value for `model type`. Cannot create an appropriate client." 
) @@ -85,17 +85,17 @@ async def load_client( # TODO: Remove "completions" and "embedding" once Mistral Large and Cohere fixes their model type if model_info.model_type in (_models.ModelType.CHAT, "completion"): chat_completion_client = ChatCompletionsClient(endpoint, credential, **kwargs) - chat_completion_client._model_info = model_info # pylint: disable=protected-access + chat_completion_client._model_info = model_info # pylint: disable=protected-access,attribute-defined-outside-init return chat_completion_client if model_info.model_type in (_models.ModelType.EMBEDDINGS, "embedding"): embedding_client = EmbeddingsClient(endpoint, credential, **kwargs) - embedding_client._model_info = model_info # pylint: disable=protected-access + embedding_client._model_info = model_info # pylint: disable=protected-access,attribute-defined-outside-init return embedding_client if model_info.model_type == _models.ModelType.IMAGE_EMBEDDINGS: image_embedding_client = ImageEmbeddingsClient(endpoint, credential, **kwargs) - image_embedding_client._model_info = model_info # pylint: disable=protected-access + image_embedding_client._model_info = model_info # pylint: disable=protected-access,attribute-defined-outside-init return image_embedding_client raise ValueError(f"No client available to support AI model type `{model_info.model_type}`") @@ -461,8 +461,6 @@ async def complete( return _deserialize(_models.ChatCompletions, response.json()) # pylint: disable=protected-access - # Cache here the results of get_model_info call - _model_info: Optional[_models.ModelInfo] = None @distributed_trace_async async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: @@ -473,13 +471,13 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if self._model_info is None: - self._model_info = await self._get_model_info(**kwargs) + if not hasattr(self, "_model_info"): + self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" + return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() class EmbeddingsClient(EmbeddingsClientGenerated): @@ -693,8 +691,6 @@ async def embed( return deserialized # type: ignore - # Cache here the results of get_model_info call - _model_info: Optional[_models.ModelInfo] = None @distributed_trace_async async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: @@ -705,13 +701,13 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if self._model_info is None: - self._model_info = await self._get_model_info(**kwargs) + if not hasattr(self, "_model_info"): + self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" + return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @@ -925,8 +921,6 @@ async def embed( return deserialized # type: ignore - # Cache here the results of get_model_info call 
- _model_info: Optional[_models.ModelInfo] = None @distributed_trace_async async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: @@ -937,13 +931,13 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if self._model_info is None: - self._model_info = await self._get_model_info(**kwargs) + if not hasattr(self, "_model_info"): + self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" + return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py index c9186e266444..bbeda9d47f64 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_patch.py @@ -123,9 +123,6 @@ def _read_next_block(self) -> bool: return True return self._deserialize_and_add_to_queue(element) - def __enter__(self): - return self - def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.close() @@ -165,9 +162,6 @@ async def _read_next_block_async(self) -> bool: return True return self._deserialize_and_add_to_queue(element) - def __enter__(self): - return self - def __exit__(self, exc_type, exc_val, exc_tb) -> None: asyncio.run(self.aclose()) diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md index 33cc3fe7056c..72685f10d1a1 100644 --- a/sdk/ai/azure-ai-inference/samples/README.md +++ b/sdk/ai/azure-ai-inference/samples/README.md @@ -78,7 +78,7 @@ See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ * Clone or download this sample repository * Open a command prompt / terminal window in this samples folder -* Install the Image Analysis client library for Python with pip: +* Install the client library for Python with pip: ```bash pip install azure-ai-inference ``` diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index d7637a030a65..0c310563d24d 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -17,7 +17,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" -# mypy: disable-error-code="union-attr" +# mypy: disable-error-code="union-attr,attr-defined" # pyright: reportAttributeAccessIssue=false import asyncio @@ -38,21 +38,19 @@ async def sample_chat_completions_async(): print("Set them before running this sample.") exit() - # Create a Model Client for synchronous operations - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + # Create a chat completion client for synchronous operations + async with ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client: - # Do a single chat completion operation - response = await client.complete( - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), - ] - ) + # Do a single chat completion operation + response = await client.complete( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) - # Print response the the console - print(response.choices[0].message.content) - - await client.close() + # Print response the the console + print(response.choices[0].message.content) async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py index 6aaeb654ebae..3d71e92d62f9 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py @@ -18,7 +18,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" +# mypy: disable-error-code="union-attr,attr-defined" # pyright: reportAttributeAccessIssue=false import asyncio @@ -39,15 +39,13 @@ async def sample_chat_completions_from_input_bytes_async(): from azure.ai.inference.aio import ChatCompletionsClient from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + async with ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client: - # Make a chat completion call, by directly providing the - # HTTP request body as IO[bytes], containing chat messages. - response = await client.complete(read_text_file("example_chat.json")) + # Make a chat completion call, by directly providing the + # HTTP request body as IO[bytes], containing chat messages. + response = await client.complete(read_text_file("example_chat.json")) - print(response.choices[0].message.content) - - await client.close() + print(response.choices[0].message.content) def read_text_file(file_name: str) -> io.BytesIO: diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py index cb3e6bf40de7..ccd9b43aaff8 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py @@ -18,7 +18,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" -# mypy: disable-error-code="union-attr" +# mypy: disable-error-code="union-attr,attr-defined" # pyright: reportAttributeAccessIssue=false import asyncio @@ -37,28 +37,26 @@ async def sample_chat_completions_from_input_json_async(): print("Set them before running this sample.") exit() - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) + async with ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client: - request_body = { - "messages": [ - { - "role": "system", - "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", - }, - {"role": "user", "content": "What year was construction of the International Space Station mostly done?"}, - { - "role": "assistant", - "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", - }, - {"role": "user", "content": "And what was the estimated cost to build it?"}, - ] - } + request_body = { + "messages": [ + { + "role": "system", + "content": "You are an AI assistant that helps people find information. Your replies are short, no more than two sentences.", + }, + {"role": "user", "content": "What year was construction of the International Space Station mostly done?"}, + { + "role": "assistant", + "content": "The main construction of the International Space Station (ISS) was completed between 1998 and 2011. During this period, more than 30 flights by US space shuttles and 40 by Russian rockets were conducted to transport components and modules to the station.", + }, + {"role": "user", "content": "And what was the estimated cost to build it?"}, + ] + } - response = await client.complete(request_body) + response = await client.complete(request_body) - print(response.choices[0].message.content) - - await client.close() + print(response.choices[0].message.content) async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index b866724baa93..dd646eaa60e9 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -17,7 +17,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" +# mypy: disable-error-code="union-attr,attr-defined" # pyright: reportAttributeAccessIssue=false, reportGeneralTypeIssues=false import asyncio @@ -39,23 +39,20 @@ async def sample_chat_completions_streaming_async(): exit() # Create chat completions client for synchronous operations - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - - # Do a single streaming chat completion operation. Start the operation and get a Future object. 
- response = await client.complete( - stream=True, - messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="Give me 5 good reasons why I should exercise every day."), - ], - ) - - # Iterate on the response to get chat completion updates, as they arrive from the service - async for update in response: - print(update.choices[0].delta.content or "", end="") - - # Remember to always close the asynchronous client when you are done with it - await client.close() + async with ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client: + + # Do a single streaming chat completion operation. Start the operation and get a Future object. + response = await client.complete( + stream=True, + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), + ], + ) + + # Iterate on the response to get chat completion updates, as they arrive from the service + async for update in response: + print(update.choices[0].delta.content or "", end="") async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index 561f66c4d22e..d094da4e5e72 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -16,6 +16,9 @@ `your-azure-region` is the Azure region where your model is deployed. 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ +# mypy: disable-error-code="attr-defined" +# pyright: reportAttributeAccessIssue=false + import asyncio @@ -33,20 +36,18 @@ async def sample_embeddings_async(): print("Set them before running this sample.") exit() - # Create an Image Analysis client for synchronous operations - client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) - - # Do a single embeddings operation. Start the operation and get a Future object. - response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) + # Create a text embeddings client for synchronous operations + async with EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client: - print("Embeddings response:") - for item in response.data: - length = len(item.embedding) - print( - f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" - ) + # Do a single embeddings operation. Start the operation and get a Future object. + response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) - await client.close() + print("Embeddings response:") + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) async def main(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index 0a948480c56a..c2b53062ef87 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -17,6 +17,9 @@ `your-azure-region` is the Azure region where your model is deployed. 
2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret.
 """
+# mypy: disable-error-code="attr-defined"
+# pyright: reportAttributeAccessIssue=false
+

 import asyncio


@@ -41,20 +44,18 @@ async def sample_image_embeddings_async():
     with open("sample2.png", "rb") as f:
         image2: str = base64.b64encode(f.read()).decode("utf-8")

-    client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
-
-    # Do a single image embeddings operation. Start the operation and get a Future object.
-    response = await client.embed(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)])
+    async with ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(key)) as client:

-    print("Embeddings response:")
-    for item in response.data:
-        length = len(item.embedding)
-        print(
-            f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
-            f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
-        )
+        # Do a single image embeddings operation. Start the operation and await the response.
+        response = await client.embed(input=[EmbeddingInput(image=image1), EmbeddingInput(image=image2)])

-    await client.close()
+        print("Embeddings response:")
+        for item in response.data:
+            length = len(item.embedding)
+            print(
+                f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
+                f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
+            )


 async def main():
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py
index 7820a1711cff..7006dbef78e0 100644
--- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_load_client_async.py
@@ -35,22 +35,20 @@ async def sample_load_client_async():
     from azure.ai.inference.aio import load_client, EmbeddingsClient
     from azure.core.credentials import AzureKeyCredential

-    client = await load_client(endpoint=endpoint, credential=AzureKeyCredential(key))
+    async with await load_client(endpoint=endpoint, credential=AzureKeyCredential(key)) as client:

-    # This should create a client of type `EmbeddingsClient`
-    print(f"Created client of type `{type(client).__name__}`.")
+        # This should create a client of type `EmbeddingsClient`
+        print(f"Created client of type `{type(client).__name__}`.")

-    if isinstance(client, EmbeddingsClient):
-        response = await client.embed(input=["first phrase", "second phrase", "third phrase"])
+        if isinstance(client, EmbeddingsClient):
+            response = await client.embed(input=["first phrase", "second phrase", "third phrase"])

-        print("Embeddings response:")
-        for item in response.data:
-            length = len(item.embedding)
-            print(
-                f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
-            )
-
-    await client.close()
+            print("Embeddings response:")
+            for item in response.data:
+                length = len(item.embedding)
+                print(
+                    f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, ..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
+                )


 async def main():
diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py
index b3318ea725c4..0f31be308610 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py
+++
b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py @@ -72,7 +72,7 @@ def sample_chat_completions_azure_openai(): client = ChatCompletionsClient( endpoint=endpoint, - credential=DefaultAzureCredential(), + credential=DefaultAzureCredential(exclude_interactive_browser_credential=False), credential_scopes=["https://cognitiveservices.azure.com/.default"], api_version="2024-02-15-preview", # AOAI api-version. Update as needed. ) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py index 5e93135c7b09..f3495f7d4904 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_json.py @@ -61,8 +61,6 @@ def sample_chat_completions_from_input_json(): print(response.choices[0].message.content) - client.close() - if __name__ == "__main__": sample_chat_completions_from_input_json() diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py index 01ee44ef1cd2..aac9a6e290d4 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_streaming_with_entra_id_auth.py @@ -49,7 +49,9 @@ def sample_chat_completions_streaming_with_entra_id_auth(): # https://learn.microsoft.com/python/api/overview/azure/identity-readme#defaultazurecredential client = ChatCompletionsClient( - endpoint=endpoint, credential=DefaultAzureCredential(), headers={"azureml-model-deployment": model_deployment} + endpoint=endpoint, + credential=DefaultAzureCredential(exclude_interactive_browser_credential=False), + headers={"azureml-model-deployment": model_deployment} ) response = client.complete( diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 9359a87f7c28..866ef6537d2a 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -70,7 +70,7 @@ def get_flight_info(origin_city: str, destination_city: str): flight_info = ChatCompletionsFunctionToolDefinition( function=FunctionDefinition( name="get_flight_info", - description="Returns information about the next flight between two cities. This inclues the name of the airline, flight number and the date and time of the next flight", + description="Returns information about the next flight between two cities. 
This includes the name of the airline, flight number and the date and time of the next flight", parameters={ "type": "object", "properties": { diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index 32d777686d7e..b1334a49bb2b 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -26,6 +26,8 @@ async def test_async_load_embeddings_client(self, **kwargs): client = await self._load_async_embeddings_client(**kwargs) assert isinstance(client, async_sdk.EmbeddingsClient) + assert hasattr(client, "_model_info") + response1 = await client.get_model_info() self._print_model_info_result(response1) self._validate_model_info_result(response1, "embedding") # TODO: This should be ModelType.EMBEDDINGS once the model is fixed @@ -36,7 +38,10 @@ async def test_async_load_embeddings_client(self, **kwargs): @recorded_by_proxy_async async def test_async_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) + assert not hasattr(client, "_model_info") + response1 = await client.get_model_info() + assert hasattr(client, "_model_info") self._print_model_info_result(response1) self._validate_model_info_result( response1, "embedding" @@ -71,6 +76,8 @@ async def test_async_load_chat_completions_client(self, **kwargs): client = await self._load_async_chat_client(**kwargs) assert isinstance(client, async_sdk.ChatCompletionsClient) + assert hasattr(client, "_model_info") + response1 = await client.get_model_info() self._print_model_info_result(response1) self._validate_model_info_result( @@ -82,7 +89,9 @@ async def test_async_load_chat_completions_client(self, **kwargs): @recorded_by_proxy_async async def test_async_get_model_info_on_chat_client(self, **kwargs): client = self._create_async_chat_client(**kwargs) + assert not hasattr(client, "_model_info") response1 = await client.get_model_info() + assert hasattr(client, "_model_info") self._print_model_info_result(response1) self._validate_model_info_result( response1, "completion" diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 7d3aaf9237ed..41c03a826085 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -25,6 +25,7 @@ def test_load_embeddings_client(self, **kwargs): client = self._load_embeddings_client(**kwargs) assert isinstance(client, sdk.EmbeddingsClient) + assert hasattr(client, "_model_info") response1 = client.get_model_info() self._print_model_info_result(response1) self._validate_model_info_result( @@ -37,7 +38,11 @@ def test_load_embeddings_client(self, **kwargs): def test_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_embeddings_client(**kwargs) + assert not hasattr(client, "_model_info") + response1 = client.get_model_info() + assert hasattr(client, "_model_info") + self._print_model_info_result(response1) self._validate_model_info_result( response1, "embedding" @@ -72,6 +77,8 @@ def test_load_chat_completions_client(self, **kwargs): client = self._load_chat_client(**kwargs) assert isinstance(client, sdk.ChatCompletionsClient) + assert hasattr(client, "_model_info") + response1 = client.get_model_info() self._print_model_info_result(response1) 
self._validate_model_info_result(
@@ -84,7 +91,11 @@ def test_get_model_info_on_chat_client(self, **kwargs):

         client = self._create_chat_client(**kwargs)

+        assert not hasattr(client, "_model_info")
+
         response1 = client.get_model_info()
+        assert hasattr(client, "_model_info")
+
         self._print_model_info_result(response1)
         self._validate_model_info_result(
             response1, "completion"

From c0958e3dc4a0d94d1c40fb18632bd4e5da562235 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Wed, 5 Jun 2024 14:17:31 -0700
Subject: [PATCH 103/112] Fix mypy errors

---
 sdk/ai/azure-ai-inference/README.md                        | 4 ++--
 sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py     | 2 +-
 sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index ba60f40cdb6d..e4709bb620bf 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -443,7 +443,7 @@ handler.setFormatter(formatter)
 By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, set the method argument `logging_enable = True` when you construct the client library, or when you call any of the client's `create` methods.

 ```python
-# Create a chat completions client with none redacted logs
+# Create a chat completions client with non-redacted logs
 client = ChatCompletionsClient(
     endpoint=endpoint,
     credential=AzureKeyCredential(key),
@@ -451,7 +451,7 @@ client = ChatCompletionsClient(
 )
 ```

-None redacted logs are generated for log level `logging.DEBUG` only. Be sure to protect none redacted logs to avoid compromising security. For more information see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging)
+Non-redacted logs are generated for log level `logging.DEBUG` only. Be sure to protect non-redacted logs to avoid compromising security. For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging)

 ## Next steps

diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py
index 8f33014a42ae..51c436e2a8f6 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py
@@ -85,7 +85,7 @@ def load_client(
     """
     with ChatCompletionsClient(endpoint, credential, **kwargs) as client:
         # Pick any of the clients, it does not matter.
-        model_info = client.get_model_info()  # mypy: disable-attr-defined # pyright: ignore
+        model_info = client.get_model_info()  # type: ignore
         _LOGGER.info("model_info=%s", model_info)

         if not model_info.model_type:
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py
index af3b0f8bfbd6..fa4f996f0224 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py
@@ -74,7 +74,7 @@ async def load_client(
     """
     async with ChatCompletionsClient(endpoint, credential, **kwargs) as client:
         # Pick any of the clients, it does not matter.
-        model_info = await client.get_model_info()  # mypy: disable-attr-defined # pyright: ignore
+        model_info = await client.get_model_info()  # type: ignore
         _LOGGER.info("model_info=%s", model_info)

         if not model_info.model_type:

From 2cbb1cf95172a4d12f5cceecfb250b20a6d0d3b6 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Wed, 5 Jun 2024 15:24:43 -0700
Subject: [PATCH 104/112] Minor update to root README.md

---
 sdk/ai/azure-ai-inference/README.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index e4709bb620bf..fa37ad708db6 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -1,7 +1,6 @@
 # Azure AI Inference client library for Python

-The client Library (in preview) allows you to generate predictions from foundational models deployed to Azure AI Studio and Azure Machine Learning. It supports
-Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes services calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+The client library (in preview) allows you to generate predictions from foundational models deployed to Azure AI Studio and Azure Machine Learning Studio. It supports Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).

 Use the model inference client library to:

@@ -104,7 +103,6 @@ client = ChatCompletionsClient(

 During application development, you would typically set up the environment for authentication using Entra ID by first [Installing the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli), running `az login` in your console window, then entering your credentials in the browser window that was opened. The call to `DefaultAzureCredential()` will then succeed. Setting `exclude_interactive_browser_credential=False` in that call will enable launching a browser window if the user isn't already logged in.
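To make that flow concrete, here is a minimal sketch of the pattern the paragraph above describes; the endpoint URL below is a placeholder, not a value from this repository, and it assumes the `azure-identity` package is installed:

```python
# Sketch only: Entra ID authentication with interactive browser fallback enabled.
# The endpoint is a placeholder; replace it with your model deployment endpoint.
from azure.ai.inference import ChatCompletionsClient
from azure.identity import DefaultAzureCredential

client = ChatCompletionsClient(
    endpoint="https://your-deployment-name.eastus2.inference.ai.azure.com",
    # Opens a browser window for sign-in if no cached credential (e.g. from `az login`) is found:
    credential=DefaultAzureCredential(exclude_interactive_browser_credential=False),
)
```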
-
 ### Create and authenticate clients using `load_client`

 As an alternative to creating a specific client directly, you can use the function `load_client` to return the relevant client (of types `ChatCompletionsClient` or `EmbeddingsClient`) based on the provided endpoint:

From 0d03ad2ed5c266deed840ec69a2c2396ef47f5b4 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Wed, 5 Jun 2024 17:06:59 -0700
Subject: [PATCH 105/112] Remove capacity_type

---
 .../azure/ai/inference/models/__init__.py |  2 --
 .../azure/ai/inference/models/_enums.py   |  9 ---------
 .../azure/ai/inference/models/_models.py  | 14 --------------
 3 files changed, 25 deletions(-)

diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
index a89a3d78800e..8a7c4bbbb7b3 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/__init__.py
@@ -32,7 +32,6 @@
 from ._models import ToolMessage
 from ._models import UserMessage

-from ._enums import CapacityType
 from ._enums import ChatCompletionsResponseFormat
 from ._enums import ChatCompletionsToolSelectionPreset
 from ._enums import ChatRole
@@ -73,7 +72,6 @@
     "SystemMessage",
     "ToolMessage",
     "UserMessage",
-    "CapacityType",
    "ChatCompletionsResponseFormat",
     "ChatCompletionsToolSelectionPreset",
     "ChatRole",
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
index 59baa8783ca5..0d191c4d176d 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_enums.py
@@ -10,15 +10,6 @@
 from azure.core import CaseInsensitiveEnumMeta


-class CapacityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """Whether your capacity has been affected by the usage amount (token count) reported here."""
-
-    USAGE = "usage"
-    """Your capacity has been affected by the usage amount (token count) reported here."""
-    FIXED = "fixed"
-    """Your capacity has not been affected by the usage amount (token count) reported here."""
-
-
 class ChatCompletionsResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """A representation of a response format configuration usable by Chat Completions. Can be
     used to enable JSON
diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
index b9904106583f..66fa73e9173b 100644
--- a/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
+++ b/sdk/ai/azure-ai-inference/azure/ai/inference/models/_models.py
@@ -522,9 +522,6 @@ class CompletionsUsage(_model_base.Model):

     All required parameters must be populated in order to send to server.

-    :ivar capacity_type: Indicates whether your capacity has been affected by the usage amount
-     (token count) reported here. Required. Known values are: "usage" and "fixed".
-    :vartype capacity_type: str or ~azure.ai.inference.models.CapacityType
     :ivar completion_tokens: The number of tokens generated across all completions emissions.
      Required.
     :vartype completion_tokens: int
@@ -536,9 +533,6 @@
     :vartype total_tokens: int
     """

-    capacity_type: Union[str, "_models.CapacityType"] = rest_field()
-    """Indicates whether your capacity has been affected by the usage amount (token count) reported
-    here. Required.
Known values are: \"usage\" and \"fixed\".""" completion_tokens: int = rest_field() """The number of tokens generated across all completions emissions. Required.""" prompt_tokens: int = rest_field() @@ -550,7 +544,6 @@ class CompletionsUsage(_model_base.Model): def __init__( self, *, - capacity_type: Union[str, "_models.CapacityType"], completion_tokens: int, prompt_tokens: int, total_tokens: int, @@ -695,9 +688,6 @@ class EmbeddingsUsage(_model_base.Model): All required parameters must be populated in order to send to server. - :ivar capacity_type: Indicates whether your capacity has been affected by the usage amount - (token count) reported here. Required. Known values are: "usage" and "fixed". - :vartype capacity_type: str or ~azure.ai.inference.models.CapacityType :ivar input_tokens: Number of tokens in the request prompt. Required. :vartype input_tokens: int :ivar prompt_tokens: Number of tokens used for the prompt sent to the AI model. Typically @@ -709,9 +699,6 @@ class EmbeddingsUsage(_model_base.Model): :vartype total_tokens: int """ - capacity_type: Union[str, "_models.CapacityType"] = rest_field() - """Indicates whether your capacity has been affected by the usage amount (token count) reported - here. Required. Known values are: \"usage\" and \"fixed\".""" input_tokens: int = rest_field() """Number of tokens in the request prompt. Required.""" prompt_tokens: int = rest_field() @@ -726,7 +713,6 @@ class EmbeddingsUsage(_model_base.Model): def __init__( self, *, - capacity_type: Union[str, "_models.CapacityType"], input_tokens: int, prompt_tokens: int, total_tokens: int, From 0883400feacdd2dbca13c208a45b0bda41b63b8b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 6 Jun 2024 07:07:38 -0700 Subject: [PATCH 106/112] Fix public patched methods not showing up in intellisense, when using async client with context manager. Thanks Johan! --- .../azure/ai/inference/_patch.py | 30 +++++++++++++++++++ .../azure/ai/inference/aio/_patch.py | 25 ++++++++++++++++ .../sample_chat_completions_async.py | 2 +- ...chat_completions_from_input_bytes_async.py | 2 +- ..._chat_completions_from_input_json_async.py | 2 +- ...sample_chat_completions_streaming_async.py | 2 +- .../async_samples/sample_embeddings_async.py | 1 - .../sample_image_embeddings_async.py | 1 - 8 files changed, 59 insertions(+), 6 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 51c436e2a8f6..5b91dbbf2723 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -12,6 +12,10 @@ 2. Add support for function load_client 3. Add support for get_model_info, while caching the result (all clients) 4. Add support for chat completion streaming (ChatCompletionsClient client only) +5. __enter__ (and __aenter__) method had to be overridden due to https://github.com/Azure/autorest.python/issues/2619 (all clients). + Otherwise intellisense did not show the patched public methods on the client object, when the client is defined using + context manager ("with" statement). 
+ """ import json import logging @@ -19,6 +23,8 @@ from io import IOBase from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING +from typing_extensions import Self + from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator import distributed_trace @@ -470,11 +476,19 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info + def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, + # and you see the equivalent auto-generated method in _client.py return "Self" + def __enter__(self) -> Self: + self._client.__enter__() + return self + + class EmbeddingsClient(EmbeddingsClientGenerated): @overload @@ -700,11 +714,19 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info + def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, + # and you see the equivalent auto-generated method in _client.py return "Self" + def __enter__(self) -> Self: + self._client.__enter__() + return self + + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload @@ -930,11 +952,19 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info + def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, + # and you see the equivalent auto-generated method in _client.py return "Self" + def __enter__(self) -> Self: + self._client.__enter__() + return self + + __all__: List[str] = [ "load_client", "ChatCompletionsClient", diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index fa4f996f0224..c76ea7e2198f 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -13,6 +13,8 @@ from io import IOBase from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING +from typing_extensions import Self + from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.tracing.decorator_async import distributed_trace_async @@ -475,11 +477,19 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info + def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else 
super().__str__() + # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, + # and you see the equivalent auto-generated method in _client.py return "Self" + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + class EmbeddingsClient(EmbeddingsClientGenerated): @overload @@ -705,11 +715,19 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info + def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, + # and you see the equivalent auto-generated method in _client.py return "Self" + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): @overload @@ -935,11 +953,18 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info + def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, + # and you see the equivalent auto-generated method in _client.py return "Self" + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + __all__: List[str] = [ "load_client", "ChatCompletionsClient", diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index 0c310563d24d..a14baa5df4e9 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -17,7 +17,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr,attr-defined" +# mypy: disable-error-code="union-attr" # pyright: reportAttributeAccessIssue=false import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py index 3d71e92d62f9..a57a093d30df 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_bytes_async.py @@ -18,7 +18,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" -# mypy: disable-error-code="union-attr,attr-defined" +# mypy: disable-error-code="union-attr" # pyright: reportAttributeAccessIssue=false import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py index ccd9b43aaff8..670638fd05cc 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_from_input_json_async.py @@ -18,7 +18,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr,attr-defined" +# mypy: disable-error-code="union-attr" # pyright: reportAttributeAccessIssue=false import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index dd646eaa60e9..bc518a523077 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -17,7 +17,7 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr,attr-defined" +# mypy: disable-error-code="union-attr" # pyright: reportAttributeAccessIssue=false, reportGeneralTypeIssues=false import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index d094da4e5e72..bba508ea2ef2 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -16,7 +16,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="attr-defined" # pyright: reportAttributeAccessIssue=false import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index c2b53062ef87..f9a8f9590fa1 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -17,7 +17,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. 
""" -# mypy: disable-error-code="attr-defined" # pyright: reportAttributeAccessIssue=false import asyncio From d0930a83aa07f2a35585d954f4a7e722cf34d9f4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 6 Jun 2024 07:27:05 -0700 Subject: [PATCH 107/112] Import 'Self' from Typing package starting from Python 3.11 --- sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py | 6 +++++- sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py | 7 ++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 5b91dbbf2723..d3e2f85a338e 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -23,7 +23,6 @@ from io import IOBase from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING -from typing_extensions import Self from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential @@ -54,6 +53,11 @@ else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +if sys.version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index c76ea7e2198f..4c80a8019109 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -13,7 +13,6 @@ from io import IOBase from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING -from typing_extensions import Self from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential @@ -46,6 +45,12 @@ from collections.abc import MutableMapping else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if sys.version_info >= (3, 11): + from typing import Self +else: + from typing_extensions import Self + JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() _LOGGER = logging.getLogger(__name__) From 13e8ed6e18570bb9eb5407729a0a4d1d4b227338 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 6 Jun 2024 09:01:13 -0700 Subject: [PATCH 108/112] Fix pylint error, line too long --- sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index d3e2f85a338e..40c86e7bafd0 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -12,9 +12,10 @@ 2. Add support for function load_client 3. Add support for get_model_info, while caching the result (all clients) 4. Add support for chat completion streaming (ChatCompletionsClient client only) -5. __enter__ (and __aenter__) method had to be overridden due to https://github.com/Azure/autorest.python/issues/2619 (all clients). 
-   Otherwise intellisense did not show the patched public methods on the client object, when the client is defined using
-   context manager ("with" statement).
+5. __enter__ (and __aenter__) method had to be overridden due to
+   https://github.com/Azure/autorest.python/issues/2619 (all clients).
+   Otherwise intellisense did not show the patched public methods on the client object,
+   when the client is defined using context manager ("with" statement).

 """
 import json

From 223238c17f8aa045f953ae134d7d79a4688ca72a Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 6 Jun 2024 12:15:50 -0700
Subject: [PATCH 109/112] More AOAI samples. Update package README with
 regards to AOAI support

---
 sdk/ai/azure-ai-inference/README.md           |   8 +-
 sdk/ai/azure-ai-inference/samples/README.md   | 114 +++++++++++-------
 ...ompletions_streaming_azure_openai_async.py |  94 +++++++++++++++
 .../sample_chat_completions_azure_openai.py   |  12 +-
 .../samples/sample_embeddings.py              |   2 +-
 .../samples/sample_embeddings_azure_openai.py |  82 +++++++++++++
 6 files changed, 253 insertions(+), 59 deletions(-)
 create mode 100644 sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py
 create mode 100644 sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py

diff --git a/sdk/ai/azure-ai-inference/README.md b/sdk/ai/azure-ai-inference/README.md
index fa37ad708db6..108c3cc23438 100644
--- a/sdk/ai/azure-ai-inference/README.md
+++ b/sdk/ai/azure-ai-inference/README.md
@@ -1,6 +1,6 @@
 # Azure AI Inference client library for Python

-The client library (in preview) allows you to generate predictions from foundational models deployed to Azure AI Studio and Azure Machine Learning Studio. It supports Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).
+The client library (in preview) does inference, including chat completions, for AI models deployed by [Azure AI Studio](https://ai.azure.com) and [Azure Machine Learning Studio](https://ml.azure.com/). It supports Serverless API endpoints and Managed Compute Endpoints (formerly known as Managed Online Endpoints). The client library makes service calls using REST API version `2024-05-01-preview`, as documented in [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api). For more information, see [Overview: Deploy models, flows, and web apps with Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/concepts/deployments-overview).

 Use the model inference client library to:

@@ -10,7 +10,7 @@ Use the model inference client library to:

 * Get text embeddings

-Note that for inference using OpenAI models hosted on Azure, you should be using the official [OpenAI Python client library](https://github.com/openai/openai-python) in product code instead of this client.
However, for development and evaluation purposes (comparing OpenAI models to other models in the Azure AI Studio catalog), you can use the azure-ai-inference Python client library with Azure OpenAI endpoints, as shown [in this sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py).
+With some minor adjustments, this client library can also be configured to do inference for Azure OpenAI endpoints. See samples with `azure_openai` in their name, in the [samples folder](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples).

 [Product documentation](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api)
 | [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples)
@@ -167,6 +167,10 @@ Embeddings operations target the URL route `images/embeddings` on the provided e

 The REST API defines common model parameters for chat completions, text embeddings, etc. If the model you are targeting has additional parameters you would like to set, the client library allows you to easily do so. See [Chat completions with additional model-specific parameters](#chat-completions-with-additional-model-specific-parameters). It similarly applies to other clients.

+### Inference using Azure OpenAI endpoints
+
+The request and response payloads of the [Azure AI Model Inference API](https://learn.microsoft.com/azure/ai-studio/reference/reference-model-inference-api) are mostly compatible with OpenAI REST APIs for chat completions and text embeddings. Therefore, with some minor adjustments, this client library can be configured to do inference using Azure OpenAI endpoints. See samples with `azure_openai` in their name, in the [samples folder](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-inference/samples), and the comments there.
+
 ## Examples

 In the following sections you will find simple examples of:
diff --git a/sdk/ai/azure-ai-inference/samples/README.md b/sdk/ai/azure-ai-inference/samples/README.md
index 72685f10d1a1..8d9b0d40c77f 100644
--- a/sdk/ai/azure-ai-inference/samples/README.md
+++ b/sdk/ai/azure-ai-inference/samples/README.md
@@ -10,7 +10,72 @@ urlFragment: model-inference-samples

 # Samples for Azure AI Inference client library for Python

-These are runnable console Python scripts that show how to do chat completion, text embeddings and image embeddings using the clients in this package. Samples in this folder use the a synchronous clients. Samples in the subfolder `async_samples` use the asynchronous clients. The concepts are similar, you can easily modify any of the synchronous samples to asynchronous.
+These are runnable console Python scripts that show how to do chat completion and text embeddings using the clients in this package. Samples in this folder use the synchronous clients. Samples in the subfolder `async_samples` use the asynchronous clients. The concepts are similar; you can easily modify any of the synchronous samples to asynchronous.
+
+## Prerequisites
+
+See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#prerequisites)
+ +## Setup + +* Clone or download this sample repository +* Open a command prompt / terminal window in this samples folder +* Install the client library for Python with pip: + ```bash + pip install azure-ai-inference + ``` + or update an existing installation: + ```bash + pip install --upgrade azure-ai-inference + ``` +* If you plan to run the asynchronous client samples, insall the additional package [aiohttp](https://pypi.org/project/aiohttp/): + ```bash + pip install aiohttp + ``` + +## Set environment variables + +To construct any of the clients, you will need to pass in the endpoint URL. If you are using key authentication, you also need to pass in the key associated with your deployed AI model. + +* The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`). + +* The key is a 32-character string. + +For convenience, and to promote the practice of not hard-coding secrets in your source code, all samples here assume the endpoint URL and key are stored in environment variables. You will need to set these environment variables before running the samples as-is. The environment variables are mentioned in the tables below. + +Note that the client library does not directly read these environment variable at run time. The sample code reads the environment variables and constructs the relevant client with these values. + +## Serverless API and Managed Compute Endpoints + +| Sample type | Endpoint environment variable name | Key environment variable name | +|----------|----------|----------| +| Chat completions | `CHAT_COMPLETIONS_ENDPOINT` | `CHAT_COMPLETIONS_KEY` | +| Embeddings | `EMBEDDINGS_ENDPOINT` | `EMBEDDINGS_KEY` | + + +To run against a Managed Compute Endpoint, some samples also have an optional environment variable `CHAT_COMPLETIONS_DEPLOYMENT_NAME`. This is the value used to set the HTTP request header `azureml-model-deployment` when constructing the client. + +## Azure OpenAI Endpoints + +| Sample type | Endpoint environment variable name | Key environment variable name | +|----------|----------|----------| +| Chat completions | `AOAI_CHAT_COMPLETIONS_ENDPOINT` | `AOAI_CHAT_COMPLETIONS_KEY` | +| Embeddings | `AOAI_EMBEDDINGS_ENDPOINT` | `AOAI_EMBEDDINGS_KEY` | + + +## Running the samples + +To run the first sample, type: + +```bash +python sample_chat_completions.py +``` + +similarly for the other samples. ## Synchronous client samples @@ -35,6 +100,7 @@ These are runnable console Python scripts that show how to do chat completion, t |**File Name**|**Description**| |----------------|-------------| |[sample_embeddings.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings.py) | One embeddings operation using a synchronous client. | +|[sample_embeddings_azure_openai.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py) | One embeddings operation using a synchronous client, against Azure OpenAI endpoint. | -## Prerequisites - -See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#prerequisites) here. 
-
-## Prerequisites
-
-See [Prerequisites](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#prerequisites) here.
-
-## Setup
-
-* Clone or download this sample repository
-* Open a command prompt / terminal window in this samples folder
-* Install the client library for Python with pip:
-  ```bash
-  pip install azure-ai-inference
-  ```
-* If you plan to run the asynchronous client samples, insall the additional package [aiohttp](https://pypi.org/project/aiohttp/):
-  ```bash
-  pip install aiohttp
-  ```
-
-## Set environment variables
-
-To construct any of the clients, you will need to pass in the endpoint URL. If you are using key authentication, you also need to pass in the key associated with your deployed AI model.
-
-* The endpoint URL has the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com`, where `your-deployment-name` is your unique model deployment name and `your-azure-region` is the Azure region where the model is deployed (e.g. `eastus2`).
-
-* The key is a 32-character string.
-
-For convenience, and to promote the practice of not hard-coding secrets in your source code, all samples here assume the endpoint URL and key are stored in environment variables. You will need to set these environment variables before running the samples as-is. These are the environment variables used:
-
-| Sample type | Endpoint environment variable name | Key environment variable name |
-|----------|----------|----------|
-| Chat completions | `CHAT_COMPLETIONS_ENDPOINT` | `CHAT_COMPLETIONS_KEY` |
-| Embeddings | `EMBEDDINGS_ENDPOINT` | `EMBEDDINGS_KEY` |
-| Image generation | `IMAGE_GENERATION_ENDPOINT` | `IMAGE_GENERATION_KEY` |
-
-Note that the client library does not directly read these environment variable at run time. The sample code reads the environment variables and constructs the relevant client with these values.
-
-## Running the samples
-
-To run the first sample, type:
-
-```bash
-python sample_chat_completions.py
-```
-
-similarly for the other samples.
-
 ## Troubleshooting

 See [Troubleshooting](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#troubleshooting) here.
diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py
new file mode 100644
index 000000000000..5e74596f96e1
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py
@@ -0,0 +1,94 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get a chat completions streaming response from
+    the service using an asynchronous client, with an Azure OpenAI (AOAI) endpoint.
+    Two types of authentication are shown: key authentication and Entra ID
+    authentication.
+
+USAGE:
+    1. Update `key_auth` below to `True` for key authentication, or `False` for
+       Entra ID authentication.
+    2. Update `api_version` (the AOAI REST API version) as needed.
+    3. Set one or two environment variables, depending on your authentication method:
+        * AOAI_CHAT_COMPLETIONS_ENDPOINT - Your AOAI endpoint URL, with partial path, in the form
+            https://<your-unique-resource-name>.openai.azure.com/openai/deployments/<your-deployment-name>
+            where `your-unique-resource-name` is your globally unique AOAI resource name,
+ For example: https://your-unique-host.openai.azure.com/openai/deployments/gpt-4-turbo + * AOAI_CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. This + is only required for key authentication. + 4. Run the sample: + python sample_chat_completions_streaming_azure_openai_async.py +""" +# mypy: disable-error-code="union-attr" +# pyright: reportAttributeAccessIssue=false,reportGeneralTypeIssues=false + +import asyncio + +async def sample_chat_completions_streaming_azure_openai_async(): + import os + from azure.ai.inference.aio import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + + try: + endpoint = os.environ["AOAI_CHAT_COMPLETIONS_ENDPOINT"] + except KeyError: + print("Missing environment variable 'AOAI_CHAT_COMPLETIONS_ENDPOINT'") + print("Set it before running this sample.") + exit() + + key_auth = True # Set to True for key authentication, or False for Entra ID authentication. + + if key_auth: + from azure.core.credentials import AzureKeyCredential + + try: + key = os.environ["AOAI_CHAT_COMPLETIONS_KEY"] + except KeyError: + print("Missing environment variable 'AOAI_CHAT_COMPLETIONS_KEY'") + print("Set it before running this sample.") + exit() + + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(""), # Pass in an empty value. + headers={"api-key": key}, + api_version="2024-02-15-preview", # AOAI api-version. Update as needed. + ) + + else: # Entra ID authentication + from azure.identity import DefaultAzureCredential + + client = ChatCompletionsClient( + endpoint=endpoint, + credential=DefaultAzureCredential(exclude_interactive_browser_credential=False), + credential_scopes=["https://cognitiveservices.azure.com/.default"], + api_version="2024-02-15-preview", # AOAI api-version. Update as needed. + ) + + response = await client.complete( + stream=True, + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="Give me 5 good reasons why I should exercise every day."), + ] + ) + + # Iterate on the response to get chat completion updates, as they arrive from the service + async for update in response: + if len(update.choices) > 0: + print(update.choices[0].delta.content or "", end="") + + await client.close() + + +async def main(): + await sample_chat_completions_streaming_azure_openai_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py index 0f31be308610..91ae49800863 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py @@ -9,23 +9,15 @@ Two types of authentications are shown: key authentication and Entra ID authentication. - Note that all production code should use the official OpenAI Python client - library when using Azure OpenAI (AOAI) endpoints. This library can be found - here: https://github.com/openai/openai-python - - For development and evaluation purposes (comparing OpenAI models to other - models in the Azure AI Studio catalog), you can use the azure-ai-inference - Python client library with AOAI endpoints, as shown in this sample. - USAGE: - 1. Update 'key_auth` below to 'True' for key authentication, or 'False' for + 1. Update `key_auth` below to `True` for key authentication, or `False` for Entra ID authentication. 2. 
Update `api_version` (the AOAI REST API version) as needed.
     3. Set one or two environment variables, depending on your authentication method:
         * AOAI_CHAT_COMPLETIONS_ENDPOINT - Your AOAI endpoint URL, with partial path, in the form
             https://<your-unique-resource-name>.openai.azure.com/openai/deployments/<your-deployment-name>
             where `your-unique-resource-name` is your globally unique AOAI resource name,
-        where `your-deployment-name` is your AI Model deployment name.
+        and `your-deployment-name` is your AI Model deployment name.
             For example: https://your-unique-host.openai.azure.com/openai/deployments/gpt-4-turbo
         * AOAI_CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. This
             is only required for key authentication.
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
index f8c97f5ba537..bddb19ce2a18 100644
--- a/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings.py
@@ -4,7 +4,7 @@
 # ------------------------------------
 """
 DESCRIPTION:
-    This sample demonstrates how to get embeddings for a list of sentences
+    This sample demonstrates how to get text embeddings for a list of sentences
     using a synchronous client.

 USAGE:
diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py
new file mode 100644
index 000000000000..f4a86454b9ef
--- /dev/null
+++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py
@@ -0,0 +1,82 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to get text embeddings for a list of sentences
+    using a synchronous client, with an Azure OpenAI (AOAI) endpoint.
+    Two types of authentication are shown: key authentication and Entra ID
+    authentication.
+
+USAGE:
+    1. Update `key_auth` below to `True` for key authentication, or `False` for
+       Entra ID authentication.
+    2. Update `api_version` (the AOAI REST API version) as needed.
+    3. Set one or two environment variables, depending on your authentication method:
+        * AOAI_EMBEDDINGS_ENDPOINT - Your AOAI endpoint URL, with partial path, in the form
+            https://<your-unique-resource-name>.openai.azure.com/openai/deployments/<your-deployment-name>
+            where `your-unique-resource-name` is your globally unique AOAI resource name,
+            and `your-deployment-name` is your AI Model deployment name.
+            For example: https://your-unique-host.openai.azure.com/openai/deployments/text-embedding-3-small
+        * AOAI_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. This
+            is only required for key authentication.
+    4. Run the sample:
+       python sample_embeddings_azure_openai.py
+"""
+# mypy: disable-error-code="union-attr"
+# pyright: reportAttributeAccessIssue=false
+
+
+def sample_embeddings_azure_openai():
+    import os
+    from azure.ai.inference import EmbeddingsClient
+
+    try:
+        endpoint = os.environ["AOAI_EMBEDDINGS_ENDPOINT"]
+    except KeyError:
+        print("Missing environment variable 'AOAI_EMBEDDINGS_ENDPOINT'")
+        print("Set it before running this sample.")
+        exit()
+
+    key_auth = True  # Set to True for key authentication, or False for Entra ID authentication.
+ + if key_auth: + from azure.core.credentials import AzureKeyCredential + + try: + key = os.environ["AOAI_EMBEDDINGS_KEY"] + except KeyError: + print("Missing environment variable 'AOAI_EMBEDDINGS_KEY'") + print("Set it before running this sample.") + exit() + + client = EmbeddingsClient( + endpoint=endpoint, + credential=AzureKeyCredential(""), # Pass in an empty value. + headers={"api-key": key}, + api_version="2024-02-15-preview", # AOAI api-version. Update as needed. + ) + + else: # Entra ID authentication + from azure.identity import DefaultAzureCredential + + client = EmbeddingsClient( + endpoint=endpoint, + credential=DefaultAzureCredential(exclude_interactive_browser_credential=False), + credential_scopes=["https://cognitiveservices.azure.com/.default"], + api_version="2024-02-15-preview", # AOAI api-version. Update as needed. + ) + + response = client.embed(input=["first phrase", "second phrase", "third phrase"]) + + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + + +if __name__ == "__main__": + sample_embeddings_azure_openai() From 0d03ad2ed5c266deed840ec69a2c2396ef47f5b4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 6 Jun 2024 13:25:36 -0700 Subject: [PATCH 110/112] Add overloads with `stream: Literal[..]` to fix mypy and pyright errors. Thanks Johan! --- .../azure/ai/inference/_patch.py | 52 +++++++++++++++++- .../azure/ai/inference/aio/_patch.py | 54 ++++++++++++++++++- .../sample_chat_completions_async.py | 3 -- ...sample_chat_completions_streaming_async.py | 3 -- ...ompletions_streaming_azure_openai_async.py | 3 -- .../async_samples/sample_embeddings_async.py | 2 - .../sample_image_embeddings_async.py | 2 - .../samples/sample_chat_completions.py | 3 -- .../sample_chat_completions_azure_openai.py | 2 - .../sample_chat_completions_with_history.py | 2 - ...mple_chat_completions_with_model_extras.py | 2 - .../sample_chat_completions_with_tools.py | 2 - .../samples/sample_embeddings_azure_openai.py | 2 - .../samples/sample_load_client.py | 2 - 14 files changed, 104 insertions(+), 30 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 40c86e7bafd0..8c34d6278608 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -23,7 +23,7 @@ import sys from io import IOBase -from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING +from typing import Any, Dict, Union, IO, List, Literal, Optional, overload, Type, TYPE_CHECKING from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential @@ -125,6 +125,56 @@ def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): + @overload + def complete( + self, + *, + messages: List[_models.ChatRequestMessage], + content_type: str = "application/json", + model_extras: Optional[Dict[str, Any]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, + stop: Optional[List[str]] = None, + stream: Literal[False] = False, + tools: 
Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any, + ) -> _models.ChatCompletions: + ... + + + @overload + def complete( + self, + *, + messages: List[_models.ChatRequestMessage], + content_type: str = "application/json", + model_extras: Optional[Dict[str, Any]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, + stop: Optional[List[str]] = None, + stream: Literal[True], + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any, + ) -> _models.StreamingChatCompletions: + ... + + @overload def complete( self, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 4c80a8019109..586d2a4dd421 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -12,7 +12,7 @@ import sys from io import IOBase -from typing import Any, Dict, Union, IO, List, Optional, overload, Type, TYPE_CHECKING +from typing import Any, Dict, Union, IO, List, Literal, Optional, overload, Type, TYPE_CHECKING from azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential @@ -110,6 +110,58 @@ async def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): + @overload + async def complete( + self, + *, + messages: List[_models.ChatRequestMessage], + content_type: str = "application/json", + model_extras: Optional[Dict[str, Any]] = None, + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, + stop: Optional[List[str]] = None, + stream: Literal[False] = False, + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any, + ) -> _models.ChatCompletions: + ... 
+ + + @overload + async def complete( + self, + *, + messages: List[_models.ChatRequestMessage], + content_type: str = "application/json", + model_extras: Optional[Dict[str, Any]] = None, + extras: Optional[Dict[str, str]] = None, + frequency_penalty: Optional[float] = None, + presence_penalty: Optional[float] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_tokens: Optional[int] = None, + response_format: Optional[Union[str, _models.ChatCompletionsResponseFormat]] = None, + stop: Optional[List[str]] = None, + stream: Literal[True], + tools: Optional[List[_models.ChatCompletionsToolDefinition]] = None, + tool_choice: Optional[ + Union[str, _models.ChatCompletionsToolSelectionPreset, _models.ChatCompletionsNamedToolSelection] + ] = None, + seed: Optional[int] = None, + **kwargs: Any, + ) -> _models.AsyncStreamingChatCompletions: + ... + + @overload async def complete( self, diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py index a14baa5df4e9..bb530e6f9dc5 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_async.py @@ -17,9 +17,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false - import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py index bc518a523077..457d117a68c9 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_async.py @@ -17,9 +17,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false, reportGeneralTypeIssues=false - import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py index 5e74596f96e1..5db3d52848a8 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_chat_completions_streaming_azure_openai_async.py @@ -24,9 +24,6 @@ 4. 
Run the sample: python sample_chat_completions_streaming_azure_openai_async.py """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false,reportGeneralTypeIssues=false - import asyncio async def sample_chat_completions_streaming_azure_openai_async(): diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py index bba508ea2ef2..02b894de948f 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_embeddings_async.py @@ -16,8 +16,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ -# pyright: reportAttributeAccessIssue=false - import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py index f9a8f9590fa1..48e5c0fa85cd 100644 --- a/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py +++ b/sdk/ai/azure-ai-inference/samples/async_samples/sample_image_embeddings_async.py @@ -17,8 +17,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) IMAGE_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret. """ -# pyright: reportAttributeAccessIssue=false - import asyncio diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py index 3de024ef3643..24fefc7f0c84 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions.py @@ -17,9 +17,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false - def sample_chat_completions(): import os diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py index 91ae49800863..94818f6527e5 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_azure_openai.py @@ -24,8 +24,6 @@ 4. Run the sample: python sample_chat_completions_azure_openai.py """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false def sample_chat_completions_azure_openai(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py index 8de268226fab..7d3e8d7a74cf 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_history.py @@ -18,8 +18,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false def sample_chat_completions_with_history(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py index b32a44bf1367..7b9e2e3577eb 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_model_extras.py @@ -21,8 +21,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false def sample_chat_completions_with_model_extras(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py index 866ef6537d2a..e9dbec8114cf 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py +++ b/sdk/ai/azure-ai-inference/samples/sample_chat_completions_with_tools.py @@ -19,8 +19,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false def sample_chat_completions_with_tools(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py b/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py index f4a86454b9ef..2ae3cff830a1 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py +++ b/sdk/ai/azure-ai-inference/samples/sample_embeddings_azure_openai.py @@ -24,8 +24,6 @@ 4. Run the sample: python sample_embeddings_azure_openai.py """ -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false def sample_embeddings_azure_openai(): diff --git a/sdk/ai/azure-ai-inference/samples/sample_load_client.py b/sdk/ai/azure-ai-inference/samples/sample_load_client.py index 57f0ebc0e305..683a05cb9c9d 100644 --- a/sdk/ai/azure-ai-inference/samples/sample_load_client.py +++ b/sdk/ai/azure-ai-inference/samples/sample_load_client.py @@ -19,8 +19,6 @@ `your-azure-region` is the Azure region where your model is deployed. 2) CHAT_COMPLETIONS_KEY - Your model key (a 32-character string). Keep it secret. 
""" -# mypy: disable-error-code="union-attr" -# pyright: reportAttributeAccessIssue=false def sample_load_client(): From 864796470f52b50be43883916f5b1360ca521612 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 6 Jun 2024 16:57:39 -0700 Subject: [PATCH 111/112] Override all client __init__ methods so you can define and initialize _model_info --- .../azure/ai/inference/_patch.py | 65 +++++++++++++++-- .../azure/ai/inference/aio/_patch.py | 71 +++++++++++++++++-- .../test_model_inference_async_client.py | 13 ++-- .../tests/test_model_inference_client.py | 12 ++-- 4 files changed, 137 insertions(+), 24 deletions(-) diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py index 8c34d6278608..9dbeb1ffee6d 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py @@ -124,6 +124,24 @@ def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): + """ChatCompletionsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + self._model_info: Optional[_models.ModelInfo] = None + super().__init__(endpoint, credential, **kwargs) + @overload def complete( @@ -527,14 +545,14 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if not hasattr(self, "_model_info"): + if not self._model_info: self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + return super().__str__() + f"\n{self._model_info}" if self._model_info else super().__str__() # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, @@ -545,6 +563,24 @@ def __enter__(self) -> Self: class EmbeddingsClient(EmbeddingsClientGenerated): + """EmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + self._model_info: Optional[_models.ModelInfo] = None + super().__init__(endpoint, credential, **kwargs) + @overload def embed( @@ -765,14 +801,14 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if not hasattr(self, "_model_info"): + if not self._model_info: self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + return super().__str__() + f"\n{self._model_info}" if self._model_info else super().__str__() # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, @@ -783,6 +819,23 @@ def __enter__(self) -> Self: class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): + """ImageEmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + self._model_info: Optional[_models.ModelInfo] = None + super().__init__(endpoint, credential, **kwargs) @overload def embed( @@ -1003,14 +1056,14 @@ def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if not hasattr(self, "_model_info"): + if not self._model_info: self._model_info = self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + return super().__str__() + f"\n{self._model_info}" if self._model_info else super().__str__() # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, diff --git a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py index 586d2a4dd421..9b5347bb5bc8 100644 --- a/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py +++ b/sdk/ai/azure-ai-inference/azure/ai/inference/aio/_patch.py @@ -109,6 +109,25 @@ async def load_client( class ChatCompletionsClient(ChatCompletionsClientGenerated): + """ChatCompletionsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. 
+ :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + self._model_info: Optional[_models.ModelInfo] = None + super().__init__(endpoint=endpoint, credential=credential, **kwargs) @overload async def complete( @@ -530,14 +549,14 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if not hasattr(self, "_model_info"): + if not self._model_info: self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + return super().__str__() + f"\n{self._model_info}" if self._model_info else super().__str__() # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, @@ -548,6 +567,26 @@ async def __aenter__(self) -> Self: class EmbeddingsClient(EmbeddingsClientGenerated): + """EmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + self._model_info: Optional[_models.ModelInfo] = None + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + @overload async def embed( @@ -768,14 +807,14 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if not hasattr(self, "_model_info"): + if not self._model_info: self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + return super().__str__() + f"\n{self._model_info}" if self._model_info else super().__str__() # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, @@ -786,6 +825,26 @@ async def __aenter__(self) -> Self: class ImageEmbeddingsClient(ImageEmbeddingsClientGenerated): + """ImageEmbeddingsClient. + + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. 
+ :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-05-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + self._model_info: Optional[_models.ModelInfo] = None + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + @overload async def embed( @@ -1006,14 +1065,14 @@ async def get_model_info(self, **kwargs: Any) -> _models.ModelInfo: :rtype: ~azure.ai.inference.models.ModelInfo :raises ~azure.core.exceptions.HttpResponseError """ - if not hasattr(self, "_model_info"): + if not self._model_info: self._model_info = await self._get_model_info(**kwargs) # pylint: disable=attribute-defined-outside-init return self._model_info def __str__(self) -> str: # pylint: disable=client-method-name-no-double-underscore - return super().__str__() + f"\n{self._model_info}" if hasattr(self, "_model_info") else super().__str__() + return super().__str__() + f"\n{self._model_info}" if self._model_info else super().__str__() # Remove this once https://github.com/Azure/autorest.python/issues/2619 is fixed, diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py index b1334a49bb2b..3c8281428844 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_async_client.py @@ -26,7 +26,7 @@ async def test_async_load_embeddings_client(self, **kwargs): client = await self._load_async_embeddings_client(**kwargs) assert isinstance(client, async_sdk.EmbeddingsClient) - assert hasattr(client, "_model_info") + assert client._model_info response1 = await client.get_model_info() self._print_model_info_result(response1) @@ -38,10 +38,10 @@ async def test_async_load_embeddings_client(self, **kwargs): @recorded_by_proxy_async async def test_async_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_async_embeddings_client(**kwargs) - assert not hasattr(client, "_model_info") + assert not client._model_info response1 = await client.get_model_info() - assert hasattr(client, "_model_info") + assert client._model_info self._print_model_info_result(response1) self._validate_model_info_result( response1, "embedding" @@ -76,7 +76,7 @@ async def test_async_load_chat_completions_client(self, **kwargs): client = await self._load_async_chat_client(**kwargs) assert isinstance(client, async_sdk.ChatCompletionsClient) - assert hasattr(client, "_model_info") + assert client._model_info response1 = await client.get_model_info() self._print_model_info_result(response1) @@ -89,9 +89,10 @@ async def test_async_load_chat_completions_client(self, **kwargs): @recorded_by_proxy_async async def test_async_get_model_info_on_chat_client(self, **kwargs): client = self._create_async_chat_client(**kwargs) - assert not hasattr(client, "_model_info") + assert not client._model_info + response1 = await client.get_model_info() - assert hasattr(client, "_model_info") + assert client._model_info self._print_model_info_result(response1) self._validate_model_info_result( response1, "completion" diff --git a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py 
b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py index 41c03a826085..c8e722d32026 100644 --- a/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py +++ b/sdk/ai/azure-ai-inference/tests/test_model_inference_client.py @@ -25,7 +25,7 @@ def test_load_embeddings_client(self, **kwargs): client = self._load_embeddings_client(**kwargs) assert isinstance(client, sdk.EmbeddingsClient) - assert hasattr(client, "_model_info") + assert client._model_info response1 = client.get_model_info() self._print_model_info_result(response1) self._validate_model_info_result( @@ -38,10 +38,10 @@ def test_load_embeddings_client(self, **kwargs): def test_get_model_info_on_embeddings_client(self, **kwargs): client = self._create_embeddings_client(**kwargs) - assert not hasattr(client, "_model_info") + assert not client._model_info response1 = client.get_model_info() - assert hasattr(client, "_model_info") + assert client._model_info self._print_model_info_result(response1) self._validate_model_info_result( @@ -77,7 +77,7 @@ def test_load_chat_completions_client(self, **kwargs): client = self._load_chat_client(**kwargs) assert isinstance(client, sdk.ChatCompletionsClient) - assert hasattr(client, "_model_info") + assert client._model_info response1 = client.get_model_info() self._print_model_info_result(response1) @@ -91,10 +91,10 @@ def test_load_chat_completions_client(self, **kwargs): def test_get_model_info_on_chat_client(self, **kwargs): client = self._create_chat_client(**kwargs) - assert not hasattr(client, "_model_info") + assert not client._model_info response1 = client.get_model_info() - assert hasattr(client, "_model_info") + assert client._model_info self._print_model_info_result(response1) self._validate_model_info_result( From 37157664eb0837b2562fc48fa20e37eb1d1ad5a7 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 6 Jun 2024 17:04:23 -0700 Subject: [PATCH 112/112] Cleanup: delete now unused platform-matrix-ai.json.old --- .../stages/platform-matrix-ai.json.old | 31 ------------------- 1 file changed, 31 deletions(-) delete mode 100644 eng/pipelines/templates/stages/platform-matrix-ai.json.old diff --git a/eng/pipelines/templates/stages/platform-matrix-ai.json.old b/eng/pipelines/templates/stages/platform-matrix-ai.json.old deleted file mode 100644 index 6a056ce2280b..000000000000 --- a/eng/pipelines/templates/stages/platform-matrix-ai.json.old +++ /dev/null @@ -1,31 +0,0 @@ -{ - "displayNames": { - "--disablecov": "", - "false": "", - "true": "" - }, - "matrix": { - "Agent": { - "windows-2022": { "OSVmImage": "env:WINDOWSVMIMAGE", "Pool": "env:WINDOWSPOOL" }, - "ubuntu-20.04": { "OSVmImage": "env:LINUXVMIMAGE", "Pool": "env:LINUXPOOL" }, - "macos-11": { "OSVmImage": "env:MACVMIMAGE", "Pool": "env:MACPOOL" } - }, - "PythonVersion": ["3.8", "3.10", "3.11" ], - "CoverageArg": "--disablecov", - "TestSamples": "false" - }, - "include": [ - { - "CoverageConfig": { - "ubuntu2004_39_coverage": { - "OSVmImage": "env:LINUXVMIMAGE", - "Pool": "env:LINUXPOOL", - "PythonVersion": "3.9", - "CoverageArg": "", - "TestSamples": "false" - } - } - } - ] -} -
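Patch 110 above fixes the mypy and pyright errors by adding `complete()` overloads whose `stream` parameter is typed `Literal[False]` or `Literal[True]`, so a type checker can tell from the call site whether a `ChatCompletions` or a `StreamingChatCompletions` comes back. The following is a minimal, runnable sketch of that narrowing pattern, not the SDK's actual code; `Completions` and `StreamingCompletions` are hypothetical stand-ins for the SDK's models.

from typing import Literal, Union, overload


class Completions:
    """Hypothetical stand-in for azure.ai.inference.models.ChatCompletions."""


class StreamingCompletions:
    """Hypothetical stand-in for azure.ai.inference.models.StreamingChatCompletions."""


@overload
def complete(*, stream: Literal[False] = False) -> Completions: ...


@overload
def complete(*, stream: Literal[True]) -> StreamingCompletions: ...


def complete(*, stream: bool = False) -> Union[Completions, StreamingCompletions]:
    # The single runtime implementation; the overloads above exist only for type checkers.
    return StreamingCompletions() if stream else Completions()


non_streaming = complete()          # mypy/pyright infer Completions
streaming = complete(stream=True)   # mypy/pyright infer StreamingCompletions

Because `stream=True` selects the second overload, callers get a precise return type instead of a union, which is presumably why the same patch can drop the `# mypy: disable-error-code="union-attr"` and `# pyright: reportAttributeAccessIssue=false` suppressions from the samples.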

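Patch 111 above overrides each client's `__init__` so that `_model_info` is defined up front, letting the code test the attribute's truthiness instead of calling `hasattr()`. Below is a minimal sketch of that lazy-caching pattern under stated assumptions: `ModelInfo` and `_fetch_model_info()` are hypothetical stand-ins for the SDK's model class and its generated `_get_model_info()` REST call.

from typing import Optional


class ModelInfo:
    """Hypothetical stand-in for azure.ai.inference.models.ModelInfo."""

    def __init__(self, model_name: str) -> None:
        self.model_name = model_name


class Client:
    def __init__(self) -> None:
        # Declared in __init__ (rather than set lazily inside get_model_info),
        # so truthiness checks replace hasattr() and pylint no longer flags
        # attribute-defined-outside-init.
        self._model_info: Optional[ModelInfo] = None

    def _fetch_model_info(self) -> ModelInfo:
        # Hypothetical stand-in for the service round-trip the real client makes.
        return ModelInfo("example-model")

    def get_model_info(self) -> ModelInfo:
        if not self._model_info:
            self._model_info = self._fetch_model_info()  # first call hits the service
        return self._model_info  # later calls return the cached value


client = Client()
assert client.get_model_info() is client.get_model_info()  # cached after the first call

This is also why the tests in the same patch switch from `assert hasattr(client, "_model_info")` to `assert client._model_info`.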
zb+x`k9o9K1;0JuC!-4+{k+IuOnz1y>=(ZUmcG_zhU}cT6eyVf|VzCv%ZDn*njD0Uy5ucjv$4H>yc3K>{IWB=o)v@rPAXS#MTuu?W$^3d+Ay$A(W z@thnNXR1U(DmNM^g~TE+I}0ha3b9VkwTu2>5Sv@w7!n#8=_}+$?#*W5z<4zs|L)EJ zWN$3x=|wi|@xTJ^k1)6$F3~$QF+H5A6W;5AiU<9E3Wx`(xE`YNxii779j=YE0TH#j zTWX}Kg=x?=-=62^w>;-g{QxOy$xcb<#N${KMPbXTD< zKG$#Fi0Nn$%jt|zdgd_P+>6x~pSyW8jw_4tAkq8F|L4Do?jJlC&Bq^^}3zjcx2n+(^|z8e6{1IF|h|L!lxkNg*ZDK0$ocx-n% zarw&CEX1?9k5QY}B8#t}neBPykux?&k;ftPELok1o_GA)-}o^6^F6iB&myfDIUT)g z^Ol0T);{ANMd2TJRG0XiYyFo_X7S1{WWn}7J;ho*>7~HWQ-}1Si zP5IQ^O#tdI|K9J%o4@w&qq#SVCj71-KLlvup?mztI2aqo=%~IF8-S#>JMW6^SHBU} zE8mRD-gN}Ji?b|6YvWXGz5Z5IJyU*TozF2Z_1z30_CY$>>ejmoke901A>?#njcX@F z*Em}p{vuI6LKcUkew2s9*g*bD&|p^wQ!6^HL57q$Yg#nAMqzG|vSqGISoyy6gC=Ic z@+^6KJMesgq{g^rCK3nWHq`xn`&gjk+|x+S)82~p;9FnNBYCCw=1T{k>2YnW;DXl; zNZdLnC$%ZsucIS_9KZ&@XAg2@)MW*?^a*t$7M7{M*-?#o>bP}&M&mjY zml66sYhkPb;PZS|UGh{kEo}{*(=lhg40d0A)!7yiW&zO01Q{;*>45bOT+XX@%k)%B zQ|bgjylCLmz^Hs|qrqRUdCg8$R<9wAKg9@Fifu4ZE zekltJ23Fd2w=beox3UAZrr8we#CXk6TJIp8T{bq=e!Hurpe~nH8IQiix+BM#nMLFP zqF~2LTvq@&70k4~z^wpeo;1pJJX4!!R#sWh2%d48vChCav)A|pdBrcejG;m0b2@nM z>2pp0`~GM=$nzxZWa&7SHT_RvgUz0+b$*$BNDWE8I0ggYte9tfw(xH9?Hc_p4a{o= z3;FM(?|I@`xQe0BSdNJv9SFjBD>@L3G;51QSj%}Fp=e0?%?AGbqL8>wWid+89uUNh zaOOG;#!%z#sdGB;c{@JooMi|@5%Ji^HxUw#vtrSS8ja?<5)NIUAdxhpI$8zF_xUgH z=^BygkW49I(LDGjqX6#ex~@nK*~GaMv0%{qAlhu!0Dz?D?G=eER7Db zP!FN7sg0?xJ$rn23xP3u7eEdX_Q9wCK8~S5l;b+f5TUTR!^%yKlFx81uDc6pIv7!p z>1}0u*^@Z6LDYkSx+ZJta16NVbO#8L>tJSPq>tQSwmi#-*n3uL%TXnYvAbPyAV@EU zJ=fP=L$u+K85k6LQXHe0v{4qIyZ3S%n@&gUYmu1gVR@#Zunnv8G74zc%DL^%v1sh< zA~+bh4Fvgur;g1=5gIR}Fe_NCC(z-$gy6DH9t`378MM5)c|C66MAz@SkX^ozCXDey z)YewAJ*(}IyVtjQZY@qQ7V@IRv$|KWMESzmxa&h7h`0Xj=VSQX7ot3cmKcZq(R|oV z)y$2~@(3AL8C|M55tN`K7SG+li4dp8E(UHF8ToU6=WoW3{LFtEXD{3t&%F18@u?sB zzBqsH9r4DiZ^U(+sLoNcrCI_Yq@1dZ)Te$xq9XOrH9pTmR4PXE2m*Xq{STI_xE4OJ;>cimxXCQf+12GD<5z=a;^DCc;N#fx*+*`PZX05VbT zQDu(k)xGy82$;im&0|kik(rGO&c=)sM*9RHodz8mXcXQQDNkzF)D@8g%wCxadyV(m_*s}o3jjrI_} zGJXZX-(6exvuVJYyOCm*KtOrdiOElw7*^H{j^HCYWcJf_d4YirMetx&Z9FOtEOKAJ z8AgMVtTW)-@fhK{ZtIphmnEzRaIV@lGmqMSn>l&rK?R-C5j$@8f;%pssqHp87eFjp zYefLz+ON&1G6M@4?1pEr1??g)F;@Z=Y+)S$=#p!>WUUZ(9mnb}A%VYkPktIvX91b^ zAE_SMI@k3u-fIKEJnhXIAMIC>uC|-1jf;x$+A7wlUfW=el|IcK5o(g-u@RF^h-6}> z=&f7gdDWKXDJ>{+NM{~?k2T$;d3o0*}-Bs2Y zJM_t5**PHZ3m<;h{m*8kq#=db?y=QDIXNTk_3To&1d>NUQ8XFq;O(uRGmv>q$~AWn zbKzYWKVl9l5bra&sb!x{% z_y`7f!4VLzz(|#~*Wc%H02IuQm5wTzLTz99>o;~#&I!-qT?F0(Ph79g#u1F$eehSV z+`zdEl3_M;xLxt6kyV*=Na6C76-Db4;g?u}iaPhdClto?#Vio@Gx2 zt-~n9i88BwQUVYZ^n6i}l`?ypnVNE)@<|5^j#w@xv#*a$^O`8L*@2_^(>rc6EP zD6>7x%QZfoyKC>fjWU^DTj07;79EOm1vd@AlYq&LOASLa==b97H!sGS`yPlo(VjBp z@x4n#1eY&giQ!)FkD`Y%_g^5&Sb&fFH@D*ar#>Fd2kwjOU;1J!5OE*Pm7_CSh@Nyq zZjJzj`0=cKJz+dkWW^>lH&M)s%y|!5?;scN{q(oSU-=*Y4*byt7GD!&zPYv<-~HL| zW$Z`s{MWx86XaV*kI<=2d};nu((?(bI$MG|r`>kuO z$j@qhU(ajMO>Vf!9e?mo|LM4Mt_d3H5V1C*zthP~-1VKED1GSZ_{g9B@tE*z9h-d@ zUigh)kKPylAlltgw9x&EMh63LUZ@7`t#>d|D|zNNj;L|rOk8;U{^(rWjGh1R`8eAm zve9t^`RqTjw7~DpHlN1U0A4N9%EBsV%S4o?lQM=GhhR+)g2e-@(nnUDx6zaE5l0}f z%~r)ce>NFL0~&0=TO-AJYf#X29hV&_tys^cyCkeBT84#9Q+M0Y!6II(#??c4Vt`Ta!2xvM% zZjgQLA!eC*XJego_SimbPuq@;H8UH^6h{75ac}Aj6XCA>UC9Gwc6Pgko(nX1LtWBd zxnz-zyWpSey&2^?KE|m-a2h(Z+v$0?*_r}(ym>$LMi8MbQa2MY0McFN8{<@chS^uI z9cMJ3Ld0;3bz-xO)-TlmD932h0uS*Or z0koh|kX3egs!2&U!o}yc;HL=qz?78!p^Xe^2roljAaUGM^roczs#OU9wD6o;7!fDB;FjNn_9ASRl!#GDO?7vA^O z{m)ue>CNRtk@B#xV=2zA)#CJ80|F?&I$Onxhk276_$@?Ou~?#xz%sefK#Nq|4`gff z>qxDVPXa*%CoI}680esRoeyPdi!UdWXN_}@JZ%mFWCQ&~MW(w15{C#F$_0~Wpw02H z3sZYcw#NGkoPEp3L|((#+8V{y_Fg))+XuE7@fuN^VmF<2peIHKBK|x47|U^Ga3k`xR{^BHser-hHKDI6lo2E%urb?*=lk*_Il&kyeTqe 
zz9_p)^(=rPG*sv_J%;dEh3{NFc`N+|jCYHRYc4u>_xbu+_wiSlqgxkGt=FR2swdBq zKA_{>eYcOGxQxw+Hc=Vc|n_QNcmPOn@A zuy_qJxpZ(GZDeGH2w44fo1$I61jVL6Q{J&%rph&{I!-fcM#>YMgLcd1KwXgNQefcn zg)*m+6(lIH8dJT8+kzcO?mFS0(YlcrA`%P8u4~AGjI6HbxlU0)<2*FV>aujxCTc&j zzD3)~>n_4n8r3*+_Dnih?IX8Oxfdwf1V~OhMr(lf2^h?{IR@{e5zry4JVh~E5%|Ci zo(|YT|D=&)RaT75onIEH3RL)~E(>}f1)l|o6M+^>t^`xqGi|rayY3)xQtED2mOk%-sxjHhM#rZl6MGQyiB#1PKOj z+IllKxgO3Oe0I^Jq{tL0TaHQlug#Mfy53rIHT{lsFw?QmTrIk`z)tX}47$%g>(#WA z>9p~Y@JoCv2vhFlnfFjO^e-g>MjOL5_UXb3Q~;EAX+`$S6&Q;r~&jIJSSk#0#HPeDgy zQdksnsFDoGe#gF}!-KHKD@!oVFP)KwKC=Lf!L=iu%&}`vhjyR?;lLPE7&>8(P~F?_ z#O|&?M+ixgs`4Njm8}W@5aAeIUcPcYi^TH0cD_Y)iT)vy83zw=Nhb4bqdu!2Wssi{ z47oMOx&i6px>*{V3eoABTk1rS(3ZJpnukcfT5}C8VteNx)6stJ_!y+hJtn5UK#h8+^LXlj?sMc`C5K-e} z7eB?ksZqwbC{qhGqqN$J&CSgiZ*E6* zO5}#oF>A2EK)RRhL67Wz^dm8U_IzCWo!^N?qC%HA4&XyJkHMK4J?~+ZI?(PCpS#K! z#t?Ul$v^Y4kHmlR-~2VkTg?0Rm*cjaCgiYeBIqHerA(9 z{Max2T--ND#Byy5U8oS5_K=fVT*ENl^%LJ4@BQ>AqR7`o42PX~?Tf!3SO4{IM7z_) zQ3JMku1-ODS;lErd4>npETmr9yIX^Zep?_0_^e_V<_7yQZ+49_9Rbj~dwWqmjl%-y zrk`O>o@JlHpa7W&b7IKFvC$a%IuBpx0QYm~_`F69Szp58ms)M)%;mlra?(b|i0+Wl zIdq~5F;+0tKEg&pxUFs6V&pw-7koL< zMj?wvbjdHq9j%~ z(Tf&yY_GJKTM;bfAv?TxyZFjJ_4>S>-BIlB?5Dg~Kcq7-!qh1{?sNpat`l(`n(3zl zH7i+U$BM>3>Z;McH1UHvb9ti-yJH{^(SlWW_{(!Av|GF;QWNahLb(70+J8rbgJRt3 zgnQBr6pDZgfCk_8d;J87r`KAsN?@5;V%}5g zm-#}gZok7gj-uIE$$Q*hAJ_h7Rs~DovTiQ3Bu&f?qesZ6;-_=B$(eq}?2e8)+rDxy zqi4ZIb`10%?`QKGn98jf5BHa-JYRnLntn@==F+hMMj*V`!-nE_^^a~z*QQFt406>4 z2IlgLfn^3q+(xjaSY>q1FBSOjc`#;rmJ=8%Z_?XdB$EV;3r{|B=d%i#ZE1CYrjt*t z)MBN@hf38Pn8rfKJ9p$(I2mV}88VpyKzT%i2M3^V1;7?x>DZkRjMZ?8J~GY4;G8f6 zNvDReE8Sj;^zc5DM^Po9>v=l!%qS$9HC^P1KCbPIP%sJ1=wOOR+%mmn%JMRi`@tZ( zIK(}id%uT}K_KVmPuEd0&4EIwaMx~ZXCa%NvFa$MCndQ&k!b9=NPipc`sH{mklE|n zS?-W)1VxM+K~S>ft0}lD&T(Va0tk_ppY%Gb+yM^UpP9w(HYmFzE%Eeo7$gNH(?P~( z0t?50axzc*)fa+jiu(XXATp^DArS#9t~P;DiK?#k1I|1VO^Qed3rRZtquAf`jB}kh zAO-_0{Y@?EkaJB3LGnm7_@jF#->-)b{{JNRiNF%n<@@(ZBZrMT;{$8t_`vQDJt$!=fzm;WmI zZ(NE~XNWYBd6jhK%vyA=UXAYNR^0Vn-xWQ8?3FM477^r8Mzmv`j!_9VF7}!DHDvj1 z=s0HVu5k~z|6h3aug1Hdd5>%?CeGLGUq$z)?mQox*Ei#9zw-xC0<3-d+ddJGKk-Do z@X9N(GwP>~K*IctG`mgT+us(^xvyQ2VML#%2F9qTM)Nv*3#|kQM*iMI6FWRUjEy_* zjz9PF|7i@KdpVYLbWCK_>%?|7NuF(uGgG@3X4X?s;@V0I6sKz1;O0OyS6nA6Pk z3iHR6=~kkKQ(T2lOUS|2q#u18k#!}?jLx_Mz*FDlkn5|aoUM6LSB&;110p*D2xVQO zWcn17m1lGCivQEG8_2jcXaJE*blkF>HpPh3lq|N;7j>=)46uW#9E=iMl1r0(d5O-$ z=-3?q736vS^cn$3VZ(ggG`LRJej8qkWg=oWBiPIHHguw<@Da__SLv#)&N?QZAs})b zb9Si9pa^FQZ@S%X7RqTOttyv3S?j~KW&`}JAVBBpXlsCMb#yoch%p``c>6GDr!uuJ zZM+soCJ514Hvsn9pbS{Jp$ABrvbGSejZI^iNf*I@fJR5>vELRN3mWYI;C%enE;Z^c z=%l~U2BC|iL0Tp4R<>A2bG4I>#SBc!8t>o!k|x?+w}tyGeTaqUnenlnzZf-e4BJ5ulavXfs`wD3(wPUKR$^ z%Km~*+Hf9`^K+j1SGN}1-nI7V9iJa}Vv$CMXJe zB3hNkc}aYp$Hz?3NmKU@YU~o}^KZ(1L~m9e+ZWtUK#oyi7WFF3gPu|Q6lY+>>Uc~; zq0BH+rwlA+9>+MaT|R#pH9l*VtQC&g7n}PDS{8Ab+4KN$nO`&&b`U$AA@dj`c+iG% zir6yTNS>$^7VT9L)Orevkv%pDgv@=Gm=Js_Ub zg=CqwLx2h-jse0Mx0nQUMI7K*zZo`CXU+sDBPlSc_ZyD zXvub!2#o3YX1huB+F;I&*xc>|kh+ZDb*3^-(Nven&}~9Iz!9o35W%Q10Sd-d{*4*q z(5MR@Dsbtb;FrQ})#rN4<&k#I!+B;ZN+m{Fb@-;Rq^-qH>wpz2n-7l}D~88Rg-aIg zMp1l}*(YQwi#$<=YJ)id^hC=QqR6|D=im5W{z1I(JHHu7X?RAUX8O20jP4t6=eqjB zz4yi39e2ePf*y?yVi{w#aOqlfUVbT_`k6l))lYpkc6qLRcAKADW87c9i6XttbNbN1 zOo$Qq#>Pgx_ru?MYtH=d*4OP{T|#gK=N^7ICIG%~Ub-AFe(5Xm*x9?|e}4Aac+Vpb z#WtUw@C*s2eE2W#$G4-9`oh2i-t(ExsmHsfp@+zbz3r_;Ib?jgCCk4YA8}cH-w*$A z%)N6NU@?p%BDKM_n{fgdy9q5H{GrdpskPO-r?JN&*Ib4MSHAK>EMU|Z;b9pgvy2T` zM3>9(bJ^uH2GqbH)?nJw@tF69QAd`SuDlb+-+UF6a3khML{$gFSip`LaW9=YA5$FX z(y8;JzBs|DA0J_sm&(~?59P^u=NfyTk?(nOBeRx%f?(|iJka4E0g5aDYh5@W^D7N( zz!30QM-QN(bv_3G^-08ofXAoiaY#dh2)F?6-6pHCRL1AANjm3KXHFOOCpZ_EFVrXX 
z-L-3j6&*_+Ma=yDQpPh&i45y3&5IdK+OONd%MWRba_=|J)d0X9G3hvE zJ+$wATK7~y$>;GjRu8Y1WIx~*f)XcKyw#VG;YypOMCy%pdg zgD;+4^wMzY%QbX^6zhJ9qOsdWk7hOtAel2vo1@*$%6I(&w$pd4v+;yX5vO(`mobn+ zYq&gGJeOLz&Ixu1#Cq6lqffUF+i%}uxy;0Jf7jL3mpSODeU&EawzRc*jqBvuhXZ-D zQI5;BdRQUPFzc8I27Q!v#Q-%6A{~?S#*gJ%zRNAvRuB<0AD^4;(hmg&g{*mv^mZ<) zreM!GtCP~u+K0R^&rv}=-{Ay5X~s6|ua>P7viR>PI)fp)q|aDL=U!xfbnqH}qj!K1 zzSnPm_d9oOaE+WV3qSx{wop#cp}ow1$PT_hpjI@PBLIKbllMJag77QNQk+?<#roPx z_J#IAlF10~NJj*-4re0-r$7w3HG@gJeMTdjYdb_)$BfbK5k;Fq2FKqS$uoG-tIkF7 z%e4>J}Z+=lVq187~(ZD#FV#A%~PaO;ZRah zV+F=};DBR7R*6i9;aS7(zpUq0G51v$pd;7O5bWGLno252Aqwt}QrS)AE3l$N(OWVc z=V`&-ne|H2(sWTWj9FTR>w1k0Iy{_@tsRe~bqfpgVE&$%lxZG`<(Eu``qD;)HXV~a zp`oc6fVH*}?Q#}Bi9Fqs*Q_i@d$j=%T+cSv)PWzz&CPDO*VlRi8$%?Y=1MVg#{@zs zUEGSZw!Q)=X=8krVsmppu3x{w+=#_RY55z0K|6Ckz?&MnWn0Z};AFp62cQkDL z?V8&FOY+E5PsWe@xj!3wfY?0a0i`qPt6@~$C8RqXR@XOTjB;Ll`(m`=$IkO_5FsDL zXFl>lboMyDa`9~?AeOmaO)BWEPHGxcI`W=sM1CVH`ICFP1`kEOB4x?Xj4^Iap84m0 z^2eg{J70*U5rzvRJO#|$fUdI-JQd&a(?5=k6zOaw@f%a_8(rj{b_=e+gZCEE&G!oIdGdg6>?4ub1ZH5oC zfKJ^^`XLi$-i+$}+fyf%;R1{ndQnigaPp4V!cm<}ZdHR81~od1#qvU|dS(Lpk||p5 zne*eB+Nr{s9z=J)8#izEqic60zH&X5e*$vpT4WC`rh+bj77NUbgj)h)Xrt~K5V^x8 zmnyZrI0*I96Eg(?KHud*i)(Ee{nFX- z@YEjs%OQ?biklg*Rk6TJn=e??hw0l4$h@bvBo7_}An3=mU+S&@jt&WM;C%*o5}Lct z0Whq{7Yr&yf;8oK6pAd7P*Lkb)u&2nVhK0NYVX}enums(;7}Dxo>0GDXNkr{Ru~~sG7ix+ziA`| z(hgR3pYxjAsUM#q^CYswHmi|{ocBCwPJmI;7@?F_h{8uMW$gUjAaXx{x*4l0Dky?} z3($yiO_xbG_kQ;}01&=&9S|C-P+8caCfpKCg}W%QSOBG3MT%1L>;)Y33X!Ylb=zlK zV4$EiVEDT0{UG$BcxEmvGTGVbA&|Dqb(;>DUK-2jejo06fs{ofr-u153qyEhsaW9ekQl%O@T&rw5uPU;+%=js~FQ@>>^UdihFJx!>a9aeVygcg0=nXXAxS7h?w=7f!w~ zFv?p>T^hTLgYw%w^GD3V_w!tcyb4WZT&{__D*J1-{ z&%o;L0CI`a`<83y5HueV348W{!YQST2wUf#raI-?pvRw%!$?+JAzjQ?dX-B-^5_63 zDOPDt?|Kj!7;(T?R$-?t;FxytO_FedTxc|S{om!XUdfT z#6mic;#D_ofMp+wZl{MFF)bd7>S6;7UU&h!!Egwy2y(Q=%8Q_iDBZaa2K~(RDKkxI z?rrUoQFZzt=#vglo6kJVTFfDf z8f^i9+uZ~>*;AgN#q6)AK`KAmE>rUdW+91K1t;1-9-91C795Ab&xr^I4sr0Fc$&rN zbBE9yn`&!X?&o}b=7{SoIyP`{oCb*A%iCm)gYwJ|L8-0N{mQ-iY8TJ;ZH3QFS^{X~ zTv^OuQ~<@dJP@d4K{7X9_}F{zdv>M11hHHz0wwV;(}KL2>+PnEliyo@E1?>@Cmdvrq;it2kC`+C*i<^%ju0J|-a? 
zk0~l!T{=cyNMj(f_0JB`{s_mVvlejZT&)(D-Y##n5ZLpl8?mt5uWLuz*Z-Y?W#*J#fv& zD$cF$@`I>dn|P92@4QAqPQE~PkFQOXo9i5eh}p74~%B{DDRYUQXXT0RW9|32VKMtI7c83ogTDy;XZAh$I;0g7miUb*ik5#JHY zqybohex>bk+*w_TmtTA__E6l%pMEOVFWebNWuiwwlV?y*i6q>nGwq=4qhYi$I9qSM z6BQhVL_PbycgN<=R=oMeFT^Gw>Eh%#SN4j|QcMM5&^dnKV;_kReEQ?LmT8fWd}LZ_ z82e*r$h?k`A+KM8&UfE+S8UvOKBh#w!+j##?g$1R#|IvIDBcACdH(IU;(D$rs((2Y zf6G7o=a+J*GcSG?-~P|X`}+vlZmzAxU;g=Di0*HHIZm&hiSG6N80`*YtJjZ(4?P(l z`Ex&&>2KYSYo}LUeI;)G{ug5Y)wiR994!-aHyLjey;Q#u=H*?WSC7G-|JTA5t_Vat&4I1xQI-sDXBg4US_cHLRv}AshBb2YNshT1AIy z*c$iTW7HGu0>1cs?m|L;G~5{>9~5i49(k+&%V*R0rOG0@S&k}lkH|n*>zkkr@9DTb zTGmdY<56*(wEZ~DGGzBYIG=S;JX8I5=}@_Gdziq)0?8G0-)pT!=ydy;E;bw3AgUKY z89h2q3wxEcUVnfr8=dNG3n!(`@EF=6-94XxQM=% zqetp|)ow}35WLNd2RcyLMP7%l%9%Sf1S{G{*UfeM+PZXp$cG*7e4c@%J7*Sfw1?=s zy*(V8{PAYC$=0~G4WQZ)m|ToUF>x^=A|%Af%L@_BjhfHQN9Z_-v|T?kQsF59Gzb1k105Gq5C z*JpX)o8Ve1Y4yFQ&3e3LX1xtW^+RiQ9Gwv+4%w|*xemq+HP(|!aV)70T*ZOwk_X0u zIx*;W@mhc|7G>#-91PD#|3q2*!BZ3A;>czanFQA{sL z$V6jK2&Sg;;0T69-WnGx1dmZZ6==}E6%nIwMhiv%=7nfBM5v~l%FG3%*&A`4hp&LH znhjo8=v{A7p`0It;&+9?LL=}G4!IAS0}!ZTj7cT%Xg{4emojdv)+@2TYWgfET|6Iw z&C1+`Wt84D)tCfoGdY7p?<93UFGBV+ruJNi2I)8TqFpzAqS}hlc))?W6 zD6JY_L2+#%Eb*na#$|sGWIQyBVHQQS%6#T=tQgwtdCdJhU%3S^XtkH~yZ~1}wzv1v z__;sd>R2!A?Cs^+p28<6*T5;%!;y!49OC4Q4s#iqFs5u!q98CJS+9UMp|_a_d2SZP zlfP_TJY#+%=+Ot9Su>F9f_zeiO8#?A7sqSyi*0u`4rY%`Cl&xgFo7XNey!0^X$OFT z0miCYM!}HR<-JMNyW=>|9Y1{bOg#6G|5^NpU;S56Vea>Q=;?UsyFV3=efKA$ap&nc zamvk7oU|J_hjyIB@$7x|>oI-vN*rCf7T@uo{nt_c(6`5RQ07P?Dla^PQMl;Dcf_d+ zXK_NP9ROq8w|_u44y`{qn8oI`-MCCtzBk;DA-sUI*#Ymq`yY;P|FeHK&i~+NVwb4* z>XprS_2qBIy^Zzw`=9%{`1F$xMW6e7YF)YogjB_51h>XeeEU^6dT7tzc`XFp;pZnl z`LS5ObR(8{=-_5IhBxW zbY>;y;EB#~N;Hcw#0fv$Qs?@7*dEzu8kIwHoq$PzrF4?{=VlDiZZJKFBFL}dRyLgS5BU{fqv^?JU&{$uU)oA zAa{q98_-rB7fyXdbeWEjaZU(WhHf>44@R;YdO@{W1f7Uah+*du)J-@ zo_y&TY83)3Y_fyH#HAd`Z`*C>4jJtX|D=KEHCO=ba+bQSMRe&@y37ET%BUx1nh_8{ zwOS4AjZrOtiTk=Ax4`x=la9Pr;qnnJsXM z)A`-Di=GgfnY9DxgB%FFv|GiJh*7g(jlY>&4L$RWljMeWJ$Z#rF`nBQWYx=g`C88N zSZz7VfburraT&p8Zpwox_M(#_6n8qu7@MH^ul#$n+w9pM}X zo3TJ_h|0GDn5h>T={Qn65CE^`r%e?4FyDQ&MFplQ92^Wz#tviI_+ri;^^Bps6s=}G z3uWx#+#>=v)#Lm`#csLEHi#NQgg8+lgmhe1c3Z^Y77`0U+!7_|Fl}Q41GgUtfO1X< z$f@>1thP;mq5cStNK+bk5|+w?yb~EtVgv|reQY=`=BB1(0iA{r0nRlO7(6=yz(^SG z?OszAx@fOhptXqz@D>0tMN_K-bhsUXG7O&J+^sF}s6^(K!bPNlL0iIj*w)l9+vTpT z*8w@U6)q*XxO(MAjF_)`{7s44S^2@!u7D)CxI6~7_@Yv6aJXIDL5O(sju+gqtwn)w@UgeBy z1y9PE$}VEpa0>Dqa~@yIwe!2O@D7#{Z_&|+M>aWyJy&8Rb;sTqOo{irgg@A#|F#<+bR*QJ>&=qdkxLONTzqFM)|%__H-7$e@iQO)loZOP0RGD-D-S-?uOb>KoV@P|0_2Q`)-=)IPu%xN z>^=W-*fV~-*GZm^5YFa%o{Wb-{$b`Lzlsmvc=L_e{p!~u-n<@*fTa>Tzkm_S^+YMY^ZtwtRK$*Xu0BQ~zdz|d@60d7;k%=^# z=*{6yCuV~lV*!M7{~5YjZmjY-fF<@|5of;$a9G(N#shP;ST8p*sK^lG7z2Dm5MG>->_qsUHQO{GrSW%#Ine#bqV; zEt@6evMNDIy{6p(Jlwjj$d1}Q($nDFJwyaK1%!8oY0ME-?48ScuC*hb@JXFCyP#te zgzMZrs@j&T%8?nuUe^||I0I=_0_Ac|QPW`UP64*HcM0MN0QFmf>oNSX2Fj%c!Ii>@ zh@=C{WdT7q{|shqW@TS}$zTtA&qGj`)ortq))^&0gMPUk(P9_uP1b(ltgK&1=d2?Y z+(AIwy-ufn9Hez%MHF5E2wBi(P~c%=X<7gZ)-YLceLJOJ^aWt7*p6a5p!T=O;JS|Q zy1qNDiYQutW@b^4q@6kB*~M|ej!VClEp?e|`YtBvcTpKngbyHdiPbjbuC;q+fCYiJ zzIG|KC^Xk$7g4zQL;55Pg02aZr$yB@2MD;|AqpYl%jEeBOR6 z{NQ)I`&kRXQVjW@hX7HZX9_`pU|~eIqg7(~5>PnQLI<1}LMhTi;fzRmQ11Iy&^tuqHgy=e zjmCTPngtg%gs9oDO1r_>m*VE;ZtQN~v}+~ryZyoS36MI>F1oF`PKy#S#YF_tVibXc z+p$a|xvpm6i#^`mGk*jT?OJpKvpxOn&a(iZvgsnzD%^)}9lW#l7*@CI?RQ48yKjn? 
z&oeoNaWZwq_SQaf1sCfmlcl$7UT!-QkXU7_5K0e$h2u=40o|-d_pIxT7C@@K!6b_~ zbiyj8$3v>Hjz^`DuQe3S;*Yj{?EyPBkhp$rJ9f5qGqWMU@$+Mlc}8m(Mr$2(AeowE zj9rXNpD0#trQq|=H7V(x3Y|vXNNFkB?q9{g7!{bN$|Z?gG_a`09{eh;%4=4}7L^*B zeH_bf9zDCrJt}GJlns@$C>ShuKDf-{mx|KaD|T<&$!}?J6w5a{(K5}Z0&xF%Xt2Wd zLwNGirAu+w(~m}VrHvCQ>P5zk_vXbnqlE(RzV>THODk_s2H# z|Jv{UHlsB>;pqKU18E^6M#wu9s_b1WBg+T{A!7o~F&S=$7$kDqqP$!F3G&?Uw){ zfAX3jNPf9f$VNK}*PPe45lQJuC`k#e10Ck{C4L(kCwdsN&rw|;9sxX``)-ZKw4yQOUMNPg;^rMF${RAe_MQb87DO02l zcmN2R3lcR=T)*OPjcQR0BtmkX6m$i`TwHShqERzLK}r0=4lvL{nO%2LS&YaIh|onP zuJ2h{Z=`JWDA8`>Q)#JjsS}JA&2EdBx`f?VDVDK5=a5&AB z@h%A4g9UuuYT0Zs1Cw;@MNx;5&@F_@?*b$RsBrc=aFHBFR8I1_3FBG8QMa2#>LyQL z!|3=t6~fP0FJMN(C`;aY#Hp34r%#{4!LOx)i#$D8XKQOS(`Yq~D)jdZe8(uzxV=2$ z|E#4L;xLEMU!Vfll3vM2jTq)b3+ij804?5ZVgQZ2l>;Ljvjwir3D{g($r=tmD>8PA zl1{$Y@5lZ=%85~tr*)i$PSB{T=tOAK3mXR6u)_nA0Du}jl%~o(ERB--tUax{`_ym_Vy;ypbj82@a1El{CND>&;Cqo@9xGcUwbjW`K2$%tAFq%BK9|< zOC;aN;lKTjS7V=_g0vE{Vk=carS~!7R^D7PIrHR$akSEiH=lnk2H(6ELq79?cRw26 z@tNJsb&v#!SQmuXD<8e}|#m(RSQZA1ijr%d{O``e46EXRN z-;3p5Cl*hw#SuK3gZ=`|DY`V*gl60uJIh=d1Y`4H7~9((?{`zT7fp3n(JLH`$K)O} zPNTjBvp!rqGOLpJwEo8eG~^DWJ3gAkHjYuiXYkWkpt z3~VOQlxNJ)#p^eA2_lBv6K7;^a-4ZK{Q(FVIcIdqy>-~yFLxZ79dM3^6Xt{7*zMCL znnvAbHy&Jbko?wjn2nh595bBpJ`}hNjs%57+@Lr zxm@59vQezXj`B_Wl>G@3i~&k#%#TS_$^@_|kxn=Z54orI$>U(@9l*S;&+uFqmr%N+qv&Jna(BfHx(iAmuMLXS&*IChtbh<5 zMwzPkDuX2pn`Z!rGC_k&mj-DY341p5!6C?x4{#qQQ$*>m1{%4$PC5hK8|_-WW=AX$yMxuxGlDeQ%S4MA zh2aPjsX!ka#xjIds1!~WM58spuq#}J#!9FooRMt>{d=otql|m|VxLMEUsUU-=UYc;=F>G>KX2wk8LuMq>W!e+U`4xyOONhA;7jDnTyXmvqaWVOZ|7q3t0KFH&%$qX_d1N-Wff?M zOqBp3M#0j=oj%qdI9~yl{>UIwdjd0=s=!RJVRVli2)(&R{!g$6`n4rMYG!Ppa~3>8 zCqa|N>e9e$h8b-u=l47Pw0S4)*9Slt)cB>1PGa+fBKs|iaPFX@7Iwk+Y(JhgF7Wga zUuKZnaHpGoO#3Tyl{xkQmfSgB$10du`1X%HkgaOHy9HsUFhrQe`jA@yh}<0!rpX$_|+Z=+SBi8`vq-Qe>G}kVGK<(ys zXxFP8EJ&DZiwcRT z*&zBRlHS|di%w^R!90#Wk-JM9IxY+JL}encqL@Ws#WbZ8^bo=81U2|7M4k{N7;-#1 zNhYv@GT8h@$7|}wEQhFAz8JOJeM@8Jmj)-dhoGG9rCq^!m75qZ=sp7BU%zrKqY4#B z`VIjKB3U^n|LrIz*qUJ&EwV99=ek%LC&s1R_&JS2foP>k7dr}tx+|yEi3E%{+y?!?Z1kpMkB6Xyclo(*1wO% zo10NOwhm*IsnB`me>5D&613}YZN{~$SE9MPA_76q;JI}!$|*FI2Y#PAdn(R6^JsJq z0X^HDxbc;jV+uI<(I5RG=6x8?z504O=)AgMd(zhRITtK`>cih2AHMrO55~lk_1^hr zl)m|5OkaI5mL7j1qwv`aFCjP5?I0$vT|s97yjJGxG%?^h?zro|II{58jfg8NaR?Y+ zI(0gZ8T%ZL#NHQz9ckjfV;sSPz>S}#fy+dX$3~OrZG~&A$jSs=pE7oJAlKgkGXhN0 z;6|uMXl{Yiw&kgG5RjR2cXi>m&Z5uiC$>|)^)7~t{sOibqif+Vr>SF(Rf*tsIp++%_}kr1Zv8^#2zUni!OSi=Zqwa%&ofJal13c| z)kb$MqBiTQEJz{K-|j#f5?E+A*6p0xYDTqjd(XGDvYFNhAv(g*)iYCI0^5Ps_upd&+In1WL6 zJ>T9BIXCm<5{5Qh8*RX<&2y{*U3Z#UpXE}7%Y$ZX4zUxqTkrJy(HlZ6?rSl*vZT$< zB1G=teUu4na1tytV0#lRVMFtjLO}}SvZzn+Sc4vBjRZm&DCbxOjQCP{g%8f`H*_+K zzVN9JKm4p8H0@p@qEI4ao@)|D<}zGiSSpDqc~Tk?)5=;Q9X-aKLGmf(qEH52)36t* zR}m{hEm8ci9N~T{fQ(6$rW1ky*-0rChIbP|73)-fWVbVooxKSH2~)X^+tEzB*wL&} zcip3wMqz({7?&?yPvxo-(G`{XroamX@0SG0V_P-oIAtA2pNPc>MqyWIP0JVE-xwqF zQ+@0cuuAmK40+vZd7KZ1Po-CQEnwNk2ybt75iXn>hH;3IQDAfbsMkR=6)m^ZK`U;P z&(PUv3@o&B%oR~92Gud>P^D)Rg<5SD+g(gY3kG!V(pZBd;BgFzKs8c4PoCs?_h~e6 zghbU7(DOB%`)o2wXRk6_FJQ{mViWIgN-m2*pqbB_A~5pPB0Pr5ZHZ49x2=WK`6_7e z$$Ey3Rz2DlPMP9kjGhu#v~+Ynw}g_XD4<3vqGk6o8y)TS>_la)MJXE)72pQP4NdHq zunOXLSBK3>rWVm?n2`~5Bg(V6sO*fQF+K|7Htt&}vg6aOi}sC@5|}ax*ClO#`oaq@ zM(O;SIQQT~C^XMveCO}ID>g9(H{Q7scinLZp9M_={+2ObHfTAxax*Se+wt7XFUK`# z@Tu?o&iIp``-Ql%xgD2Yc{Of+>A8$Jm(7!bYTY@YV%a{|{?T>huDi7zn|s?h@zVg+ zTJp=i+w;nmg38=Aw{DL-_1GgZMZO2u_u~3jUXAI_ZhYUje!jt3 zwA|yjkKS`%eD2#mnLXW^RrEIZVju9iU$4d4AN{Vl@ZjC~9L=wdfnNI6UylREUAcZW zN-k0Ge49P#P?l2Oxr@pOUG4Z~O^AE6U6wJ5U9MN6d2RzEFeZ|=X$w4<(+DHK@HoNY z%DI@88!=(-W#~4&axtdQKNoWsUyu3GFdc48I)F3i1#ONlUy3EyhF98IU}zdO9hCN= 
zG#``8m!pJCXp2A@3F=OupKI?&%we(ZvXkH*xl@mvFu#{pn#ks19PASTJ0+C^V_Gt+ z2Okb_@CO#XYAB(Q;8Z{;N86M`uFff%%%2fUhzb1ScoQ$OI2 zCDZEea;g)M*n_`Fkr#7P52d%)!l1p&2DS& zI|K&Zq03m-a}bpIES)<~a31nTALqB@xABr^bSSUV%(eOxe6hf-UajR05bbE%d3Y*c z6&=T{z0Lwu#7=tpweZOgJ@~9C8`qq2Z=fg&$L@nOGUL-a7$=~-rf~3@PDRBv-Jk&K zOW+Gap+Xo1BIHay*Pr;H*DJJHiOu^(#E04Cv$h8l9hsJqphnfA#RZgYaiJW0-Q(EW z8gg$_MHq6|i(#Bw;F>mm!O^=uv3au_7vH&tF&%S_2-tn)1-jwGJX_}?K?`9OKp0J1 zG|}&M3&UYoB7{f<bA-Q;6+LZhFRJU%kH$ z+Gm*-vK}C#BJORJ?Q=}2I5yX~?C;D=2yrI)82@~N75Cfwm8Y&TA3p;*(;!q(I1P~e z@LXrqoacF5Y$aCKP!i~2k8M+p*RI{fp$`yL#>6Ca?gs}LAcWpy7;}A%XNam*b_Mwa z;FIfiI7XFR;WypwllV|x=lZy383?+3&4L>F>LC;v#V5cLu^QDMX7)v1stDE*2(bJz zyJ3M-7Ox;{(#xhP6%0ojmKqT}ulM&miHhZQ8QRtQhf(iNaU80rKx!!7yHa`(oVp)Kk%1UkiJ@m-ud-Jrl3L_BQBe7U%A}iwGY>gno?NT>t<& zyLu*$;Pn(o@VWo~zm1i9?unZ>Zp7Qa|Ft-H>CIRM*qi_o7NOUo3WEH3oaM5LgnpHf z^I>NPd7C23It^$)63Ffl9gEb>TxkZlmT{hX=>DkPdq?zkI}vjD${ zW88V|t+@E_J|B&D-i$_%C|KZz9N9EQ<#undTb^{NCuj9}$giynyS4~M+P&FN37myExIgW{NvkDxba7@&%^IilvX`ube9XZMa zteE4z%YcUIE>MW^^QP-j$P+xmF&O=Jh=9h(wXB;VE9$GeZ*-DIkOKFjMn*&7x1wfN z%1BDjnNp!Bh{P*#Eo7XAA1Ef9m+03llOl=ZXBN#XYXz%XrW@OxCPPtQtac2Q9 zU)mf2w-d=qe6I6-gD~}~YCwevcU$fdkZsYQ_SVAdtUd>{;3{zV)>PKK)wwrx9gc9k7W*O}JRx1L4D^e~nZ+&A{k)?egEnxK z=YiSrm2SnWHCyGL zuBoFBf-J9fPkoI5+{3vF|5?aC=7g_ezGfU48gg*Nm<4~OSorj}KJsj((n#Sq)h+`i zntShbNEWv_Yfp>Qs5=NzlQYkCC5R>xmPrO{7h$| zTq=(TzPPWon26P*e1?Jk|Csu-E@`vtz7yLq&k-3py;IF76h>kO34o9ULLxv=6bDJ& zZCQ5rYI*s>YpH$b`}z*{NAT(^U)bGR-KG|)mPl~~L68`$3aIfNbBv5RGBXDMe*3A? za=w+7nGsJs&pG?-;lGEok3Hp|ofj=x4OzMH8njLuuN809v3s82>YR3G<%o?%G-$lk4l+srjIVtsmu;E{u6#M?7eM1Jk?q?(xmgJilv{ zGoQwNyj}yZ5|rLHD^jQo@0qicR?Z<$N@5-sXC@$MpSnCZOPBf+h@DJ#a5_oHgCCd`x=1ZwJ&@TW7SS@+J#;%Jcu{`lns5&w~kK0U+F>fbYNhN_^vc--{Q%_05>EhUbqT#mQg%RowpQC(+rU4A%%6*IN|j z&ng4aGj6cv4c2e~eP&NbnU-+t%=^FmMOLPF2JnF_#@g!n!67+^!QJCH_`=6x-9|Rv zJBf>5z905W`}NO!KK|Kn{NwoEH-01j;b*@P|GRH|3quvrJ~)c$v-5cL)1SrRo#U7@ zkN#&q7N7kW-;E74KL6mC@y5UZqj=|k`v1nk=seocpuSilHz;Xj(_$-mT&1a@G<0Y? z;A_^)<{T~7TW__>I#n7_eOXla+YCn|k$?)Yu3i^B?vSy{9*UhYmy!9ZZDD)u*!fIk za?3g(Oj$Ii!s114#(Elpbu4;t=mNXdc_xBwTD_+E^{+9$Qmju-qGtXQzV`VR_4)Nt z*fn_o-I5-)P7OS=ow7zeZO5E988L%58R0d&?=FOEl(@pG5wYu?@;RLv?PD$BXVZD= zOrw_?WEHrwA^$6nd^i=eaxd^f875fi?y0*34|vl2v~s>u*z%f}W~qcfy$*3s1cJfOV3%Up7TJLqH+8hKnjaJb)# zyLS)sGddTJPySL5$}bijX8OXgHLq)!jU>2!I%kfv*r%>{jp2oe#THt56}eBE8q)p{ z95ZKs)7j9QZ?=Zm08eSdctg*XfRN|8Qm(DeU@i>5W_}$IFP~|H)REc|TPPcG$@&Cn zE#NUsf6ZFzvTm_H`@3i;$8NFa(KGMQGf#&lH)f z6y^D#TTzn46REhBzy(hamc`L~l)zF-;z{5d%;Ww>4Yo!m4I>lwA_0R;Ih!p-5Lr3c z!8>yi_2^D7mw4|WA_(WboTwr7L6`4dkG~-#cRB-wKSfEp#V@yJfbfkJ7ai>{aI zxQb#O^F^RW(4OsCy}t`C!==#UrJLV(->LN10~WPkoL}PgUUDxDzM-o!G!WFG6c8BE zvrTJU7l-AzoFl_XH{M3CA(gHFsQ`IjTqWN^wiv)G58rzf<-K7%_sLJBM<`7NT#vv1qqu(a9p;VE zKNwi(mfb_hb1t-;cr7X|#~15@XZ2=N22DWB6RD zsxzo2zqbGyO#tEg>@rT?eII(PqgiXmn6*+^tH{iX`?y!yP^Ih9?(fCDFMU25ckaaK z;b~m{^1Zlv{5TFf_4vfc@5iSPj{pthI3LgOfbp1FFJ-}FW|xfTQ-AQAarg6|$nAO; zfAq&O`*(jFE#}ucy@)cOsWVVPk|!VQ#XI&qa~hsA_0l`GqZoz{-Z84jz&;U#3+e;IXx9vTztdVF>o%@2)} z&CMoEAmu{e!nPqRo5AXIy+~r8EE$67g(kK7w{r4(8vgda_e?rqowkY zZI_{9{*GzM;^8UA&#r9V%LXk1@g8#Q7G9%H8ayKp_PmfL0*yP!kUGQk8tk9)h$J#U z!HtGmfC6|z)*btNKIIwC2XD99Od}`%sna!*u2*(a6dH$_K(QfyUb!oPO z2Lw;UTqozJUK4n!Yl@IOABTSY!SB)mn!|fV(I0v%9mj&_{qnp53o><{lwHR!pqK%C zMw9E9F4nAzynu}Gc|nkCtPw689dxxd2GB*%y#o#9&zUJ>?p<_bm1h~4L(&fM9EX>? 
z-vo8SQ*5*8$d95)tY$ilmEZoypM71gHn(+vBr?~TgD8kxgiidWP@1C6YaZ-m%qJEm zpm^W=_~m{~&xxZ!1^;uKHQJ=diY%)WpO&o+}p$yja8tfG%+-704RAT3Wv`h58H7tEJDZ{9T`~K)DbFP$^}<&6}I^TZmnP{UUf4x5Mkarw6DpouL4)E+_onjy@igfWnA_;UwoFPdgkrCkqN; z62A;d=?9E4UKyI_HN9@V`rL-YJocH}y*tD74opYL$nuAUFF9uT#+n0b1PpbVPH?V9 z58MxHokAN%wgNSkJBl&S9>7qzMX1v&S{j>*xmhpc^x4yBVPxI*;_Ky;d6eJ3j1Fs} z*Qr54>ijbSessMiev@p!*lfqr){R;H^;L(%lJh&4F&)tWgTG4_}_u!-J zH+~k~0Y-(tr=xLHr87M97+!twoBuHCcQLl3Nu2!TjhO!6Z=wx0S+)8wAUhpNa`86Ij62J!wIZ^r4<6ZqyJBSJ_X$`+-LB1yx7B0G5H#n}7o zs~IJlJs!o)2n7b%T#e4+7DIjT>L+3?4?uVM=@x}L*sH_~zxNx_I6jW$!w(|<_Ag`i zCw~(C$xT#{!>oGu{)n{)mNei^`L~J^7Xk1-k~Crf9*zTh(5OFHI;3T-Gs+^ zuCk-2oFP?Sa~!}MmZG3ZNuUQ%SAW5{c>5R|6e5hEbxwB)QV=Px1dE{0Z=C=7q z;7@6kU(nvJa0MviE#4FKTw6rP_`Sbdr=_pEUF@eIz-VFH7)wQ>1sCxEeyUsLB>{m>v(C7-%Gx3$ znHfDo2O62wVXXY#Z+`jp>@0^@ogTLfim{OKbBmU@XZ|Wk2Y&1EdkU5N{t#9@dJVfo zQDF73x?K=HUZQAC0Vwh{^|;pH!Axx!OXj}3ykOz3<6y7N=P+n|W-`0MU{7K(bqkmV ztq8Tbn4a70mI{r9M;2a;rg3#OM z2BY6qwXQ*2HyAPZ)`P70``8}s?lWh>x?JZ;aW1eGtBJgqGJKJiSb=1(QGgbr0031G?XiuHx~-hq=wMe{=xAJBT43$7Vc=;|I@0 zg|YAO1j>y2^w~v}KK@d?^jqJ~d8`dXzWsKL|MmybnM^b9o?;0-nNIR1#7QbD8x}$` znpBw1I}dK*KJG1Vu+Ha?9^xSws++~+{5;&->zX$$sN^oHrSATI9DntTaRX02eB&3f zM0Uz6jMlmmkKI~_!e~EukOzaj(7`L)UB%vSd_4{A;&1*kwmcY2qoGlGCH{9wiKw%4vCa z67~DguRF||2Im=gEQYHMzgx+kKm<@Ek3p;`8EO|u!QU#qEqqYBcZ3lFG`;W=M1)_s zZxvlof}ip4V&{D!H z0Q0Hi2pFDj@LC1B%(-U6eU~>z6b(}sx=r1gTeJie@R7ems+0p2rv^#GssU1e8r|xq zgHmHH)T#0%M!nzt5d1e#%;re+3 zr+L2Sp7&xGOEo%p@NV%^*3?((jx`RD1hzXKxJVsw9}LQsJ8%ALmrIe zF2f^m8BSiCpJR9yIKyDPt`Wr6z;iw`i{{WPd6aFd<1CmndWq*-+hu1~BZ(gQt#hG4 zEV|~&2TTUub30%mUw{c{@7iWmTlvU)v{_dpl{#WZN^ShJ;r{U7)><;R3__!iBA(ov z#Y%lFe;AQgH>nhyyZTXi>M$i6MJ-kWyIJz2I|&k#0vzQ_?WieN^YJ=vEI#8CcqwPn z6&{2)bcToffZ%`r-7mfFphatj<#TU(*)R!%;y))RqBh*A$7UYLE@D|E$NLTd)sRA!UWmtB4#lkPcW-Xq?CIM%7!H=I=Oj0m}1|_#Rz&IjJ zJ#BNcPReZ=SECtMn7{FP=8$$6BE49=5$Go)SWJ>0cnX*&$=MV5mc_?A!w_2Jx0o%m z(oy89_j_lrm(}X_x$d+oAYkSMv*}UWXu&<&W4tv{=<>#JJ_ZlctWYXby+Rcm3tMGD zHK>9UbI}@c!{c^}(vmtp-r~CqxbV<_6h4d~THZq_2LPkY+3}wOEkewYQ^zWD5U*$& z^jR2d8&O!FU@={n+q-K)FT2>DCL`N{%9Eq4VigrvP>c|;K+;^VV987xA{p1vrOGOW zJ6~6(peH}{oCmFH+&u))^}2wyNeWEXF0giaau(MrbZ(QB_R?H$Lc?n1sl|wb4?}>3 z5`=1uiN`WW(v9^8zyCKeedpac#5f$?yBAyLarNH2afGM7 z!Qj37gTIdv3j5Xn>bIkI{~%62cofSxo^$ee4TB0T);Yf*jSFrHmJ zj>Ror&l121;AnsRejNSUH{#@nKa6<%W(>aYnP}a)AM>|<63ajP-(vmaH=}OE3ZrGT zNoDPUE>JH!9^Tk&1S3A!K6s%p3~Af}xaO6ug3m2=R)+IW1!-8?vk*2Y zof3*SFbu9=E;I}#U@&BFcyWjBff@7OC1gjP0rB8PJ z?(qz~%IPA?dXDIV?Fhp#f_Z=an{oJ0e=A0R@t3iA?@?3-d$Bn@N+szmcC4%8a9?~K zK3**+u|+Sn_XmkHOhfco$n|a&XDH`s@;Hxxe^mI%@6gK?6cAXfsxDA{svvbb%wcPU zHF$ccZdq0Miz3xE#vP9rJ!x8NcyOHg?kPsiD5Et_Sx*eS6!Uv zQ9vuxdKS!iVy9Kxu2~nby$9X*hd4~gvgt(So*`wrK(HcFEFMG%GP2-y-X_EuxQJ_kJE@F1?(WTaCTM0zhePtMyD(B-mhn z@TBB5omkikBO|7K>al0R>hFIK559zPPGu+pa=<6MN}#|m4`$y=f1v0aF~#j5CF_D>QA4}GAFDuR@q4q6~DOz zjj;yIYj4&zc^D;Qn&&ANyghD5(IOG5bL}QBFYNUz)1Vhs-Ylp_-eXg<3lz^(u*{Lm5JlIw zUFP9M$9ijqPz*D-S%?hBAXow=^Kt!dGxh)#GPPBvzGt{jMiU?i0tDGmhXuaQy`?U; zdn8-k7EgjSJaf0htCR>xuiFUqkX>0B-m2&#z2MM9pBPyF_E|p15TDoCGrmw_ni54w z#LnE1!@Gw)gr>`ynPXciBQRGH-jjDs`?yU;wh>|5K3H#hHpL?aRG~(q$NR2fhP)s# z_%PA8UH7u5wex2VR{iUl_YhJQ(Jh`xZM2H^>2)*_2>rf9@i4EvEIpjJYvdW}3WxyF z8jakeEe!!we*649{04z6Gd_)M&+6DWe-@*sXR-hJFU8$Yyc!oj|GRknC;uUK(`~%_ z#@q4A_kKN|`_|Xu{dXV68OD9__;Fmn@kVUleK#5qw$6N_< z=hyLA*KVcKdk5&N@aZv2ZKjrt-Dp7UqDw13tYq%+HN)qNUs^X(21ov*%bNYqd2Dd!Csjzwx&=w*?CL}Id5zKeuZR(fMDhQ((N zSVl3#;pTiNoC}PgXD(NT>a+R0-m*y76LHMXyPZShetI^=P-$!#3-@%O8XA$FJ=2S# zzXAbS9AwTbC(cClYHa-P8d+_a2H%N7K&vy=oal%Y#w_~dUPTA8O>SkIBES|udD@Y= zF#$`bQ;z-sCWK*KeU-n+>gsY8o==XgDZmB-X6{{swFJGJA2s*tb~g=_OX+JCSMa#J 
zm!H#X@3Ej7Znrks5W*2FBzF(?;5q2cT0msL1VYVM8ZNa4-vSzyg3W0}+aA7>U<8`D zutqV8FAz2gM9<%DOIFES5z1Zo+i<0xrNE-Z-xxRq))2ZvK7~o8S$=D@SwDqK@8H1n z0f=4C)Z#nAogqS_AP_d!#O)}IntPVx_-H=?%mp6A40@ZBGCd-|DMEyzdd4b9F)xU+ zcPy^awcb8s(WBQu=QyS}v3I$N&J_m6IY7_ca_6)8=lKTmakj>VzZ+fF1flm;=bl0a zZyFx+tZ+Rc4>EEd3N{NKoi#}R?Vq6-Cvp61zZUmC{c4>5-B04Zzxr|D*2U-k+uw~- z*8T7Q!=FZp@s+Nov3}?6XaEZ8@QYz>cW=P@d0)LN=?y`P2m=GD@>8&(qLT;Zj|yN% zzdmPxz=Ld}Uv0!*DF;j^eAi&l7Nv7CmKJq@hr^WP60)#c;PH3bQQI3vg}Ii8`)S}yti|mD@PU`!eCb8Tjt+X` zmr;KIAt0_2tCQ2%9UMjpd0W5vW|a5(0aL`y!zVfZHOjZb<2O8aadMI%r3Q~05i%`g z+HnLw%I(s?@fadDdSNAbDqGhY`OI_PA)-1;8Pt(bo`*x5x@aUkEe_-8VNVzvKU*B! zRR=UJ(39ro)p5`d@6XfX&M&9v814=KcT$0)m!OUMHg_grT)OasQO>NZ!B}Ubm`2e} zayFizyQUZ=>s>JVtZQMc)Z6F<^lq_>%F6R9>y4~1MRltH!uu=UMgW$uB`>7Y((l+^ z&FCtIxl!h!4B zp*9bOMHrv+w1JvN6(LDpvGZQRs{&CR6y(L4o;Lhpl(-n1^%wB4I-*oh;|(QK5BmJ< zP6tn`S3R*$XD;0TuKaRu9bMCy)&v->$h2SHi_b&P2@dRnaf{Eh>K*2d0ILDo}3xFqW zb>*M`76#ryFiGyfhnXO6cup#-Sm+jmb+Qu4$&XN49%(BI z&KwvM^i(6OwbfB6MO=E}z?<;+u|xcV4EDc;7=^m-Nw z>hZ!ftkiT25SUzNjex0yEzil;u^7oK1ZRPez@s2dm>r?jOBA(<5VNnd#DhK=dV*LM zaNVpHSzsoxa*XEpj}Q9EFu6d`SZ~**k&cNrU0Cmp0u!y{P ziWA7S(aIdYTU9I&^7+~1!?wcQ%SgtIamb9))b+odP1AVGFxmD{0cZT?yWEd|uwV4t z7u~uYM+GLG@<_=K4X05Mmt?qa*S>08UUi zg2y~Nz65>r;8JM#sD-2|+diLPz5jO1QJD9?{tZ0KTKvWT_&>(0|M2s1_t(D`Z~XBe z$N0~FfG7Mk){mb>Jb4_o>4-JQ3vjIzKG%)hK|xxev|)|r!@Zj ztUxb|m+C0=^71NHS7*^1?#WJhB&}(SPOl%g@;aZfmY|Dq-AvEp#Y2GGY#q;j_=9M^ z^=EOvZw{GdL;lbYC`Xt)E8-2yr+$Gh3A9*8 zkA^J_<#n9D^>#E5i?Dodv%^4GL$I-LhEm3OH^`tT%Wchb0)#3W%R6^s`|Lb!&o5(V zXGzph@Bi?xV|Vr}nwMv>o8s*;$+}TQ6t8*r9b-HH@f#SyM$E6q(X_FW48dpX$O7s) zM(`^tHrLP`zIsb<*7egtGNy3DSh7Vb^s_j1im@`?cz87AebyIJ=c$<7Bt!S`VIQ7U zDVwLw?P%(9bc~_-(MZR_e0EXClYGOPa5>M_yA<3PDLVCpHAW85f?6Xei`hKRuddKn z*F3kGV8T$ZGM;&6WZ!LGYj{IrYxuleQ5QHCqj-#(XA0(5_JC(-IvhRgqA={%#AI7( ze%Er_opfxQZtDm(%WTpj_~`cRm(Lm*?YIS~K`$Ku0ft6T`2b-+p1;-zS`_S-K<$sR zDO51h zKHD`*8^HP}K!D#}PnpNLr?brSEp9b!mQfIR%(S7H&3p9z-38#$jiv?qC>}dPnGZ>13hHywp&~Sp2^cc8w&kY9bW04p z?+TLiO!xQ9B^iq6HST{ox`HX3mkXFyzVPnL@w>VsbT4V{6z>#ni$x z(wiTntjx{G0RlR7ZZeXEJ#z~NGeuC0cMPo;2w*Z6~#B4ZUQ=>jS&w$ z`CG=X_qKx$4QOE9!3|3C*t{UdYWB-t#taW|_QAvW(SQ1*`1Swuzlja^IQ_%_IktcE zli0lTizs0<%FA)oSsT3ojk{aT6lOhA_h~z-RF=sfjt}L`Ts`?u0w%x8*!h3{L9diO zqb`i211+~G#_YleEtY2&jMKs>8!gRJF7WMI$!!?^uGS}=)Cqy+?+g%t+fKI@p>}m zGsq4`vMP{PS@HMog%@HsIV(JPmE*$?5>%7`B!=lL-}sHN%DMzF%4hRTpD*AEJlvg5 z0f4^x%F9tkalwR14_sP=e-2FmpBLwG4KGxNhtc@h$C+#;7Vkca=Kc|M0eCYBAT#{W z?*N6`_#&#%Y=d(4gfF*RdXinG*NqMLpFevR`^QHN9vNWHo{NbH0awG6ZAK{T^7?pXd8}pdzCUitrjp_-k)eW98 z!O-Gr>Zxkxd@V-fIo#L$upYPt$13K-!$AztAp-L8Yz$!N#s2U(jrY0jv!?)M=xeIZ z5V(p&@Zi>FhDgfHq14Lc1ghudE0z>4%NNk?1&i##WXp`gzK}(T$H;2vrSl8q^-uJlR z#oX?O+yj~jMC=CG!9Exbu|LF;IobG21MQYN>n=*=LT}p_UrV!iQnY88<8WH=0_WEUYGAWG4Y=AkNe!#*yG$VNgIdH1aH z!w9Gx3PdPE1UaEd_1e<7^Q?=p+uJaX7?a#8f|6zZ7?j4njgiIJK?J=)3+iSdeZ6(_ z@$QS3NXof_=QypZR5|2+^L7s}0M8!KAfi%<+wjH8N4Ifl*lqHlS7^aj#~$_==vKr2 z$0(^3K9u8aIX!mISU8zIRVDR=mQ7Kz0B3RJ*{6d2)YI9x*<@tP3aHy~;{+xEE?02IhnManU+}FDmxkkps`n#6Oul%5}x(><@^2*qp za`73aq6GnX@C(9g4}16dw|svxiffEwMMa4$tlY*p8pY`^-j36U@4@@yc<_nWcoN>@ zfBxfG{o>~t39uGp3D8=j$P!HP&FC0wHR@gDhQF6%fC4h&K{QsHSK-ZdtrG{|`cB;a z?e9h7{{7f$2;X@xD)37kd2a6aSlC)D071>;J9xiW*-qD^WaCyW;V#!s z2Ss!X=qz>jV)NeHQAcT(S$hlQO7IZOf^b7e74!ZsOg=n2;NdbD6xC^+Q9ZgH!tWvr3o$xnWqk(3*FwtRdHd2J*!iN?#Xro5Km z(I()d((C}{-bY>mr1D9wiGXbvH}K2_dTp({PLavzLGlOVifTU7K?iCw0aS`Wvu?7Ix2D-slQ$;`!$e;>A~<=e}(~#1!Yj zIx9e9trc$f&4NYv%)(*EDp)lft}SUWR^Llq1IV==OmDTt`q;nSh*fFFxVfKEVC6($ zR-D9XsJ_VCyUWLUYLm1bK0AvdZ#0V5!U(+0w-~T6HeBHdcH|ISbjB-sz%iE_> zt?`=1)A!usl^z;OSVL|#wUcv8MHX!^5o|8R!*GCXy-ynD4;Rv|J5$i;(`SoBlv@aH 
zh*CsWXTi)_8TwJ#tL)|&;u$DfyJj(z$kX>Xnb&|&C|fWF?^~GB$DCeH! zJTh=@;|7Hq=L7?52q}xT_?$+aNx?7(C@Y*%jijq)2--+!N6!mw<~iiErY)>D@U^kw zM2w=~$;LX?Hl&e&=`&2l{f%N+71{y_p~nik@;Onykqqx|(P4(!RVpxnp-fS-Xjx!k z^NF$b2fUWZ8pbrLpn$3PG)RIvYbD$cH|PMMP^v|ar=E)J;_Nc#XpvkIy%CW|93Hge z=x~pFZQ|_YiutVIj~zm3Slm#%3KBupb5B5J2Ri{^5&hutiF)NNLEq-K=yceetYiQ3 zCR!*0Y0?^c_HvPz*AvOz29BGt7|jvUcDv>^m(ktZkM8hJY*8HUz0R}mp@lS8c-=|h zF{17zhsfxW+vvbw>TQ*A7sa;(Xg-H`ymllT$#b+3c#&9nVKKc2$En{F0?nL|kajd4(sP6AY%W(7bDoTq{?4ZN# zBeRGad|l`HYj}Bc@-()1OWW`KUaSoN|MYL7`uVS7bWfvpcAn?A8$#KsumG-0E6DNA ztMJ7Z-qqVr(1wwLjHzd$gn?(Ql`3BA#XNRAvx2eR0Z=?OE)E8wj?PoBnRh3;WI9yMFEJ5{k(;}V@Y z=UTVM!j{O8vI0*-M|oGkt)rsRa@*2y*oBwXTTC% z8}H zN}a%3S>|sqwK_LBE-2c+CzBRXoTC@y=R^eAV6TOMJn?K-H&t5XI(?P{JABv z^83H_x!1FTd53ail1@ZKuaPSDBlCCJ{DlS_BUM4rRX}K+Z2W?v6lGhvXnsM45|tX> z)Noi)nc*P5B%WQ3&Mt28P)7`&`>cYz0)AsiOi`DFigyioi>{obX zdT$o~7}mF?Y#vX!tH)Sx82Wv*?LlOh=fQKmxIqI}fEo9UHUL%)jQed&QEDFH=)z{Y zgfFi(Or^jW_O>me9-BzgPJNS!#d}uu7Wsbb82mEyufP~B@DwIb>pR+Sr{WVeTZDIw z!Z|y)*bxQ6*P2BsQ1dQ5_kEkfFi(}s#pRfF1I#eq2Hr{*-&NcJ;cf;29mWrawo2P= zeHp^jfTR6Pw`6#W@sw7(*t=ZE0HkiH%!66hn~2;GJ`Eymmf1V~LmXnH1$V-Mjhoj;BB$uo@GJUx8{ z(R<1PDu!Gvc*a^1*f8m<(pNEfxP0-Ytib?DV%+?^_x70esnsD_E)|T+vOrwKYuftU1-7@`&eAi zsA%NuoTp=fj=BYOG~si%`#nb2%`md^c~)m@ty7m3krC5;%DbR-&z8RMLmGGZKKrc0 z8%=4}-ebLyZ`R)3L3x;q2A}J%kn2l;yWmv=FR)g}m==@AbgXm?EQ<4WtKBV-wpWHc zWVrk3sl7r>*{w57t_