diff --git a/sdk/vision/azure-ai-vision-imageanalysis/CHANGELOG.md b/sdk/vision/azure-ai-vision-imageanalysis/CHANGELOG.md
index dc6b3810a2eb..2eab642be1ea 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/CHANGELOG.md
+++ b/sdk/vision/azure-ai-vision-imageanalysis/CHANGELOG.md
@@ -1,14 +1,10 @@
 # Release History
 
-## 1.0.0b2 (Unreleased)
-
-### Features Added
+## 1.0.0b2 (2024-02-09)
 
 ### Breaking Changes
 
-### Bugs Fixed
-
-### Other Changes
+- In the previous version, you would call the `analyze` method on the `ImageAnalysisClient` to analyze an image from a publicly accessible URL, or from a memory buffer. To better align with other Azure client libraries, this was changed in this release. Call the new dedicated `analyze_from_url` method to analyze an image from a URL. Keep calling the `analyze` method to analyze an image from a memory buffer.
 
 ## 1.0.0b1 (2024-01-09)
 
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/README.md b/sdk/vision/azure-ai-vision-imageanalysis/README.md
index 117987fc51bb..e22bd18bb062 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/README.md
+++ b/sdk/vision/azure-ai-vision-imageanalysis/README.md
@@ -168,7 +168,7 @@ This example is similar to the above, except it calls the `analyze` method and p
 
 ```python
 # Get a caption for the image. This will be a synchronous (blocking) call.
-result = client.analyze(
+result = client.analyze_from_url(
     image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
     visual_features=[VisualFeatures.CAPTION],
     gender_neutral_caption=True,  # Optional (default is False)
@@ -224,7 +224,7 @@ This example is similar to the above, except it calls the `analyze` method and p
 
 ```python
 # Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
-result = client.analyze(
+result = client.analyze_from_url(
     image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
     visual_features=[VisualFeatures.READ]
 )
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_model_base.py b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_model_base.py
index 585ccb0e0f90..bd51cdeb4465 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_model_base.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_model_base.py
@@ -16,7 +16,7 @@
 import re
 import copy
 import typing
-import email
+import email.utils
 from datetime import datetime, date, time, timedelta, timezone
 from json import JSONEncoder
 import isodate
@@ -462,7 +462,13 @@ def _get_rest_field(
 
 
 def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any:
-    return _deserialize(rf._type, value) if (rf and rf._is_model) else _serialize(value, rf._format if rf else None)
+    if not rf:
+        return _serialize(value, None)
+    if rf._is_multipart_file_input:
+        return value
+    if rf._is_model:
+        return _deserialize(rf._type, value)
+    return _serialize(value, rf._format)
 
 
 class Model(_MyMutableMapping):
@@ -559,7 +565,14 @@ def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]:
         for k, v in self.items():
             if exclude_readonly and k in readonly_props:  # pyright: ignore[reportUnboundVariable]
                 continue
-            result[k] = Model._as_dict_value(v, exclude_readonly=exclude_readonly)
+            is_multipart_file_input = False
+            try:
+                is_multipart_file_input = next(
+                    rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k
+                )._is_multipart_file_input
+            except StopIteration:
+                pass
+            result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly)
         return result
 
     @staticmethod
@@ -567,7 +580,7 @@ def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any:
         if v is None or isinstance(v, _Null):
             return None
         if isinstance(v, (list, tuple, set)):
-            return [Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v]
+            return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v)
         if isinstance(v, dict):
             return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()}
         return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v
@@ -762,6 +775,7 @@ def __init__(
         visibility: typing.Optional[typing.List[str]] = None,
         default: typing.Any = _UNSET,
         format: typing.Optional[str] = None,
+        is_multipart_file_input: bool = False,
     ):
         self._type = type
         self._rest_name_input = name
@@ -771,6 +785,7 @@
         self._is_model = False
         self._default = default
         self._format = format
+        self._is_multipart_file_input = is_multipart_file_input
 
     @property
     def _rest_name(self) -> str:
@@ -816,8 +831,16 @@ def rest_field(
     visibility: typing.Optional[typing.List[str]] = None,
     default: typing.Any = _UNSET,
     format: typing.Optional[str] = None,
+    is_multipart_file_input: bool = False,
 ) -> typing.Any:
-    return _RestField(name=name, type=type, visibility=visibility, default=default, format=format)
+    return _RestField(
+        name=name,
+        type=type,
+        visibility=visibility,
+        default=default,
+        format=format,
+        is_multipart_file_input=is_multipart_file_input,
+    )
 
 
 def rest_discriminator(
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_operations/_operations.py b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_operations/_operations.py
index 9dcaf4e9adef..b004fca9f53f 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_operations/_operations.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_operations/_operations.py
@@ -170,9 +170,6 @@ def _analyze_from_image_data(
         If however you would like to make sure analysis results do not change over time, set this
         value to a specific model version. Default value is None.
         :paramtype model_version: str
-        :keyword content_type: The format of the HTTP payload. Default value is
-         "application/octet-stream".
-        :paramtype content_type: str
         :return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
         :rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
         :raises ~azure.core.exceptions.HttpResponseError:
@@ -488,8 +485,6 @@ def _analyze_from_url(
         If however you would like to make sure analysis results do not change over time, set this
         value to a specific model version. Default value is None.
         :paramtype model_version: str
-        :keyword content_type: The format of the HTTP payload. Default value is None.
-        :paramtype content_type: str
         :return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
         :rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
         :raises ~azure.core.exceptions.HttpResponseError:
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_patch.py b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_patch.py
index 2701ff7dfecc..4a4ed79b677c 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_patch.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/_patch.py
@@ -6,16 +6,8 @@
 Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
 
-Note 1: the analyze overloads here should have been implemented in the `_patch.py` file in the `_operations` folder
-instead of here. That would have worked fine, except there is an issue with the generated Python
-ref-docs. The overloads do not show up. See this GitHub issue: https://github.com/Azure/autorest.python/issues/1315.
-To overcome this, the overloads are defined here. Consider moving them to the right place once the
-above issue is fixed.
-
-Note 2: Don't bother documenting the two overload methods below. The doc tool (sphinx) will not pick them up. Instead,
-document the 3rd method.
 """
-from typing import List, overload, Any, Optional, Union
+from typing import List, Any, Optional, Union
 from azure.core.tracing.decorator import distributed_trace
 from . import models as _models
 from ._operations._operations import ImageAnalysisClientOperationsMixin
@@ -35,41 +27,73 @@ class ImageAnalysisClient(ImageAnalysisClientGenerated):
     :paramtype api_version: str
     """
 
-    @overload
-    def analyze(
+    @distributed_trace
+    def analyze_from_url(
         self,
-        *,
         image_url: str,
         visual_features: List[_models.VisualFeatures],
-        language: Optional[str] = None,
-        gender_neutral_caption: Optional[bool] = None,
-        smart_crops_aspect_ratios: Optional[List[float]] = None,
-        model_version: Optional[str] = None,
-        **kwargs: Any
-    ) -> _models.ImageAnalysisResult:
-        ...
-
-    @overload
-    def analyze(
-        self,
         *,
-        image_data: bytes,
-        visual_features: List[_models.VisualFeatures],
         language: Optional[str] = None,
         gender_neutral_caption: Optional[bool] = None,
         smart_crops_aspect_ratios: Optional[List[float]] = None,
         model_version: Optional[str] = None,
         **kwargs: Any
     ) -> _models.ImageAnalysisResult:
-        ...
+        """Performs a single Image Analysis operation.
+
+        :param image_url: The publicly accessible URL of the image to analyze.
+        :type image_url: str
+        :param visual_features: A list of visual features to analyze. Required. Seven visual features
+         are supported: Caption, DenseCaptions, Read (OCR), Tags, Objects, SmartCrops, and People. At
+         least one visual feature must be specified.
+        :type visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
+        :keyword language: The desired language for result generation (a two-letter language code).
+         Defaults to 'en' (English). See https://aka.ms/cv-languages for a list of supported languages.
+        :paramtype language: str
+        :keyword gender_neutral_caption: Boolean flag for enabling gender-neutral captioning for
+         Caption and Dense Captions features. Defaults to 'false'.
+         Captions may contain gender terms (for example: 'man', 'woman', or 'boy', 'girl').
+         If you set this to 'true', those will be replaced with gender-neutral terms (for example:
+         'person' or 'child').
+        :paramtype gender_neutral_caption: bool
+        :keyword smart_crops_aspect_ratios: A list of aspect ratios to use for smart cropping.
+         Defaults to one crop region with an aspect ratio the service sees fit between
+         0.5 and 2.0 (inclusive). Aspect ratios are calculated by dividing the target crop
+         width in pixels by the height in pixels. When set, supported values are
+         between 0.75 and 1.8 (inclusive).
+        :paramtype smart_crops_aspect_ratios: list[float]
+        :keyword model_version: The version of the cloud AI model used for analysis. Defaults to 'latest',
+         for the latest AI model with recent improvements.
+         The format is the following: 'latest' or 'YYYY-MM-DD' or 'YYYY-MM-DD-preview',
+         where 'YYYY', 'MM', 'DD' are the year, month and day associated with the model.
+         If you would like to make sure analysis results do not change over time, set this
+         value to a specific model version.
+        :paramtype model_version: str
+        :return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
+        :rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+
+        visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)
+
+        return ImageAnalysisClientOperationsMixin._analyze_from_url(  # pylint: disable=protected-access
+            self,
+            image_content=_models._models.ImageUrl(url=image_url),  # pylint: disable=protected-access
+            visual_features=visual_features_impl,
+            language=language,
+            gender_neutral_caption=gender_neutral_caption,
+            smart_crops_aspect_ratios=smart_crops_aspect_ratios,
+            model_version=model_version,
+            **kwargs
+        )
+
     @distributed_trace
     def analyze(
         self,
-        *,
+        image_data: bytes,
         visual_features: List[_models.VisualFeatures],
-        image_data: Optional[bytes] = None,
-        image_url: Optional[str] = None,
+        *,
         language: Optional[str] = None,
         gender_neutral_caption: Optional[bool] = None,
         smart_crops_aspect_ratios: Optional[List[float]] = None,
         model_version: Optional[str] = None,
@@ -78,14 +102,12 @@
     ) -> _models.ImageAnalysisResult:
         """Performs a single Image Analysis operation.
 
-        :keyword image_url: The publicly accessible URL of the image to analyze.
-        :paramtype image_url: str
-        :keyword image_data: A buffer containing the whole image to be analyzed.
-        :paramtype image_data: bytes
-        :keyword visual_features: A list of visual features to analyze. Required. Seven visual features
+        :param image_data: A buffer containing the whole image to be analyzed.
+        :type image_data: bytes
+        :param visual_features: A list of visual features to analyze. Required. Seven visual features
         are supported: Caption, DenseCaptions, Read (OCR), Tags, Objects, SmartCrops, and People. At
         least one visual feature must be specified.
-        :paramtype visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
+        :type visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
         :keyword language: The desired language for result generation (a two-letter language code).
         Defaults to 'en' (English). See https://aka.ms/cv-languages for a list of supported languages.
         :paramtype language: str
@@ -115,31 +137,16 @@
 
         visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)
 
-        if image_url is not None:
-            return ImageAnalysisClientOperationsMixin._analyze_from_url(  # pylint: disable=protected-access
-                self,
-                image_content=_models._models.ImageUrl(url=image_url),  # pylint: disable=protected-access
-                visual_features=visual_features_impl,
-                language=language,
-                gender_neutral_caption=gender_neutral_caption,
-                smart_crops_aspect_ratios=smart_crops_aspect_ratios,
-                model_version=model_version,
-                **kwargs
-            )
-
-        if image_data is not None:
-            return ImageAnalysisClientOperationsMixin._analyze_from_image_data(  # pylint: disable=protected-access
-                self,
-                image_content=image_data,
-                visual_features=visual_features_impl,
-                language=language,
-                gender_neutral_caption=gender_neutral_caption,
-                smart_crops_aspect_ratios=smart_crops_aspect_ratios,
-                model_version=model_version,
-                **kwargs
-            )
-
-        raise ValueError("Either image_data or image_url must be specified.")
+        return ImageAnalysisClientOperationsMixin._analyze_from_image_data(  # pylint: disable=protected-access
+            self,
+            image_content=image_data,
+            visual_features=visual_features_impl,
+            language=language,
+            gender_neutral_caption=gender_neutral_caption,
+            smart_crops_aspect_ratios=smart_crops_aspect_ratios,
+            model_version=model_version,
+            **kwargs
+        )
 
 
 __all__: List[str] = [
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_operations/_operations.py b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_operations/_operations.py
index ddfde057ad26..161b5411b449 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_operations/_operations.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_operations/_operations.py
@@ -89,9 +89,6 @@ async def _analyze_from_image_data(
         If however you would like to make sure analysis results do not change over time, set this
         value to a specific model version. Default value is None.
         :paramtype model_version: str
-        :keyword content_type: The format of the HTTP payload. Default value is
-         "application/octet-stream".
-        :paramtype content_type: str
         :return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
         :rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
         :raises ~azure.core.exceptions.HttpResponseError:
@@ -407,8 +404,6 @@ async def _analyze_from_url(
         If however you would like to make sure analysis results do not change over time, set this
         value to a specific model version. Default value is None.
         :paramtype model_version: str
-        :keyword content_type: The format of the HTTP payload. Default value is None.
-        :paramtype content_type: str
         :return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
         :rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
         :raises ~azure.core.exceptions.HttpResponseError:
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_patch.py b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_patch.py
index 9db9789449e9..117a0e6c4ebb 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_patch.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/azure/ai/vision/imageanalysis/aio/_patch.py
@@ -6,16 +6,8 @@
 Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
 
-Note 1: the analyze overloads here should have been implemented in the `_patch.py` file in the `_operations` folder
-instead of here. That would have worked fine, except there is an issue with the generated Python
-ref-docs. The overloads do not show up. See this GitHub issue: https://github.com/Azure/autorest.python/issues/1315.
-To overcome this, the overloads are defined here. Consider moving them to the right place once the
-above issue is fixed.
-
-Note 2: Don't bother documenting the two overload methods below. The doc tool (sphinx) will not pick them up. Instead,
-document the 3rd method.
 """
-from typing import List, overload, Any, Optional, Union
+from typing import List, Any, Optional, Union
 from azure.core.tracing.decorator_async import distributed_trace_async
 from .. import models as _models
 from ._operations._operations import ImageAnalysisClientOperationsMixin
@@ -35,41 +27,73 @@ class ImageAnalysisClient(ImageAnalysisClientGenerated):
     :paramtype api_version: str
     """
 
-    @overload
-    async def analyze(
+    @distributed_trace_async
+    async def analyze_from_url(
         self,
-        *,
         image_url: str,
         visual_features: List[_models.VisualFeatures],
-        language: Optional[str] = None,
-        gender_neutral_caption: Optional[bool] = None,
-        smart_crops_aspect_ratios: Optional[List[float]] = None,
-        model_version: Optional[str] = None,
-        **kwargs: Any
-    ) -> _models.ImageAnalysisResult:
-        ...
-
-    @overload
-    async def analyze(
-        self,
         *,
-        image_data: bytes,
-        visual_features: List[_models.VisualFeatures],
         language: Optional[str] = None,
         gender_neutral_caption: Optional[bool] = None,
         smart_crops_aspect_ratios: Optional[List[float]] = None,
         model_version: Optional[str] = None,
         **kwargs: Any
     ) -> _models.ImageAnalysisResult:
-        ...
+        """Performs a single Image Analysis operation.
+
+        :param image_url: The publicly accessible URL of the image to analyze.
+        :type image_url: str
+        :param visual_features: A list of visual features to analyze. Required. Seven visual features
+         are supported: Caption, DenseCaptions, Read (OCR), Tags, Objects, SmartCrops, and People. At
+         least one visual feature must be specified.
+        :type visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
+        :keyword language: The desired language for result generation (a two-letter language code).
+         Defaults to 'en' (English). See https://aka.ms/cv-languages for a list of supported languages.
+        :paramtype language: str
+        :keyword gender_neutral_caption: Boolean flag for enabling gender-neutral captioning for
+         Caption and Dense Captions features. Defaults to 'false'.
+         Captions may contain gender terms (for example: 'man', 'woman', or 'boy', 'girl').
+         If you set this to 'true', those will be replaced with gender-neutral terms (for example:
+         'person' or 'child').
+        :paramtype gender_neutral_caption: bool
+        :keyword smart_crops_aspect_ratios: A list of aspect ratios to use for smart cropping.
+         Defaults to one crop region with an aspect ratio the service sees fit between
+         0.5 and 2.0 (inclusive). Aspect ratios are calculated by dividing the target crop
+         width in pixels by the height in pixels. When set, supported values are
+         between 0.75 and 1.8 (inclusive).
+        :paramtype smart_crops_aspect_ratios: list[float]
+        :keyword model_version: The version of the cloud AI model used for analysis. Defaults to 'latest',
+         for the latest AI model with recent improvements.
+         The format is the following: 'latest' or 'YYYY-MM-DD' or 'YYYY-MM-DD-preview',
+         where 'YYYY', 'MM', 'DD' are the year, month and day associated with the model.
+         If you would like to make sure analysis results do not change over time, set this
+         value to a specific model version.
+        :paramtype model_version: str
+        :return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
+        :rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+
+        visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)
+
+        return await ImageAnalysisClientOperationsMixin._analyze_from_url(  # pylint: disable=protected-access
+            self,
+            image_content=_models._models.ImageUrl(url=image_url),  # pylint: disable=protected-access
+            visual_features=visual_features_impl,
+            language=language,
+            gender_neutral_caption=gender_neutral_caption,
+            smart_crops_aspect_ratios=smart_crops_aspect_ratios,
+            model_version=model_version,
+            **kwargs
+        )
+
     @distributed_trace_async
     async def analyze(
         self,
-        *,
+        image_data: bytes,
         visual_features: List[_models.VisualFeatures],
-        image_data: Optional[bytes] = None,
-        image_url: Optional[str] = None,
+        *,
         language: Optional[str] = None,
         gender_neutral_caption: Optional[bool] = None,
         smart_crops_aspect_ratios: Optional[List[float]] = None,
         model_version: Optional[str] = None,
@@ -78,14 +102,12 @@
     ) -> _models.ImageAnalysisResult:
         """Performs a single Image Analysis operation.
 
-        :keyword image_url: The publicly accessible URL of the image to analyze.
-        :paramtype image_url: str
-        :keyword image_data: A buffer containing the whole image to be analyzed.
-        :paramtype image_data: bytes
-        :keyword visual_features: A list of visual features to analyze. Required. Seven visual features
+        :param image_data: A buffer containing the whole image to be analyzed.
+        :type image_data: bytes
+        :param visual_features: A list of visual features to analyze. Required. Seven visual features
         are supported: Caption, DenseCaptions, Read (OCR), Tags, Objects, SmartCrops, and People. At
         least one visual feature must be specified.
-        :paramtype visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
+        :type visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
         :keyword language: The desired language for result generation (a two-letter language code).
         Defaults to 'en' (English). See https://aka.ms/cv-languages for a list of supported languages.
         :paramtype language: str
@@ -115,31 +137,16 @@
 
         visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)
 
-        if image_url is not None:
-            return await ImageAnalysisClientOperationsMixin._analyze_from_url(  # pylint: disable=protected-access
-                self,
-                image_content=_models._models.ImageUrl(url=image_url),  # pylint: disable=protected-access
-                visual_features=visual_features_impl,
-                language=language,
-                gender_neutral_caption=gender_neutral_caption,
-                smart_crops_aspect_ratios=smart_crops_aspect_ratios,
-                model_version=model_version,
-                **kwargs
-            )
-
-        if image_data is not None:
-            return await ImageAnalysisClientOperationsMixin._analyze_from_image_data(  # pylint: disable=protected-access
-                self,
-                image_content=image_data,
-                visual_features=visual_features_impl,
-                language=language,
-                gender_neutral_caption=gender_neutral_caption,
-                smart_crops_aspect_ratios=smart_crops_aspect_ratios,
-                model_version=model_version,
-                **kwargs
-            )
-
-        raise ValueError("Either image_data or image_url must be specified.")
+        return await ImageAnalysisClientOperationsMixin._analyze_from_image_data(  # pylint: disable=protected-access
+            self,
+            image_content=image_data,
+            visual_features=visual_features_impl,
+            language=language,
+            gender_neutral_caption=gender_neutral_caption,
+            smart_crops_aspect_ratios=smart_crops_aspect_ratios,
+            model_version=model_version,
+            **kwargs
+        )
 
 
 __all__: List[str] = [
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/samples/async_samples/sample_ocr_image_url_async.py b/sdk/vision/azure-ai-vision-imageanalysis/samples/async_samples/sample_ocr_image_url_async.py
index 8df4c30dcd54..502d05d4417d 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/samples/async_samples/sample_ocr_image_url_async.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/samples/async_samples/sample_ocr_image_url_async.py
@@ -53,7 +53,7 @@ async def sample_ocr_image_file_async():
     )
 
     # Extract text (OCR) from an image URL, asynchronously.
-    result = await client.analyze(
+    result = await client.analyze_from_url(
         image_url="https://aka.ms/azsdk/image-analysis/sample.jpg", visual_features=[VisualFeatures.READ]
     )
 
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_caption_image_url.py b/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_caption_image_url.py
index 797b70df2b9a..fe9dad3aa4a1 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_caption_image_url.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_caption_image_url.py
@@ -51,7 +51,7 @@ def sample_caption_image_url():
 
     # [START caption]
     # Get a caption for the image. This will be a synchronous (blocking) call.
-    result = client.analyze(
+    result = client.analyze_from_url(
         image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
         visual_features=[VisualFeatures.CAPTION],
         gender_neutral_caption=True,  # Optional (default is False)
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_ocr_image_url.py b/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_ocr_image_url.py
index f508ec23a210..96a835bb9cb3 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_ocr_image_url.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/samples/sample_ocr_image_url.py
@@ -54,7 +54,7 @@ def sample_ocr_image_url():
 
     # [START read]
     # Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
-    result = client.analyze(
+    result = client.analyze_from_url(
         image_url="https://aka.ms/azsdk/image-analysis/sample.jpg", visual_features=[VisualFeatures.READ]
     )
 
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/setup.py b/sdk/vision/azure-ai-vision-imageanalysis/setup.py
index 9d083a621a72..b8b29a5bb973 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/setup.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/setup.py
@@ -65,7 +65,7 @@
     },
     install_requires=[
         "isodate<1.0.0,>=0.6.1",
-        "azure-core<2.0.0,>=1.29.5",
+        "azure-core<2.0.0,>=1.30.0",
     ],
     python_requires=">=3.8",
 )
diff --git a/sdk/vision/azure-ai-vision-imageanalysis/tests/image_analysis_test_base.py b/sdk/vision/azure-ai-vision-imageanalysis/tests/image_analysis_test_base.py
index 8458ba0dd9e7..2823ae263dd2 100644
--- a/sdk/vision/azure-ai-vision-imageanalysis/tests/image_analysis_test_base.py
+++ b/sdk/vision/azure-ai-vision-imageanalysis/tests/image_analysis_test_base.py
@@ -95,10 +95,8 @@ def _do_analysis(
         **kwargs,
     ):
 
-        image_content: Union[str, bytes]
-
         if "http" in image_source:
-            result = self.client.analyze(
+            result = self.client.analyze_from_url(
                 image_url=image_source,
                 visual_features=visual_features,
                 language=language,
@@ -147,10 +145,8 @@ async def _do_async_analysis(
         **kwargs,
     ):
 
-        image_content: Union[str, bytes]
-
         if "http" in image_source:
-            result = await self.async_client.analyze(
+            result = await self.async_client.analyze_from_url(
                 image_url=image_source,
                 visual_features=visual_features,
                 language=language,
@@ -197,10 +193,9 @@ def _do_analysis_with_error(
         **kwargs,
     ):
 
-        image_content: Union[str, bytes]
         try:
             if "http" in image_source:
-                result = self.client.analyze(image_url=image_source, visual_features=visual_features)
+                result = self.client.analyze_from_url(image_url=image_source, visual_features=visual_features)
             else:
                 # Load image to analyze into a 'bytes' object
                 with open(image_source, "rb") as f:
@@ -225,11 +220,11 @@ async def _do_async_analysis_with_error(
         **kwargs,
     ):
 
-        image_content: Union[str, bytes]
-
         try:
             if "http" in image_source:
-                result = await self.async_client.analyze(image_url=image_source, visual_features=visual_features)
+                result = await self.async_client.analyze_from_url(
+                    image_url=image_source, visual_features=visual_features
+                )
             else:
                 # Load image to analyze into a 'bytes' object
                 with open(image_source, "rb") as f:
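
---

For reviewers: the sketch below shows how calling code migrates across the `analyze` / `analyze_from_url` split described in the CHANGELOG entry above. It is illustrative only. The method names, parameters, and `VisualFeatures` values are taken from this diff; the client construction (`AzureKeyCredential`, the `VISION_ENDPOINT`/`VISION_KEY` environment variables) and the `result.caption` access follow the package samples and are assumptions, not part of this diff.

```python
# Hypothetical migration sketch for the 1.0.0b2 breaking change (sync client).
import os

from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

# Assumed setup, following the package samples: endpoint and key from env vars.
client = ImageAnalysisClient(
    endpoint=os.environ["VISION_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["VISION_KEY"]),
)

# 1.0.0b2: a publicly accessible URL now goes through the dedicated method.
# (In 1.0.0b1 this was client.analyze(image_url=...).)
result = client.analyze_from_url(
    image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
    visual_features=[VisualFeatures.CAPTION],
    gender_neutral_caption=True,  # Optional (default is False)
)

# `analyze` remains the method for an in-memory buffer; note that `image_data`
# is now the first positional parameter rather than a keyword-only argument.
with open("sample.jpg", "rb") as f:  # Assumed local file, for illustration.
    ocr_result = client.analyze(f.read(), visual_features=[VisualFeatures.READ])

if result.caption is not None:  # Assumed result shape, per the service docs.
    print(f"Caption: '{result.caption.text}'")
```

The same rename applies to the async client in `aio/_patch.py` (`await client.analyze_from_url(...)`), as the updated `sample_ocr_image_url_async.py` shows.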