diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py index a5cf09794648..008428cd0a84 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py @@ -89,14 +89,11 @@ def get_chat_completions_client(self, **kwargs: Any) -> "ChatCompletionsClient": ) from e endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access - # Older Inference SDK versions use ml.azure.com as the scope. Make sure to set the correct value here. This - # is only relevant of course if EntraID auth is used. - credential_scopes = ["https://ai.azure.com/.default"] client = ChatCompletionsClient( endpoint=endpoint, credential=self._outer_instance._config.credential, # pylint: disable=protected-access - credential_scopes=credential_scopes, + credential_scopes=self._outer_instance._config.credential_scopes, # pylint: disable=protected-access user_agent=kwargs.pop( "user_agent", self._outer_instance._patched_user_agent # pylint: disable=protected-access ), @@ -131,14 +128,11 @@ def get_embeddings_client(self, **kwargs: Any) -> "EmbeddingsClient": # type: i ) from e endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access - # Older Inference SDK versions use ml.azure.com as the scope. Make sure to set the correct value here. This - # is only relevant of course if EntraID auth is used. 
- credential_scopes = ["https://ai.azure.com/.default"] client = EmbeddingsClient( endpoint=endpoint, credential=self._outer_instance._config.credential, # pylint: disable=protected-access - credential_scopes=credential_scopes, + credential_scopes=self._outer_instance._config.credential_scopes, # pylint: disable=protected-access user_agent=kwargs.pop( "user_agent", self._outer_instance._patched_user_agent # pylint: disable=protected-access ), @@ -173,14 +167,11 @@ def get_image_embeddings_client(self, **kwargs: Any) -> "ImageEmbeddingsClient": ) from e endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access - # Older Inference SDK versions use ml.azure.com as the scope. Make sure to set the correct value here. This - # is only relevant of course if EntraID auth is used. - credential_scopes = ["https://ai.azure.com/.default"] client = ImageEmbeddingsClient( endpoint=endpoint, credential=self._outer_instance._config.credential, # pylint: disable=protected-access - credential_scopes=credential_scopes, + credential_scopes=self._outer_instance._config.credential_scopes, # pylint: disable=protected-access user_agent=kwargs.pop( "user_agent", self._outer_instance._patched_user_agent # pylint: disable=protected-access ), diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py index 95f84bc58bca..aa35b5bd6090 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py @@ -82,15 +82,11 @@ def get_chat_completions_client(self, **kwargs: Any) -> "ChatCompletionsClient": ) from e endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access - # TODO: Remove this before //build? - # Older Inference SDK versions use ml.azure.com as the scope. 
Make sure to set the correct value here. This - # is only relevant of course if EntraID auth is used. - credential_scopes = ["https://ai.azure.com/.default"] client = ChatCompletionsClient( endpoint=endpoint, credential=self._outer_instance._config.credential, # pylint: disable=protected-access - credential_scopes=credential_scopes, + credential_scopes=self._outer_instance._config.credential_scopes, # pylint: disable=protected-access user_agent=kwargs.pop( "user_agent", self._outer_instance._patched_user_agent # pylint: disable=protected-access ), @@ -125,14 +121,11 @@ def get_embeddings_client(self, **kwargs: Any) -> "EmbeddingsClient": # type: i ) from e endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access - # Older Inference SDK versions use ml.azure.com as the scope. Make sure to set the correct value here. This - # is only relevant of course if EntraID auth is used. - credential_scopes = ["https://ai.azure.com/.default"] client = EmbeddingsClient( endpoint=endpoint, credential=self._outer_instance._config.credential, # pylint: disable=protected-access - credential_scopes=credential_scopes, + credential_scopes=self._outer_instance._config.credential_scopes, # pylint: disable=protected-access user_agent=kwargs.pop( "user_agent", self._outer_instance._patched_user_agent # pylint: disable=protected-access ), @@ -167,14 +160,11 @@ def get_image_embeddings_client(self, **kwargs: Any) -> "ImageEmbeddingsClient": ) from e endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access - # Older Inference SDK versions use ml.azure.com as the scope. Make sure to set the correct value here. This - # is only relevant of course if EntraID auth is used. 
- credential_scopes = ["https://ai.azure.com/.default"] client = ImageEmbeddingsClient( endpoint=endpoint, credential=self._outer_instance._config.credential, # pylint: disable=protected-access - credential_scopes=credential_scopes, + credential_scopes=self._outer_instance._config.credential_scopes, # pylint: disable=protected-access user_agent=kwargs.pop( "user_agent", self._outer_instance._patched_user_agent # pylint: disable=protected-access ), diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt index a905ecb6e0d2..4a26587c01f1 100644 --- a/sdk/ai/azure-ai-projects/dev_requirements.txt +++ b/sdk/ai/azure-ai-projects/dev_requirements.txt @@ -2,8 +2,6 @@ ../../core/azure-core ../../identity/azure-identity aiohttp -azure.storage.blob azure.ai.inference -azure.ai.agents openai prompty diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index a7c22e0ddd95..033d204a4d51 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -11,6 +11,9 @@ warn_unused_configs = true ignore_missing_imports = true follow_imports_for_stubs = false +[tool.azure-sdk-build] +verifytypes = false + [tool.isort] profile = "black" line_length = 120 diff --git a/sdk/ai/azure-ai-projects/pyrightconfig.json b/sdk/ai/azure-ai-projects/pyrightconfig.json index 13b632b65ee5..5bf110e10df8 100644 --- a/sdk/ai/azure-ai-projects/pyrightconfig.json +++ b/sdk/ai/azure-ai-projects/pyrightconfig.json @@ -3,8 +3,6 @@ "reportMissingImports": false, "pythonVersion": "3.11", "exclude": [ - "**/_client.py", - "**/_operations.py" ], "extraPaths": [ "./../../core/azure-core", diff --git a/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations.py index 0e7591f46373..ecb0a8ad8721 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations.py @@ -35,10 +35,12 @@ # DatasetVersion, ) -endpoint = os.environ["PROJECT_ENDPOINT"] # Sample : https://.services.ai.azure.com/api/projects/ -model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com -model_api_key= os.environ["MODEL_API_KEY"] -model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini +endpoint = os.environ[ + "PROJECT_ENDPOINT" +] # Sample : https://.services.ai.azure.com/api/projects/ +model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com +model_api_key = os.environ["MODEL_API_KEY"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential: @@ -85,10 +87,13 @@ }, ) - evaluation_response: Evaluation = project_client.evaluations.create(evaluation, headers={ - "model-endpoint": model_endpoint, - "api-key": model_api_key, - }) + evaluation_response: Evaluation = project_client.evaluations.create( + evaluation, + headers={ + "model-endpoint": model_endpoint, + "api-key": model_api_key, + }, + ) print(evaluation_response) print("Get evaluation") diff --git a/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_aoai_graders.py b/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_aoai_graders.py index c5c37faa61d1..a3e5192a09bd 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_aoai_graders.py +++ b/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_aoai_graders.py @@ -39,10 +39,12 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] # Sample : https://.services.ai.azure.com/api/projects/ -model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com -model_api_key= os.environ["MODEL_API_KEY"] -model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini +endpoint = os.environ[ 
+ "PROJECT_ENDPOINT" +] # Sample : https://.services.ai.azure.com/api/projects/ +model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com +model_api_key = os.environ["MODEL_API_KEY"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential: @@ -89,8 +91,8 @@ "string_check": EvaluatorConfiguration( id=EvaluatorIds.STRING_CHECK_GRADER.value, init_params={ - "input" :"{{item.query}}", - "name":"starts with what is", + "input": "{{item.query}}", + "name": "starts with what is", "operation": "like", "reference": "What is", "deployment_name": model_deployment_name, @@ -113,8 +115,8 @@ "evaluation_metric": "fuzzy_match", "input": "{{item.query}}", "name": "similarity", - "pass_threshold" :1, - "reference":"{{item.query}}", + "pass_threshold": 1, + "reference": "{{item.query}}", "deployment_name": model_deployment_name, }, ), @@ -134,10 +136,13 @@ }, ) - evaluation_response: Evaluation = project_client.evaluations.create(evaluation, headers={ - "model-endpoint": model_endpoint, - "api-key": model_api_key, - }) + evaluation_response: Evaluation = project_client.evaluations.create( + evaluation, + headers={ + "model-endpoint": model_endpoint, + "api-key": model_api_key, + }, + ) print(evaluation_response) print("Get evaluation") diff --git a/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_async.py index bf73028409e1..e43576e57497 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluation/sample_evaluations_async.py @@ -34,8 +34,11 @@ # DatasetVersion, ) + async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] # Sample : https://.services.ai.azure.com/api/projects/ + endpoint = os.environ[ + "PROJECT_ENDPOINT" + ] # Sample : 
https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini @@ -85,10 +88,13 @@ async def main() -> None: }, ) - evaluation_response: Evaluation = await project_client.evaluations.create(evaluation, headers={ - "model-endpoint": model_endpoint, - "api-key": model_api_key, - }) + evaluation_response: Evaluation = await project_client.evaluations.create( + evaluation, + headers={ + "model-endpoint": model_endpoint, + "api-key": model_api_key, + }, + ) print(evaluation_response) print("Get evaluation") diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 3e539b3c443e..179d5ce9af57 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -18,8 +18,9 @@ class TestSamples: * 'cd' to the folder '/sdk/ai/azure-ai-projects' in your azure-sdk-for-python repo. * set PROJECT_ENDPOINT= - Define your Azure AI Foundry project endpoint used by the test. * set ENABLE_AZURE_AI_PROJECTS_CONSOLE_LOGGING=false - to make sure logging is not enabled in the test, to reduce console spew. + * Uncomment the two lines that start with "@pytest.mark.skip" below. 
* Run: pytest tests/samples/test_samples.py::TestSamples - * Load the resulting report in Excel: tests\samples\samples_report.csv + * Load the resulting report in Excel: tests/samples/samples_report.csv """ @classmethod @@ -190,7 +191,7 @@ async def _run_sample_async(cls, sample_name: str) -> None: ), ( "samples\\inference\\sample_image_embeddings_with_azure_ai_inference_client.py", - "to-do-add-model", + "Cohere-embed-v3-english", "", "samples\\inference", ), @@ -249,7 +250,7 @@ def test_samples( ), ( "samples\\inference\\async_samples\\sample_image_embeddings_with_azure_ai_inference_client_async.py", - "to-do-add-model", + "Cohere-embed-v3-english", "", "samples\\inference\\async_samples", ), diff --git a/shared_requirements.txt b/shared_requirements.txt index ba6e37575abe..3868d47ae0d2 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -1,5 +1,6 @@ msrest anyio +azure-ai-agents azure-ai-ml azure-ai-resources azure-common