diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md
index 1e33088ac275..179d92a1d031 100644
--- a/sdk/ai/azure-ai-projects/README.md
+++ b/sdk/ai/azure-ai-projects/README.md
@@ -250,7 +250,7 @@ print(
 dataset: DatasetVersion = project_client.datasets.upload_file(
     name=dataset_name,
     version=dataset_version_1,
-    file_path=file_path,
+    file_path=data_file,
 )
 print(dataset)
@@ -260,7 +260,7 @@ print(
 dataset = project_client.datasets.upload_folder(
     name=dataset_name,
     version=dataset_version_2,
-    folder=folder_path,
+    folder=data_folder,
 )
 print(dataset)
diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_file1.txt b/sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_file1.txt
similarity index 100%
rename from sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_file1.txt
rename to sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_file1.txt
diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_file2.txt b/sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_file2.txt
similarity index 100%
rename from sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_file2.txt
rename to sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_file2.txt
diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder1/sample_file3.txt b/sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_subfolder/data_file3.txt
similarity index 100%
rename from sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder1/sample_file3.txt
rename to sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_subfolder/data_file3.txt
diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder1/sample_file4.txt b/sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_subfolder/data_file4.txt
similarity index 100%
rename from sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder1/sample_file4.txt
rename to sdk/ai/azure-ai-projects/samples/datasets/data_folder/data_subfolder/data_file4.txt
diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py
index 834e045a840c..c52f67068389 100644
--- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py
+++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py
@@ -23,6 +23,7 @@
     2) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample.
     3) DATASET_VERSION_1 - Optional. The first version of the Dataset to create and use in this sample.
     4) DATASET_VERSION_2 - Optional. The second version of the Dataset to create and use in this sample.
+    5) DATA_FOLDER - Optional. The folder path where the data files for upload are located.
""" import os @@ -35,10 +36,10 @@ dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") dataset_version_2 = os.environ.get("DATASET_VERSION_2", "2.0") -# Construct the full folder path `sample_folder`, and full file path `sample_folder/sample_file1.txt` +# Construct the paths to the data folder and data file used in this sample script_dir = os.path.dirname(os.path.abspath(__file__)) -folder_path = os.path.join(script_dir, "sample_folder") -file_path = os.path.join(folder_path, "sample_file1.txt") +data_folder = os.environ.get("DATA_FOLDER", os.path.join(script_dir, "data_folder")) +data_file = os.path.join(data_folder, "data_file1.txt") with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential: @@ -51,7 +52,7 @@ dataset: DatasetVersion = project_client.datasets.upload_file( name=dataset_name, version=dataset_version_1, - file_path=file_path, + file_path=data_file, ) print(dataset) @@ -61,7 +62,7 @@ dataset = project_client.datasets.upload_folder( name=dataset_name, version=dataset_version_2, - folder=folder_path, + folder=data_folder, ) print(dataset) diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py index 383bfb11efaa..cd921590e5b3 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py @@ -23,6 +23,7 @@ 2) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 3) DATASET_VERSION_1 - Optional. The first version of the Dataset to create and use in this sample. 4) DATASET_VERSION_2 - Optional. The second version of the Dataset to create and use in this sample. + 5) DATA_FOLDER - Optional. The folder path where the data files for upload are located. 
""" import asyncio @@ -31,10 +32,10 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import DatasetVersion -# Construct the full folder path `sample_folder`, and full file path `sample_folder/sample_file1.txt` +# Construct the paths to the data folder and data file used in this sample script_dir = os.path.dirname(os.path.abspath(__file__)) -folder_path = os.path.join(script_dir, "sample_folder") -file_path = os.path.join(folder_path, "sample_file1.txt") +data_folder = os.environ.get("DATA_FOLDER", os.path.join(script_dir, "data_folder")) +data_file = os.path.join(data_folder, "data_file1.txt") async def main() -> None: @@ -54,7 +55,7 @@ async def main() -> None: dataset: DatasetVersion = await project_client.datasets.upload_file( name=dataset_name, version=dataset_version_1, - file_path=file_path, + file_path=data_file, ) print(dataset) @@ -64,7 +65,7 @@ async def main() -> None: dataset = await project_client.datasets.upload_folder( name=dataset_name, version=dataset_version_2, - folder=folder_path, + folder=data_folder, ) print(dataset) diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file5.txt b/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file5.txt deleted file mode 100644 index 2f4e87e14c77..000000000000 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file5.txt +++ /dev/null @@ -1 +0,0 @@ -This is sample file 5 diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file6.txt b/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file6.txt deleted file mode 100644 index e55c3637cdf5..000000000000 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file6.txt +++ /dev/null @@ -1 +0,0 @@ -This is sample file 6 diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file7.txt b/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file7.txt deleted file mode 100644 index 843cf01054c4..000000000000 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_file7.txt +++ /dev/null @@ -1 +0,0 @@ -This is sample file 7 diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_folder3/sample_file8.txt b/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_folder3/sample_file8.txt deleted file mode 100644 index 73a747b6b520..000000000000 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_folder/sample_subfolder2/sample_folder3/sample_file8.txt +++ /dev/null @@ -1 +0,0 @@ -This is sample file 8 diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py index 70af780169bf..03dc13c53db2 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py @@ -23,6 +23,7 @@ 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Azure AI Foundry project. 2) MODEL_DEPLOYMENT_NAME - The AI model deployment name, as found in your AI Foundry project. + 3) DATA_FOLDER - Optional. 
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py
index 70af780169bf..03dc13c53db2 100644
--- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py
@@ -23,6 +23,7 @@
     1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your
        Azure AI Foundry project.
     2) MODEL_DEPLOYMENT_NAME - The AI model deployment name, as found in your AI Foundry project.
+    3) DATA_FOLDER - Optional. The folder path where the Prompty file is located.
 """

 import os
@@ -32,14 +33,17 @@
 endpoint = os.environ["PROJECT_ENDPOINT"]
 model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"]

+# Construct the path to the Prompty file used in this sample
+data_folder = os.environ.get("DATA_FOLDER", os.path.dirname(os.path.abspath(__file__)))
+prompty_file = os.path.join(data_folder, "sample1.prompty")
+
 with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential:

     with AIProjectClient(endpoint=endpoint, credential=credential) as project_client:

         with project_client.inference.get_chat_completions_client() as client:

-            path = "./sample1.prompty"
-            prompt_template = PromptTemplate.from_prompty(file_path=path)
+            prompt_template = PromptTemplate.from_prompty(file_path=prompty_file)

             input = "When I arrived, can I still have breakfast?"
             rules = [
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_image_embeddings_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_image_embeddings_with_azure_ai_inference_client.py
index 85c97681c2d6..50e642e33c9e 100644
--- a/sdk/ai/azure-ai-projects/samples/inference/sample_image_embeddings_with_azure_ai_inference_client.py
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_image_embeddings_with_azure_ai_inference_client.py
@@ -21,6 +21,7 @@
     1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your
        Azure AI Foundry project.
     2) MODEL_DEPLOYMENT_NAME - The AI model deployment name, as found in your AI Foundry project.
+    3) DATA_FOLDER - Optional. The folder path where the image file is located.
 """

 import os
@@ -31,6 +32,10 @@
 endpoint = os.environ["PROJECT_ENDPOINT"]
 model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"]

+# Construct the path to the image file used in this sample
+data_folder = os.environ.get("DATA_FOLDER", os.path.dirname(os.path.abspath(__file__)))
+image_file = os.path.join(data_folder, "sample1.png")
+
 with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential:

     with AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
@@ -39,7 +44,7 @@

             response = client.embed(
                 model=model_deployment_name,
-                input=[ImageEmbeddingInput.load(image_file="sample1.png", image_format="png")],
+                input=[ImageEmbeddingInput.load(image_file=image_file, image_format="png")],
             )

             for item in response.data:
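The two inference samples above apply the same DATA_FOLDER fallback, pointing PromptTemplate.from_prompty and ImageEmbeddingInput.load at resolved paths instead of hard-coded relative ones. The condensed sketch below is illustrative only: the import paths and the get_image_embeddings_client() getter are assumptions, while from_prompty, ImageEmbeddingInput.load, and client.embed are the calls shown in the diff.

# Condensed sketch of the DATA_FOLDER fallback used by the two inference samples above.
# Import paths and get_image_embeddings_client() are assumed; the remaining calls come from the diff.
import os

from azure.ai.inference.models import ImageEmbeddingInput
from azure.ai.inference.prompts import PromptTemplate
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

# DATA_FOLDER falls back to the directory containing this script.
data_folder = os.environ.get("DATA_FOLDER", os.path.dirname(os.path.abspath(__file__)))
prompty_file = os.path.join(data_folder, "sample1.prompty")
image_file = os.path.join(data_folder, "sample1.png")

# Load the chat prompt template from the resolved path (previously a hard-coded "./sample1.prompty").
prompt_template = PromptTemplate.from_prompty(file_path=prompty_file)

with DefaultAzureCredential() as credential:
    with AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client:
        with project_client.inference.get_image_embeddings_client() as client:  # assumed getter name
            response = client.embed(
                model=os.environ["MODEL_DEPLOYMENT_NAME"],
                input=[ImageEmbeddingInput.load(image_file=image_file, image_format="png")],
            )
            for item in response.data:
                print(item)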
("connections\\sample_connections.py", "", "connection1"), - ("deployments\\sample_deployments.py", "DeepSeek-V3", ""), - ("datasets\\sample_datasets.py", "", ""), - # ("evaluation\\sample_evaluations.py", "", ""), - ("indexes\\sample_indexes.py", "", ""), - ("inference\\sample_chat_completions_with_azure_ai_inference_client.py", "Phi-4", ""), + ("samples\\agents\\sample_agents.py", "gpt-4o", "", ""), + ("samples\\connections\\sample_connections.py", "", "connection1", ""), + ("samples\\deployments\\sample_deployments.py", "DeepSeek-V3", "", ""), + ("samples\\datasets\\sample_datasets.py", "", "", "samples\\datasets\\data_folder"), + # ("samples\\evaluation\\sample_evaluations.py", "", "", ""), + ("samples\\indexes\\sample_indexes.py", "", "", ""), + ("samples\\inference\\sample_chat_completions_with_azure_ai_inference_client.py", "Phi-4", "", ""), + ( + "samples\\inference\\sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py", + "Phi-4", + "", + "", + ), + ( + "samples\\inference\\sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py", + "Phi-4", + "", + "", + ), + ( + "samples\\inference\\sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py", + "Phi-4", + "", + "samples\\inference", + ), ( - "inference\\sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py", + "samples\\inference\\sample_chat_completions_with_azure_ai_inference_client_and_prompt_string.py", "Phi-4", "", + "", + ), + ("samples\\inference\\sample_chat_completions_with_azure_openai_client.py", "gpt-4o", "", ""), + ( + "samples\\inference\\sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py", + "gpt-4o", + "", + "", + ), + ( + "samples\\inference\\sample_chat_completions_with_azure_openai_client_and_console_tracing.py", + "gpt-4o", + "", + "", ), - ("inference\\sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py", "Phi-4", ""), - ("inference\\sample_chat_completions_with_azure_ai_inference_client_and_prompty_file.py", "Phi-4", ""), - ("inference\\sample_chat_completions_with_azure_ai_inference_client_and_prompt_string.py", "Phi-4", ""), - ("inference\\sample_chat_completions_with_azure_openai_client.py", "gpt-4o", ""), - ("inference\\sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py", "gpt-4o", ""), - ("inference\\sample_chat_completions_with_azure_openai_client_and_console_tracing.py", "gpt-4o", ""), - ("inference\\sample_image_embeddings_with_azure_ai_inference_client.py", "to-do-add-model", ""), - ("inference\\sample_text_embeddings_with_azure_ai_inference_client.py", "text-embedding-3-large", ""), - ("telemetry\\sample_telemetry.py", "", ""), + ( + "samples\\inference\\sample_image_embeddings_with_azure_ai_inference_client.py", + "to-do-add-model", + "", + "samples\\inference", + ), + ( + "samples\\inference\\sample_text_embeddings_with_azure_ai_inference_client.py", + "text-embedding-3-large", + "", + "", + ), + ("samples\\telemetry\\sample_telemetry.py", "", "", ""), ], ) - def test_samples(self, sample_name: str, model_deployment_name: str, connection_name: str) -> None: + @pytest.mark.skip(reason="This test should only run manually on your local machine, with live service calls.") + def test_samples( + self, sample_name: str, model_deployment_name: str, connection_name: str, data_folder: str + ) -> None: """ Run all the synchronous sample code in the samples folder. 
         which for example happens when the service responds with an error, the test will fail.
@@ -105,38 +143,57 @@ def test_samples(self, sample_name: str, model_deployment_name: str, connection_

         TestSamples._results[sample_name] = False
         self._set_env_vars(
-            sample_name, **{"model_deployment_name": model_deployment_name, "connection_name": connection_name}
+            sample_name,
+            **{
+                "model_deployment_name": model_deployment_name,
+                "connection_name": connection_name,
+                "data_folder": data_folder,
+            },
         )
         sample_path = os.path.normpath(os.path.join(TestSamples._samples_folder_path, sample_name))
         TestSamples._run_sample(sample_path)
         TestSamples._results[sample_name] = True

-    @pytest.mark.skip(reason="This test should only run manually on your local machine, with live service calls.")
     @pytest.mark.parametrize(
-        "sample_name, model_deployment_name, connection_name",
+        "sample_name, model_deployment_name, connection_name, data_folder",
         [
-            ("agents\\sample_agents_async.py", "", ""),
-            ("connections\\sample_connections_async.py", "", "connection1"),
-            ("datasets\\sample_datasets_async.py", "", ""),
-            ("deployments\\sample_deployments_async.py", "DeepSeek-V3", ""),
-            # ("evaluation\\sample_evaluations_async.py", "", ""),
-            ("indexes\\sample_indexes_async.py", "", ""),
-            ("inference\\async_samples\\sample_chat_completions_with_azure_ai_inference_client_async.py", "Phi-4", ""),
-            ("inference\\async_samples\\sample_chat_completions_with_azure_openai_client_async.py", "gpt-4o", ""),
+            ("samples\\agents\\sample_agents_async.py", "", "", ""),
+            ("samples\\connections\\sample_connections_async.py", "", "connection1", ""),
+            ("samples\\datasets\\sample_datasets_async.py", "", "", "samples\\datasets\\data_folder"),
+            ("samples\\deployments\\sample_deployments_async.py", "DeepSeek-V3", "", ""),
+            # ("samples\\evaluation\\sample_evaluations_async.py", "", "", ""),
+            ("samples\\indexes\\sample_indexes_async.py", "", "", ""),
             (
-                "inference\\async_samples\\sample_image_embeddings_with_azure_ai_inference_client_async.py",
+                "samples\\inference\\async_samples\\sample_chat_completions_with_azure_ai_inference_client_async.py",
+                "Phi-4",
+                "",
+                "",
+            ),
+            (
+                "samples\\inference\\async_samples\\sample_chat_completions_with_azure_openai_client_async.py",
+                "gpt-4o",
+                "",
+                "",
+            ),
+            (
+                "samples\\inference\\async_samples\\sample_image_embeddings_with_azure_ai_inference_client_async.py",
                 "to-do-add-model",
                 "",
+                "",
             ),
             (
-                "inference\\async_samples\\sample_text_embeddings_with_azure_ai_inference_client_async.py",
+                "samples\\inference\\async_samples\\sample_text_embeddings_with_azure_ai_inference_client_async.py",
                 "text-embedding-3-large",
                 "",
+                "",
             ),
-            ("telemetry\\sample_telemetry_async.py", "", ""),
+            ("samples\\telemetry\\sample_telemetry_async.py", "", "", ""),
         ],
     )
-    async def test_samples_async(self, sample_name: str, model_deployment_name: str, connection_name: str) -> None:
+    @pytest.mark.skip(reason="This test should only run manually on your local machine, with live service calls.")
+    async def test_samples_async(
+        self, sample_name: str, model_deployment_name: str, connection_name: str, data_folder: str
+    ) -> None:
         """
         Run all the asynchronous sample code in the samples folder. If a sample throws an exception,
         which for example happens when the service responds with an error, the test will fail.
@@ -148,7 +205,12 @@ async def test_samples_async(self, sample_name: str, model_deployment_name: str,

         TestSamples._results_async[sample_name] = False
         self._set_env_vars(
-            sample_name, **{"model_deployment_name": model_deployment_name, "connection_name": connection_name}
+            sample_name,
+            **{
+                "model_deployment_name": model_deployment_name,
+                "connection_name": connection_name,
+                "data_folder": data_folder,
+            },
         )
         sample_path = os.path.normpath(os.path.join(TestSamples._samples_folder_path, sample_name))
         await TestSamples._run_sample_async(sample_path)
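The test changes above thread a data_folder column through both parametrize tables and pass it to self._set_env_vars alongside the model deployment and connection names. The diff does not show _set_env_vars itself; the sketch below is only a plausible illustration of how such a helper could export the new keyword as the DATA_FOLDER environment variable the samples read, with relative folders resolved against the package root that _samples_folder_path now points to. All names other than DATA_FOLDER and the parametrize columns are hypothetical.

# Hypothetical sketch of an env-var helper for the test above; _set_env_vars is not shown in the diff.
import os


def _set_env_vars_sketch(sample_name: str, **kwargs: str) -> None:
    # Package root (two levels up from tests/samples), matching _samples_folder_path in setup_class.
    package_root = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    for key, value in kwargs.items():
        if not value:
            continue  # empty parametrize cells leave the environment untouched
        if key == "data_folder":
            # Relative folders such as "samples\\datasets\\data_folder" are resolved against the package root.
            value = os.path.normpath(os.path.join(package_root, value))
        os.environ[key.upper()] = value  # e.g. data_folder -> DATA_FOLDER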