From c46c82126108601010a388aaa24cd1a46e640270 Mon Sep 17 00:00:00 2001
From: Ankit Singhal
Date: Thu, 1 May 2025 10:52:08 -0700
Subject: [PATCH 1/2] Adding sample for agent evaluation

---
 .../evaluation/sample_agent_evaluations.py    | 68 +++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py

diff --git a/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py
new file mode 100644
index 000000000000..2331e270e051
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py
@@ -0,0 +1,68 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to use the synchronous
+    `.evaluations` methods to submit an evaluation for an agent run.
+
+USAGE:
+    python sample_agent_evaluations.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity python-dotenv
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
+       Azure AI Foundry project.
+"""
+
+import os
+
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import (
+    AgentEvaluationRequest,
+    InputDataset,
+    EvaluatorIds,
+    EvaluatorConfiguration,
+    AgentEvaluationSamplingConfiguration
+)
+from dotenv import load_dotenv
+
+load_dotenv()
+
+endpoint = os.environ["PROJECT_ENDPOINT"]
+
+with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential:
+
+    with AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
+
+        # [START evaluations_agent_sample]
+
+        agent_evaluation_request = AgentEvaluationRequest(
+            run_id="run-id",
+            thread_id="thread-id",
+            evaluators={
+                "violence": EvaluatorConfiguration(
+                    id=EvaluatorIds.VIOLENCE,
+                )
+            },
+            sampling_configuration=AgentEvaluationSamplingConfiguration(
+                name="test",
+                sampling_percent=0.5,
+            ),
+            app_insights_connection_string="connection-string",
+        )
+
+        agent_evaluation_response = project_client.evaluations.create_agent_evaluation(
+            evaluation=agent_evaluation_request
+        )
+
+        print(agent_evaluation_response)
+
+        # [END evaluations_agent_sample]

From 7961e5acbd4aa4ffca3757988a5367f88767cabf Mon Sep 17 00:00:00 2001
From: Ankit Singhal
Date: Thu, 1 May 2025 11:45:54 -0700
Subject: [PATCH 2/2] Review comments

---
 .../samples/evaluation/sample_agent_evaluations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py
index 2331e270e051..8943a638d114 100644
--- a/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py
+++ b/sdk/ai/azure-ai-projects/samples/evaluation/sample_agent_evaluations.py
@@ -56,7 +56,7 @@
                 name="test",
                 sampling_percent=0.5,
             ),
-            app_insights_connection_string="connection-string",
+            app_insights_connection_string=project_client.telemetry.get_connection_string(),
         )
 
         agent_evaluation_response = project_client.evaluations.create_agent_evaluation(
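
The sample submits the evaluation with placeholder run_id and thread_id strings, which only illustrates the request shape. In a working setup those identifiers come from an agent run created beforehand; a minimal sketch of that flow follows, assuming the companion azure-ai-agents client surface (project_client.agents with create_agent, threads, messages, runs and delete_agent) and a MODEL_DEPLOYMENT_NAME environment variable. Both are assumptions not taken from the patch and may need adjusting to the installed SDK version.

# Hedged sketch: create a real agent run, then evaluate it with the same
# request shape as the sample. Assumptions (not from the patch):
# project_client.agents exposes the azure-ai-agents client, and
# MODEL_DEPLOYMENT_NAME names a chat model deployment in the project.
import os

from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient
from azure.ai.projects.models import (
    AgentEvaluationRequest,
    EvaluatorConfiguration,
    EvaluatorIds,
)

endpoint = os.environ["PROJECT_ENDPOINT"]
model_deployment = os.environ["MODEL_DEPLOYMENT_NAME"]  # assumed variable, not in the sample

with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential:
    with AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
        agents_client = project_client.agents  # assumed azure-ai-agents surface

        # Create a short-lived agent, thread, message and run so there is something to evaluate.
        agent = agents_client.create_agent(
            model=model_deployment,
            name="eval-sample-agent",
            instructions="You are a helpful assistant.",
        )
        thread = agents_client.threads.create()
        agents_client.messages.create(
            thread_id=thread.id, role="user", content="Tell me about Azure AI Foundry."
        )
        run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id)

        # Same request shape as the sample, but with real identifiers and the
        # project's Application Insights connection string. The sample's
        # sampling_configuration is omitted here; add it back if needed.
        response = project_client.evaluations.create_agent_evaluation(
            evaluation=AgentEvaluationRequest(
                run_id=run.id,
                thread_id=thread.id,
                evaluators={"violence": EvaluatorConfiguration(id=EvaluatorIds.VIOLENCE)},
                app_insights_connection_string=project_client.telemetry.get_connection_string(),
            )
        )
        print(response)

        agents_client.delete_agent(agent.id)  # clean up the temporary agent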