Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
f364876
Pick up changes to tool naming
trangevi Aug 13, 2024
afb3c25
Test/sample fixes
trangevi Aug 13, 2024
e5a9b77
TSP commit
trangevi Aug 14, 2024
65a55c8
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Aug 14, 2024
bf629a5
Update with recent typespec changes. Re-record tests
trangevi Aug 23, 2024
bab6b9d
Add embedding client and associated changes
trangevi Aug 29, 2024
7071138
Add samples. Regenerate apiview
trangevi Aug 30, 2024
762f437
Add test for user agent
trangevi Sep 16, 2024
e04113e
Updates to flatten Choices array
trangevi Sep 23, 2024
1db2dfc
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Sep 25, 2024
2b0bcd7
Pick up changes from tsp
trangevi Sep 25, 2024
c2a1f72
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Sep 25, 2024
c3d996e
Finally got tool streaming working!
trangevi Oct 3, 2024
b8fe531
regenerate after changes
trangevi Oct 3, 2024
b417e4e
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Oct 3, 2024
9f4fb2f
up
Oct 8, 2024
a6cb31f
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Oct 9, 2024
4c8ed0d
Merge fixes. Removing test recordings
trangevi Oct 10, 2024
920be7a
Rename sample files
trangevi Oct 10, 2024
b600294
Add handling to pass both auth headers in every request
trangevi Oct 10, 2024
7e20f3e
Add handling for image files for chat completions
trangevi Oct 11, 2024
652596c
Regen code and api
trangevi Oct 11, 2024
cacdc03
Update spellchecker
trangevi Oct 11, 2024
02ef186
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Oct 11, 2024
37e36fb
Update snippets
trangevi Oct 11, 2024
df9b796
Update sample names/pointers
trangevi Oct 15, 2024
c19fb2d
Update test recordings asset pointer
trangevi Oct 15, 2024
b861fcf
Rename some enum values
trangevi Oct 18, 2024
bf2143d
Update Apiview
trangevi Oct 18, 2024
8215877
Merge branch 'main' into trangevi/ai-inference-sdk
trangevi Oct 18, 2024
6e961ca
Regenerated
trangevi Oct 18, 2024
d617623
Changelog update
trangevi Oct 21, 2024
c8c7759
Changelog update
trangevi Oct 22, 2024
3584458
Update hopefull release date
trangevi Oct 23, 2024
b4dc0e6
Remove extraParams. Add more samples
trangevi Oct 24, 2024
8a031ac
Missed change to apiview
trangevi Oct 24, 2024
176b9d7
Snippet fixes
trangevi Oct 24, 2024
3b1fb6f
Update release date
trangevi Oct 24, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Add samples. Regenerate apiview
Signed-off-by: Travis Angevine <[email protected]>
  • Loading branch information
trangevi committed Aug 30, 2024
commit 7071138e5dcb0f0eea5eb2d957be7ddd5f970856
299 changes: 220 additions & 79 deletions sdk/ai/Azure.AI.Inference/api/Azure.AI.Inference.netstandard2.0.cs

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Set these two environment variables before running the sample:
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var requestOptions = new ChatCompletionsOptions()
{
Expand All @@ -35,7 +35,7 @@ An `async` option is also available.
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var requestOptions = new ChatCompletionsOptions()
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ var key = System.Environment.GetEnvironmentVariable("AZURE_OPENAI_CHAT_KEY");
// the credential object is still required. So create with a dummy value.
var credential = new AzureKeyCredential("foo");

ChatCompletionsClientOptions clientOptions = new ChatCompletionsClientOptions();
AzureAIInferenceClientOptions clientOptions = new AzureAIInferenceClientOptions();
clientOptions.AddPolicy(new AddAoaiAuthHeaderPolicy(key), HttpPipelinePosition.PerCall);

var client = new ChatCompletionsClient(endpoint, credential, clientOptions);
Expand All @@ -61,7 +61,7 @@ Alternatively, you can use EntraId to authenticate. This does not require the he
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_OPENAI_CHAT_ENDPOINT"));
var credential = new DefaultAzureCredential(includeInteractiveCredentials: true);

ChatCompletionsClientOptions clientOptions = new ChatCompletionsClientOptions();
AzureAIInferenceClientOptions clientOptions = new AzureAIInferenceClientOptions();

BearerTokenAuthenticationPolicy tokenPolicy = new BearerTokenAuthenticationPolicy(credential, new string[] { "https://cognitiveservices.azure.com/.default" });
clientOptions.AddPolicy(tokenPolicy, HttpPipelinePosition.PerRetry);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Set these two environment variables before running the sample:
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var requestOptions = new ChatCompletionsOptions()
{
Expand Down Expand Up @@ -48,7 +48,7 @@ An `async` option is also available for the initial streaming call.
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var requestOptions = new ChatCompletionsOptions()
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Set these two environment variables before running the sample:
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());
var messages = new List<ChatRequestMessage>()
{
new ChatRequestSystemMessage("You are an AI assistant that helps people find information. Your replies are short, no more than two sentences."),
Expand All @@ -40,7 +40,7 @@ An `async` option is also available.
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());
var messages = new List<ChatRequestMessage>()
{
new ChatRequestSystemMessage("You are an AI assistant that helps people find information. Your replies are short, no more than two sentences."),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ Set these two environment variables before running the sample:
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

ChatMessageImageContentItem imageContentItem =
new ChatMessageImageContentItem(
Expand Down Expand Up @@ -45,7 +45,7 @@ An `async` option is also available.
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

ChatMessageImageContentItem imageContentItem =
new ChatMessageImageContentItem(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Set these two environment variables before running the sample:
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var requestOptions = new ChatCompletionsOptions()
{
Expand All @@ -41,7 +41,7 @@ An `async` option is also available.
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var requestOptions = new ChatCompletionsOptions()
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Set these two environment variables before running the sample:
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

FunctionDefinition futureTemperatureFunction = new FunctionDefinition("get_future_temperature")
{
Expand All @@ -39,7 +39,7 @@ FunctionDefinition futureTemperatureFunction = new FunctionDefinition("get_futur
},
new JsonSerializerOptions() { PropertyNamingPolicy = JsonNamingPolicy.CamelCase })
};
ChatCompletionsFunctionToolDefinition functionToolDef = new ChatCompletionsFunctionToolDefinition(futureTemperatureFunction);
ChatCompletionsToolDefinition functionToolDef = new ChatCompletionsToolDefinition(futureTemperatureFunction);

var requestOptions = new ChatCompletionsOptions()
{
Expand All @@ -55,7 +55,7 @@ Response<ChatCompletions> response = client.Complete(requestOptions);
System.Console.WriteLine(response.Value.Choices[0].Message.Content);

ChatResponseMessage responseMessage = response.Value.Choices[0].Message;
ChatCompletionsFunctionToolCall functionToolCall = responseMessage.ToolCalls[0] as ChatCompletionsFunctionToolCall;
ChatCompletionsToolCall functionToolCall = responseMessage.ToolCalls[0] as ChatCompletionsToolCall;

ChatCompletionsOptions followupOptions = new()
{
Expand Down Expand Up @@ -86,7 +86,7 @@ An `async` option is also available.
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_CHAT_KEY"));

var client = new ChatCompletionsClient(endpoint, credential, new ChatCompletionsClientOptions());
var client = new ChatCompletionsClient(endpoint, credential, new AzureAIInferenceClientOptions());

FunctionDefinition futureTemperatureFunction = new FunctionDefinition("get_future_temperature")
{
Expand All @@ -111,7 +111,7 @@ FunctionDefinition futureTemperatureFunction = new FunctionDefinition("get_futur
},
new JsonSerializerOptions() { PropertyNamingPolicy = JsonNamingPolicy.CamelCase })
};
ChatCompletionsFunctionToolDefinition functionToolDef = new ChatCompletionsFunctionToolDefinition(futureTemperatureFunction);
ChatCompletionsToolDefinition functionToolDef = new ChatCompletionsToolDefinition(futureTemperatureFunction);

var requestOptions = new ChatCompletionsOptions()
{
Expand All @@ -127,7 +127,7 @@ Response<ChatCompletions> response = await client.CompleteAsync(requestOptions);
System.Console.WriteLine(response.Value.Choices[0].Message.Content);

ChatResponseMessage responseMessage = response.Value.Choices[0].Message;
ChatCompletionsFunctionToolCall functionToolCall = responseMessage.ToolCalls[0] as ChatCompletionsFunctionToolCall;
ChatCompletionsToolCall functionToolCall = responseMessage.ToolCalls[0] as ChatCompletionsToolCall;

ChatCompletionsOptions followupOptions = new()
{
Expand Down
68 changes: 68 additions & 0 deletions sdk/ai/Azure.AI.Inference/samples/Sample8_Embeddings.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
# Simple Embeddings

This sample demonstrates how to get text embeddings for a list of sentences. Here we use the service default of returning embeddings as a list of floating point values.

This sample assumes the AI model is hosted on a Serverless API or Managed Compute endpoint. For GitHub Models or Azure OpenAI endpoints, the client constructor needs to be modified. See package documentation for details.

## Usage

Set these two environment variables before running the sample:

1. AZURE_AI_EMBEDDINGS_ENDPOINT - Your endpoint URL, in the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com` where `your-deployment-name` is your unique AI Model deployment name, and `your-azure-region` is the Azure region where your model is deployed.

2. AZURE_AI_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret.

```C# Snippet:Azure_AI_Inference_BasicEmbedding
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_EMBEDDINGS_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_EMBEDDINGS_KEY"));

var client = new EmbeddingsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var input = new List<string> { "King", "Queen", "Jack", "Page" };
var requestOptions = new EmbeddingsOptions(input);

Response<EmbeddingsResult> response = client.Embed(requestOptions);
foreach (EmbeddingItem item in response.Value.Data)
{
List<float> embedding = item.Embedding.ToObjectFromJson<List<float>>();
Console.WriteLine($"Index: {item.Index}, Embedding: <{string.Join(", ", embedding)}>");
}
```

An `async` option is also available.

```C# Snippet:Azure_AI_Inference_BasicEmbeddingAsync
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_AI_EMBEDDINGS_ENDPOINT"));
var credential = new AzureKeyCredential(System.Environment.GetEnvironmentVariable("AZURE_AI_EMBEDDINGS_KEY"));

var client = new EmbeddingsClient(endpoint, credential, new AzureAIInferenceClientOptions());

var input = new List<string> { "King", "Queen", "Jack", "Page" };
var requestOptions = new EmbeddingsOptions(input);

Response<EmbeddingsResult> response = await client.EmbedAsync(requestOptions);
foreach (EmbeddingItem item in response.Value.Data)
{
List<float> embedding = item.Embedding.ToObjectFromJson<List<float>>();
Console.WriteLine($"Index: {item.Index}, Embedding: <{string.Join(", ", embedding)}>");
}
```

### Alternative Response Type

It is also possible to request embeddings as base64 encoded strings, instead of the service default of lists of floats.

```C# Snippet:Azure_AI_Inference_Base64Embedding
var input = new List<string> { "King", "Queen", "Jack", "Page" };
var requestOptions = new EmbeddingsOptions(input)
{
EncodingFormat = EmbeddingEncodingFormat.Base64,
};

Response<EmbeddingsResult> response = client.Embed(requestOptions);
foreach (EmbeddingItem item in response.Value.Data)
{
string embedding = item.Embedding.ToObjectFromJson<string>();
Console.WriteLine($"Index: {item.Index}, Embedding: {embedding}");
}
```
117 changes: 117 additions & 0 deletions sdk/ai/Azure.AI.Inference/samples/Sample9_EmbeddingsWithAoai.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
# Simple Embeddings Using Azure OpenAI

This sample demonstrates how to get text embeddings for a list of sentences using a synchronous client, with an Azure OpenAI (AOAI) endpoint. Two types of authentication are shown: key authentication and Entra ID authentication.

## Usage

Set these two environment variables before running the sample:

1. AZURE_OPENAI_EMBEDDINGS_ENDPOINT - Your endpoint URL, in the form `https://your-deployment-name.your-azure-region.inference.ai.azure.com` where `your-deployment-name` is your unique AI Model deployment name, and `your-azure-region` is the Azure region where your model is deployed.

2. AZURE_OPENAI_EMBEDDINGS_KEY - Your model key (a 32-character string). Keep it secret.

In order to target AOAI, the auth key must currently be provided as a separate header. This can be done by creating a `HttpPipelinePolicy` like below:

```C# Snippet:Azure_AI_Inference_AoaiAuthHeaderPolicy
// Pipeline policy that adds the Azure OpenAI "api-key" header to every outgoing
// request, on both the synchronous and asynchronous pipeline paths. Needed because
// the client passes the AOAI key via this header rather than the standard credential.
private class AddAoaiAuthHeaderPolicy : HttpPipelinePolicy
{
    // The Azure OpenAI key sent in the "api-key" header; set once at construction.
    public string AoaiKey { get; }

    public AddAoaiAuthHeaderPolicy(string key)
    {
        AoaiKey = key;
    }

    public override void Process(HttpMessage message, ReadOnlyMemory<HttpPipelinePolicy> pipeline)
    {
        // Add your desired header name and value
        message.Request.Headers.Add("api-key", AoaiKey);

        ProcessNext(message, pipeline);
    }

    public override ValueTask ProcessAsync(HttpMessage message, ReadOnlyMemory<HttpPipelinePolicy> pipeline)
    {
        // Add your desired header name and value
        message.Request.Headers.Add("api-key", AoaiKey);

        return ProcessNextAsync(message, pipeline);
    }
}
```

The policy can then be added to the `AzureAIInferenceClientOptions` object, to configure the client to add the header at runtime.

```C# Snippet:Azure_AI_Inference_BasicEmbeddingAoaiScenarioClientCreate
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_OPENAI_EMBEDDINGS_ENDPOINT"));
var key = System.Environment.GetEnvironmentVariable("AZURE_OPENAI_EMBEDDINGS_KEY");

// For AOAI, currently the key is passed via a different header not directly handled by the client, however
// the credential object is still required. So create with a dummy value.
var credential = new AzureKeyCredential("foo");

AzureAIInferenceClientOptions clientOptions = new AzureAIInferenceClientOptions();
clientOptions.AddPolicy(new AddAoaiAuthHeaderPolicy(key), HttpPipelinePosition.PerCall);

var client = new EmbeddingsClient(endpoint, credential, clientOptions);
```

Alternatively, you can use EntraId to authenticate. This does not require the header policy, but it does currently require a separate built-in policy, `BearerTokenAuthenticationPolicy`, to apply the correct token scope.

```C# Snippet:Azure_AI_Inference_EmbeddingWithEntraIdClientCreate
var endpoint = new Uri(System.Environment.GetEnvironmentVariable("AZURE_OPENAI_EMBEDDINGS_ENDPOINT"));
var credential = new DefaultAzureCredential(includeInteractiveCredentials: true);
AzureAIInferenceClientOptions clientOptions = new AzureAIInferenceClientOptions();

BearerTokenAuthenticationPolicy tokenPolicy = new BearerTokenAuthenticationPolicy(credential, new string[] { "https://cognitiveservices.azure.com/.default" });
clientOptions.AddPolicy(tokenPolicy, HttpPipelinePosition.PerRetry);

var client = new EmbeddingsClient(endpoint, credential, clientOptions);
```

After the client is created, you can make embeddings requests with it as shown

```C# Snippet:Azure_AI_Inference_BasicEmbeddingAoai
var input = new List<string> { "King", "Queen", "Jack", "Page" };
var requestOptions = new EmbeddingsOptions(input);

Response<EmbeddingsResult> response = client.Embed(requestOptions);
foreach (EmbeddingItem item in response.Value.Data)
{
List<float> embedding = item.Embedding.ToObjectFromJson<List<float>>();
Console.WriteLine($"Index: {item.Index}, Embedding: <{string.Join(", ", embedding)}>");
}
```

An `async` option is also available.

```C# Snippet:Azure_AI_Inference_BasicEmbeddingAoaiAsync
var input = new List<string> { "King", "Queen", "Jack", "Page" };
var requestOptions = new EmbeddingsOptions(input);

Response<EmbeddingsResult> response = await client.EmbedAsync(requestOptions);
foreach (EmbeddingItem item in response.Value.Data)
{
List<float> embedding = item.Embedding.ToObjectFromJson<List<float>>();
Console.WriteLine($"Index: {item.Index}, Embedding: <{string.Join(", ", embedding)}>");
}
```

### Alternative Response Type

It is also possible to request embeddings as base64 encoded strings, instead of the service default of lists of floats.

```C# Snippet:Azure_AI_Inference_Base64Embedding
var input = new List<string> { "King", "Queen", "Jack", "Page" };
var requestOptions = new EmbeddingsOptions(input)
{
EncodingFormat = EmbeddingEncodingFormat.Base64,
};

Response<EmbeddingsResult> response = client.Embed(requestOptions);
foreach (EmbeddingItem item in response.Value.Data)
{
string embedding = item.Embedding.ToObjectFromJson<string>();
Console.WriteLine($"Index: {item.Index}, Embedding: {embedding}");
}
```
12 changes: 1 addition & 11 deletions sdk/ai/Azure.AI.Inference/tests/EmbeddingClientTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,10 @@

using System;
using System.Collections.Generic;
using System.Diagnostics.Tracing;
using System.IO;
using System.Linq;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;
using Azure.Core;
using Azure.Core.Diagnostics;
using Azure.Core.Pipeline;
using Azure.Core.TestFramework;
using Azure.Identity;
using Newtonsoft.Json;
using NUnit.Framework;
using File = System.IO.File;

namespace Azure.AI.Inference.Tests
{
Expand Down Expand Up @@ -83,7 +73,7 @@ public async Task TestEmbedWithBase64()
Assert.That(response.Value.Usage.TotalTokens, Is.GreaterThan(0));
}

#region Helpers
#region Helpers
private EmbeddingsClient CreateClient(Uri endpoint, AzureKeyCredential credential, AzureAIInferenceClientOptions clientOptions)
{
return InstrumentClient(new EmbeddingsClient(endpoint, credential, InstrumentClientOptions(clientOptions)));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ public class InferenceClientTestEnvironment : TestEnvironment
public string GithubEndpoint => GetRecordedVariable("AZUREAI_GITHUB_URL");
public string GithubToken => GetRecordedVariable("AZUREAI_GITHUB_TOKEN", options => options.IsSecret());
public string AoaiEndpoint => GetRecordedVariable("AOAI_CHAT_COMPLETIONS_ENDPOINT");
public string AoaiKey => GetRecordedVariable("AOAI_CHAT_COMPLETIONS_KEY");
public string AoaiKey => GetRecordedVariable("AOAI_CHAT_COMPLETIONS_KEY", options => options.IsSecret());
public string AoaiEmbeddingsEndpoint => GetRecordedVariable("AOAI_EMBEDDINGS_ENDPOINT");
public string AoaiEmbeddingsKey => GetRecordedVariable("AOAI_EMBEDDINGS_KEY", options => options.IsSecret());
public string TestImageJpgInputPath => GetRecordedVariable("AZUREAI_TEST_IMAGE_JPG_INPUT_PATH");

// Add other client paramters here as above.
Expand Down
Loading