Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
d7f8a46
Prompty support within Azure AI Inference SDK
YusakuNo1 Oct 16, 2024
1e25075
Fix unit test
YusakuNo1 Oct 16, 2024
ffeaab8
Address PR feedback with copyright, merge PromptConfig to PromptTemplate
YusakuNo1 Oct 18, 2024
44d2f2c
Add comment and set model_name as optional
YusakuNo1 Oct 18, 2024
2d1d132
Bug fixes
YusakuNo1 Oct 22, 2024
9f7b679
Updated parameter names from PM feedback
YusakuNo1 Oct 22, 2024
b4f2d5b
Merge branch 'main' into users/daviwu/prompty
YusakuNo1 Oct 22, 2024
b7657e5
Merge branch 'main' into users/daviwu/prompty
YusakuNo1 Oct 28, 2024
38eb258
Improve sample code and unit tests
YusakuNo1 Oct 28, 2024
aa28df4
Update readme and comments
YusakuNo1 Oct 28, 2024
9a1eb79
Rename files
YusakuNo1 Oct 28, 2024
1252b3a
Address PR comment
YusakuNo1 Oct 29, 2024
b3e8616
add Pydantic as dependency
YusakuNo1 Oct 29, 2024
c43f88e
Fix type errors
YusakuNo1 Oct 29, 2024
e9cab12
Fix spelling issues
YusakuNo1 Oct 29, 2024
24c3ced
Address PR comments and fix linter issues
YusakuNo1 Oct 29, 2024
19316b8
Fix type import for "Self"
YusakuNo1 Oct 30, 2024
ed718cb
Change to keyword-only constructor and fix linter issues
YusakuNo1 Oct 30, 2024
ebfa1f8
Rename function `from_message` to `from_str`; `render` to `create_mes…
YusakuNo1 Nov 1, 2024
25a0365
Change from `from_str` to `from_string`
YusakuNo1 Nov 1, 2024
6b8ad60
Merge branch 'main' into users/daviwu/prompty
YusakuNo1 Nov 3, 2024
a7a0bf2
Merge latest code from `microsoft/prompty` and resolve linter issues
YusakuNo1 Nov 3, 2024
4b43b46
Fix PR comment
YusakuNo1 Nov 4, 2024
633c84f
Fix PR comments
YusakuNo1 Nov 5, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Fix unit test
  • Loading branch information
YusakuNo1 committed Oct 16, 2024
commit 1e250757286ac41fd30e5fa7c75a27cc467f5062
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,10 @@ def sample_chat_completions_from_input_prompt_string():


prompt_template = """
system:
You are an AI assistant in a hotel. You help guests with their requests and provide information about the hotel and its services.

user:
{input}
"""
prompt_config = PromptyTemplate.from_message(
Expand Down
30 changes: 30 additions & 0 deletions sdk/ai/azure-ai-inference/tests/sample1.prompty
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
---
name: Basic Prompt
description: A basic prompt that uses the gpt-4o-mini chat API to answer questions
authors:
- author_1
- author_2
model:
api: chat
configuration:
azure_deployment: gpt-4o-mini
parameters:
temperature: 1
frequency_penalty: 0.5
presence_penalty: 0.5
sample:
firstName: Jane
lastName: Doe
question: What is the meaning of life?
chat_history: []
---
system:
You are an AI assistant in a hotel. You help guests with their requests and provide information about the hotel and its services.

{{#chat_history}}
{{role}}:
{{content}}
{{/chat_history}}

user:
{{input}}
82 changes: 34 additions & 48 deletions sdk/ai/azure-ai-inference/tests/test_prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,62 +3,48 @@
# Licensed under the MIT License.
# ------------------------------------
import os
import json
import azure.ai.inference as sdk
import azure.ai.inference.prompts as prompts
from azure.ai.inference.prompts import PromptyTemplate
from devtools_testutils import AzureRecordedTestCase

from model_inference_test_base import (
ModelClientTestBase,
ServicePreparerChatCompletions,
ServicePreparerAOAIChatCompletions,
ServicePreparerEmbeddings,
)
from azure.core.pipeline.transport import RequestsTransport
from devtools_testutils import recorded_by_proxy
from azure.core.exceptions import AzureError, ServiceRequestError
from azure.core.credentials import AzureKeyCredential



class TestModelClient(ModelClientTestBase):
class TestPrompts(AzureRecordedTestCase):

# **********************************************************************************
#
# UNIT TESTS
#
# **********************************************************************************

def test_prompty(self, **kwargs):
path = "/Users/weiwu/Workspace/1_Testing/TestAI/test-prompty/test.prompty"
p = prompts.load(path)

inputs = {
"input": "my first question",
def test_prompt_config_from_prompty(self, **kwargs):
script_dir = os.path.dirname(os.path.abspath(__file__))
prompty_file_path = os.path.join(script_dir, "sample1.prompty")
prompt_config = PromptyTemplate.load(prompty_file_path)
assert prompt_config.model_name == "gpt-4o-mini"
assert prompt_config.config["temperature"] == 1
assert prompt_config.config["frequency_penalty"] == 0.5
assert prompt_config.config["presence_penalty"] == 0.5
input_variables = {
"input": "please tell me a joke about cats",
}

print(p)

parsed = prompts.prepare(p, inputs)

lc_messages = [] # TODO: will be removed
for message in parsed:
message_class = prompts.RoleMap.get_message_class(message["role"])
lc_messages.append(message_class(content=message["content"]))

print(lc_messages)

assert True


def test_prompt_config(self, **kwargs):
path = "/Users/weiwu/Workspace/1_Testing/TestAI/test-prompty/test.prompty"
prompt_config = prompts.get_prompt_config(file_path=path)

inputs = {
"input": "my first question",
messages = prompt_config.render(input_variables=input_variables)
assert len(messages) == 2
assert messages[0]["role"] == "system"
assert messages[1]["role"] == "user"
assert messages[1]["content"] == "please tell me a joke about cats"

def test_prompt_config_from_message(self, **kwargs):
prompt_config = PromptyTemplate.from_message(
api = "chat",
model_name = "gpt-4o-mini",
prompt_template = "system prompt template {input}"
)
assert prompt_config.model_name == "gpt-4o-mini"
input_variables = {
"input": "please tell me a joke about cats",
}

messages = prompt_config.render(inputs)
print(messages)

assert True
messages = prompt_config.render(input_variables=input_variables)
assert len(messages) == 1
assert messages[0]["role"] == "system"
# TODO: need to separate the system prompt from the user input
# assert messages[1]["role"] == "user"
# assert messages[1]["content"] == "please tell me a joke about cats"