Merged
Changes from 1 commit (of 24 commits)
d7f8a46
Prompty support within Azure AI Inference SDK
YusakuNo1 Oct 16, 2024
1e25075
Fix unit test
YusakuNo1 Oct 16, 2024
ffeaab8
Address PR feedback with copyright, merge PromptConfig to PromptTemplate
YusakuNo1 Oct 18, 2024
44d2f2c
Add comment and set model_name as optional
YusakuNo1 Oct 18, 2024
2d1d132
Bug fixes
YusakuNo1 Oct 22, 2024
9f7b679
Updated parameter names from PM feedbacks
YusakuNo1 Oct 22, 2024
b4f2d5b
Merge branch 'main' into users/daviwu/prompty
YusakuNo1 Oct 22, 2024
b7657e5
Merge branch 'main' into users/daviwu/prompty
YusakuNo1 Oct 28, 2024
38eb258
Improve sample code and unit tests
YusakuNo1 Oct 28, 2024
aa28df4
Update readme and comments
YusakuNo1 Oct 28, 2024
9a1eb79
Rename files
YusakuNo1 Oct 28, 2024
1252b3a
Address PR comment
YusakuNo1 Oct 29, 2024
b3e8616
add Pydantic as dependency
YusakuNo1 Oct 29, 2024
c43f88e
Fix type errors
YusakuNo1 Oct 29, 2024
e9cab12
Fix spelling issues
YusakuNo1 Oct 29, 2024
24c3ced
Address PR comments and fix linter issues
YusakuNo1 Oct 29, 2024
19316b8
Fix type import for "Self"
YusakuNo1 Oct 30, 2024
ed718cb
Change to keyword-only constructor and fix linter issues
YusakuNo1 Oct 30, 2024
ebfa1f8
Rename function `from_message` to `from_str`; `render` to `create_mes…
YusakuNo1 Nov 1, 2024
25a0365
Change from `from_str` to `from_string`
YusakuNo1 Nov 1, 2024
6b8ad60
Merge branch 'main' into users/daviwu/prompty
YusakuNo1 Nov 3, 2024
a7a0bf2
Merge latest code from `microsoft/prompty` and resolve linter issues
YusakuNo1 Nov 3, 2024
4b43b46
Fix PR comment
YusakuNo1 Nov 4, 2024
633c84f
Fix PR comments
YusakuNo1 Nov 5, 2024
Fix PR comments
YusakuNo1 committed Nov 5, 2024
commit 633c84fae171c478ff7716d004e13424b3dd9476
18 changes: 13 additions & 5 deletions .vscode/cspell.json
@@ -1323,12 +1323,20 @@
{
"filename": "sdk/ai/azure-ai-inference/**",
"words": [
"ubinary",
"mros",
"Nify",
"ctxt",
"wday",
"dtype"
"dels",
"dtype",
"fmatter",
"fspath",
"fstring",
"ldel",
"mros",
"nify",
"okwargs",
"prompty",
"rdel",
"ubinary",
"wday"
]
},
{
42 changes: 1 addition & 41 deletions sdk/ai/azure-ai-inference/azure/ai/inference/prompts/_core.py
@@ -10,7 +10,7 @@
from pathlib import Path
from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Union
from ._tracer import Tracer, to_dict
from ._utils import load_json, load_json_async
from ._utils import load_json


@dataclass
@@ -185,20 +185,6 @@ def _process_file(file: str, parent: Path) -> Any:
else:
raise FileNotFoundError(f"File {file} not found")

@staticmethod
async def _process_file_async(file: str, parent: Path) -> Any:
file_path = Path(parent / Path(file)).resolve().absolute()
if file_path.exists():
items = await load_json_async(file_path)
if isinstance(items, list):
return [Prompty.normalize(value, parent) for value in items]
elif isinstance(items, Dict):
return {key: Prompty.normalize(value, parent) for key, value in items.items()}
else:
return items
else:
raise FileNotFoundError(f"File {file} not found")

@staticmethod
def _process_env(variable: str, env_error=True, default: Union[str, None] = None) -> Any:
if variable in os.environ.keys():
@@ -237,32 +223,6 @@ def normalize(attribute: Any, parent: Path, env_error=True) -> Any:
else:
return attribute

@staticmethod
async def normalize_async(attribute: Any, parent: Path, env_error=True) -> Any:
if isinstance(attribute, str):
attribute = attribute.strip()
if attribute.startswith("${") and attribute.endswith("}"):
# check if env or file
variable = attribute[2:-1].split(":")
if variable[0] == "env" and len(variable) > 1:
return Prompty._process_env(
variable[1],
env_error,
variable[2] if len(variable) > 2 else None,
)
elif variable[0] == "file" and len(variable) > 1:
return await Prompty._process_file_async(variable[1], parent)
else:
raise ValueError(f"Invalid attribute format ({attribute})")
else:
return attribute
elif isinstance(attribute, list):
return [await Prompty.normalize_async(value, parent) for value in attribute]
elif isinstance(attribute, Dict):
return {key: await Prompty.normalize_async(value, parent) for key, value in attribute.items()}
else:
return attribute


def param_hoisting(top: Dict[str, Any], bottom: Dict[str, Any], top_key: Union[str, None] = None) -> Dict[str, Any]:
if top_key:
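For context, a quick usage sketch of the `${...}` substitution syntax that the surviving synchronous `Prompty.normalize` resolves, based on the `_process_env`/`_process_file` logic above. The import path follows this diff's file header; the variable names, values, and the default-on-missing behavior of `_process_env` are assumptions:

import os
from pathlib import Path
from azure.ai.inference.prompts._core import Prompty  # path per the diff header above

os.environ["MY_KEY"] = "secret"
Prompty.normalize("${env:MY_KEY}", Path.cwd())            # -> "secret"
Prompty.normalize("${env:MISSING:fallback}", Path.cwd())  # -> "fallback" (third segment assumed to be the default)
Prompty.normalize("${file:extra.json}", Path.cwd())       # -> contents of extra.json, recursively normalized

# param_hoisting (above) merges two dicts; assuming `top` takes precedence:
# param_hoisting({"a": 1}, {"a": 0, "b": 2})  # -> {"a": 1, "b": 2}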
Original file line number Diff line number Diff line change
@@ -88,7 +88,7 @@ def __init__(
"prompt_template": prompt_template,
}
else:
raise ValueError("Please invalid arguments for PromptTemplate")
raise ValueError("Please pass valid arguments for PromptTemplate")

def create_messages(self, data: Optional[Dict[str, Any]] = None, **kwargs) -> List[Dict[str, Any]]:
"""Render the prompt template with the given data.
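For orientation, a usage sketch of `PromptTemplate` consistent with the `create_messages` signature above and this PR's `from_str` → `from_string` rename; the import path and the `prompt_template` keyword are assumptions:

from azure.ai.inference.prompts import PromptTemplate  # import path assumed from this package layout

template = PromptTemplate.from_string(
    prompt_template="system:\nYou are a helpful assistant. Greet {{name}} politely.",  # keyword name assumed
)
messages = template.create_messages(data={"name": "Jane"})  # List[Dict[str, Any]], per the signature above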
Original file line number Diff line number Diff line change
@@ -18,8 +18,6 @@
)
from ._utils import (
load_global_config,
load_global_config_async,
load_prompty_async,
load_prompty,
)

@@ -82,62 +80,6 @@ def headless(
return Prompty(model=modelSettings, template=templateSettings, content=content)


@trace(description="Create a headless prompty object for programmatic use.")
async def headless_async(
api: str,
content: Union[str, List[str], dict],
configuration: Dict[str, Any] = {},
parameters: Dict[str, Any] = {},
connection: str = "default",
) -> Prompty:
"""Create a headless prompty object for programmatic use.

Parameters
----------
api : str
The API to use for the model
content : Union[str, List[str], dict]
The content to process
configuration : Dict[str, Any], optional
The configuration to use, by default {}
parameters : Dict[str, Any], optional
The parameters to use, by default {}
connection : str, optional
The connection to use, by default "default"

Returns
-------
Prompty
The headless prompty object

Example
-------
>>> import prompty
>>> p = await prompty.headless_async(
api="embedding",
configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
content="hello world",
)
>>> emb = prompty.execute(p)

"""

# get caller's path (to get relative path for prompty.json)
caller = Path(traceback.extract_stack()[-2].filename)
templateSettings = TemplateSettings(type="NOOP", parser="NOOP")

global_config = await load_global_config_async(caller.parent, connection)
c = await Prompty.normalize_async(param_hoisting(configuration, global_config), caller.parent)

modelSettings = ModelSettings(
api=api,
configuration=c,
parameters=parameters,
)

return Prompty(model=modelSettings, template=templateSettings, content=content)


def _load_raw_prompty(attributes: dict, content: str, p: Path, global_config: dict):
if "model" not in attributes:
attributes["model"] = {}
@@ -205,7 +147,7 @@ def load(prompty_file: Union[str, Path], configuration: str = "default") -> Prom

Parameters
----------
prompty_file : str
prompty_file : Union[str, Path]
The path to the prompty file
configuration : str, optional
The configuration to use, by default "default"
@@ -251,59 +193,6 @@ def load(prompty_file: Union[str, Path], configuration: str = "default") -> Prom
return prompty


@trace(description="Load a prompty file.")
async def load_async(prompty_file: Union[str, Path], configuration: str = "default") -> Prompty:
"""Load a prompty file.

Parameters
----------
prompty_file : Union[str, Path]
The path to the prompty file
configuration : str, optional
The configuration to use, by default "default"

Returns
-------
Prompty
The loaded prompty object

Example
-------
>>> import prompty
>>> p = await prompty.load_async("prompts/basic.prompty")
>>> print(p)
"""

p = Path(prompty_file)
if not p.is_absolute():
# get caller's path (take into account trace frame)
caller = Path(traceback.extract_stack()[-3].filename)
p = Path(caller.parent / p).resolve().absolute()

# load dictionary from prompty file
matter = await load_prompty_async(p)

attributes = matter["attributes"]
content = matter["body"]

# normalize attribute dictionary resolve keys and files
attributes = await Prompty.normalize_async(attributes, p.parent)

# load global configuration
config = await load_global_config_async(p.parent, configuration)
global_config = await Prompty.normalize_async(config, p.parent)

prompty = _load_raw_prompty(attributes, content, p, global_config)

# recursive loading of base prompty
if "base" in attributes:
# load the base prompty from the same directory as the current prompty
base = await load_async(p.parent / attributes["base"])
prompty = Prompty.hoist_base_prompty(prompty, base)

return prompty
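For context, the synchronous `load` path that remains matches the docstring example; a slightly fuller sketch with illustrative file contents:

# prompts/basic.prompty (illustrative front matter plus body):
# ---
# name: Basic Prompt
# model:
#   api: chat
# ---
# system:
# You are a helpful assistant.

import prompty

p = prompty.load("prompts/basic.prompty")  # resolved relative to the caller, as in the code above
print(p)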


@trace(description="Prepare the inputs for the prompt.")
def prepare(
prompt: Prompty,
@@ -524,57 +413,3 @@ def execute(
result = run(prompt, content, configuration, parameters, raw)

return result


@trace(description="Execute a prompty")
async def execute_async(
prompt: Union[str, Prompty],
configuration: Dict[str, Any] = {},
parameters: Dict[str, Any] = {},
inputs: Dict[str, Any] = {},
raw: bool = False,
config_name: str = "default",
):
"""Execute a prompty.

Parameters
----------
prompt : Union[str, Prompty]
The prompty object or path to the prompty file
configuration : Dict[str, Any], optional
The configuration to use, by default {}
parameters : Dict[str, Any], optional
The parameters to use, by default {}
inputs : Dict[str, Any], optional
The inputs to the prompt, by default {}
raw : bool, optional
Whether to skip processing, by default False
config_name : str, optional
The configuration name to use, by default "default"

Returns
-------
Any
The result of the prompt

Example
-------
>>> import prompty
>>> inputs = {"name": "John Doe"}
>>> result = await prompty.execute_async("prompts/basic.prompty", inputs=inputs)
"""
if isinstance(prompt, str):
path = Path(prompt)
if not path.is_absolute():
# get caller's path (take into account trace frame)
caller = Path(traceback.extract_stack()[-3].filename)
path = Path(caller.parent / path).resolve().absolute()
prompt = await load_async(path, config_name)

# prepare content
content = await prepare_async(prompt, inputs)

# run LLM model
result = await run_async(prompt, content, configuration, parameters, raw)

return result
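The synchronous `execute` keeps the same signature, so the deleted async example maps directly; a sketch:

import prompty

inputs = {"name": "John Doe"}
result = prompty.execute("prompts/basic.prompty", inputs=inputs)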
33 changes: 0 additions & 33 deletions sdk/ai/azure-ai-inference/azure/ai/inference/prompts/_utils.py
@@ -7,7 +7,6 @@
import re
import yaml
import json
import aiofiles
from typing import Any, Dict, Union
from pathlib import Path

@@ -22,22 +21,10 @@ def load_text(file_path, encoding="utf-8"):
return file.read()


async def load_text_async(file_path, encoding="utf-8"):
async with aiofiles.open(file_path, mode="r", encoding=encoding) as f:
content = await f.read()
return content


def load_json(file_path, encoding="utf-8"):
return json.loads(load_text(file_path, encoding=encoding))


async def load_json_async(file_path, encoding="utf-8"):
# async file open
content = await load_text_async(file_path, encoding=encoding)
return json.loads(content)


def _find_global_config(prompty_path: Path = Path.cwd()) -> Union[Path, None]:
prompty_config = list(Path.cwd().glob("**/prompty.json"))

@@ -65,31 +52,11 @@ def load_global_config(prompty_path: Path = Path.cwd(), configuration: str = "de
return {}


async def load_global_config_async(prompty_path: Path = Path.cwd(), configuration: str = "default") -> Dict[str, Any]:
# prompty.config laying around?
config = _find_global_config(prompty_path)

# if there is one load it
if config is not None:
c = await load_json_async(config)
if configuration in c:
return c[configuration]
else:
raise ValueError(f'Item "{configuration}" not found in "{config}"')

return {}
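Both variants look up a `prompty.json` located by `_find_global_config` and return the named section; a sketch with illustrative file contents:

# prompty.json (illustrative):
# {
#   "default": { "type": "azure", "azure_deployment": "gpt-4o" }
# }

from pathlib import Path

load_global_config(Path("prompts"), configuration="default")
# -> {"type": "azure", "azure_deployment": "gpt-4o"}
# A section missing from the file raises ValueError; no prompty.json at all yields {}.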


def load_prompty(file_path, encoding="utf-8") -> Dict[str, Any]:
contents = load_text(file_path, encoding=encoding)
return parse(contents)


async def load_prompty_async(file_path, encoding="utf-8"):
contents = await load_text_async(file_path, encoding=encoding)
return parse(contents)


def parse(contents):
global _yaml_regex

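`parse` splits a .prompty file into YAML front matter (the `attributes`) and the prompt `body` consumed by `load` above. A self-contained sketch of that split, with the regex standing in for the module's `_yaml_regex` (pattern is an assumption):

import re
import yaml

# Assumed pattern: YAML front matter between --- fences, then the body.
_yaml_regex_sketch = re.compile(r"^\s*-{3}(.*?)-{3}\s*(.*)$", re.DOTALL)

def parse_sketch(contents: str) -> dict:
    match = _yaml_regex_sketch.match(contents)
    if not match:
        return {"attributes": {}, "body": contents}
    return {
        "attributes": yaml.safe_load(match.group(1)) or {},
        "body": match.group(2),
    }

parse_sketch("---\nname: Basic\n---\nsystem:\nYou are helpful.")
# -> {"attributes": {"name": "Basic"}, "body": "system:\nYou are helpful."}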
13 changes: 0 additions & 13 deletions sdk/ai/azure-ai-inference/cspell.json

This file was deleted.

3 changes: 0 additions & 3 deletions sdk/ai/azure-ai-inference/dev_requirements.txt
@@ -4,7 +4,4 @@
../../monitor/azure-monitor-opentelemetry
aiohttp
opentelemetry-sdk
aiofiles
dataclasses
types-pyyaml
types-aiofiles
6 changes: 1 addition & 5 deletions sdk/ai/azure-ai-inference/setup.py
@@ -62,11 +62,7 @@
package_data={
"azure.ai.inference": ["py.typed"],
},
install_requires=[
"isodate>=0.6.1",
"azure-core>=1.30.0",
"typing-extensions>=4.6.0",
],
install_requires=["isodate>=0.6.1", "azure-core>=1.30.0", "typing-extensions>=4.6.0"],
python_requires=">=3.8",
extras_require={"opentelemetry": ["azure-core-tracing-opentelemetry"]},
)