Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
73 commits
Select commit Hold shift + click to select a range
0d47c43
gguf: add GGUFReader.read_field(field) method + read template example
Apr 27, 2024
0d1d46e
grammars: add troubleshooting section to readme
Apr 8, 2024
63d1324
server.py: hacky code
Mar 25, 2024
ffc7436
agents: scripts to run scripts as sandboxed fastapi servers
Mar 26, 2024
d5d9993
server.py: default tools work!
Mar 26, 2024
8afd4de
server.py: make tools work w/ mixtral-8x7b-instruct
Mar 27, 2024
aa9605c
server.py: kinda api-compliant output, disabled grammar
Mar 27, 2024
a406293
server.py: reenable grammar, accommodate mistral's escaped underscores
Mar 27, 2024
63a384d
server.py: raise n_predict
Mar 28, 2024
5f3de16
server.py: pass all request options, comments in ts sigs, render tool…
Mar 28, 2024
59b4114
server.py: refactor chat handlers
Mar 29, 2024
253b68d
server.py: crude reactor
Mar 29, 2024
e874565
agent: split code from openai example
Mar 29, 2024
b63f91a
Update agent.py
Mar 29, 2024
c340e8c
Update example_weather_tools.py
Mar 29, 2024
ce2fb01
agent: add --allow_parallel_calls
Mar 29, 2024
ea34bd3
agent/openai:nits
Mar 29, 2024
80c7930
openai: fix message merging for mixtral (parallel calls)
Mar 29, 2024
9ab493f
Update prompting.py
Mar 29, 2024
e0c8af4
agent: --style
Mar 29, 2024
b4e292e
Create requirements.txt
Mar 29, 2024
d1d8602
agent: disable parallel by default
Mar 29, 2024
eb9a552
agent: nits
Mar 29, 2024
3da30ed
agent: fix functionary tool_calls templating
Mar 29, 2024
ff6563a
Delete test.sh
Mar 29, 2024
dd11bb6
agent: format still broken
Mar 29, 2024
22b980f
agent: update readme
Mar 29, 2024
61f35e0
agent: prepare to test various templates
Mar 29, 2024
d8a53ea
openai: test features of templates at runtime, to make sure no bits o…
Mar 30, 2024
ad2f4c1
Update test_chat_handlers.py
Mar 30, 2024
3c3eff5
openai: quiet + update prompt output
Mar 30, 2024
6935503
openai: refactor chat handler vs. template
Mar 30, 2024
d9f30f8
Update test_chat_handlers.md
Mar 30, 2024
da2067a
openai: only special-format assistant in thoughtful mode
Mar 30, 2024
09de4eb
openai: actually use thoughtful examples in tests
Mar 30, 2024
19811a4
openai: tests didn't catch output format
Mar 30, 2024
22fe86d
openai tools: TS signatures work well too at a fraction of the eval cost
Mar 30, 2024
6e52a9c
Update test_chat_handlers.md
Apr 8, 2024
701a66d
agent: fix response_format
Apr 9, 2024
b447a74
agent: revert to json schemas (ts not ready for refs)
Apr 9, 2024
85820f4
agent: fix sandbox dockerfile
Apr 9, 2024
6880f1d
agent: support basic openapi tools (incl. from fastify sandbox)
Apr 9, 2024
0532680
agent: nits
Apr 9, 2024
a634e03
agent: cache_prompt=True
Apr 10, 2024
9fe269e
openai: nit
Apr 10, 2024
a61ebeb
agent: hint at math import in python tool
Apr 10, 2024
24e34f1
agent: nit
Apr 10, 2024
1475b1e
agent: fix killing of subprocesses
Apr 10, 2024
6c00378
agent: nits
Apr 10, 2024
082d54d
agent: rename fake weather tools
Apr 10, 2024
f9afb04
agent: python tool: test serializability of variables
Apr 10, 2024
a98f483
agent: python tool: return errors
Apr 10, 2024
ea0c31b
agent: ensure DATA_DIR exists
Apr 10, 2024
89dcc06
agent: mypy type fixes
Apr 10, 2024
0120f7c
agent: fix wait --std-tools
Apr 10, 2024
09c2565
grammars: early exit when no next_candidates to reject
Apr 21, 2024
00c709e
grammars: cache decoded tokens
Apr 21, 2024
8d503ef
grammars: faster llama_grammar_copy
Apr 21, 2024
b4a00ce
Merge branch 'gguf-read' into agent-example
Apr 27, 2024
7675ac6
Merge remote-tracking branch 'origin/master' into agent-example
Apr 30, 2024
312e20b
openai: update after merge
Apr 30, 2024
ca1a640
server: tool call grammar-constraints
May 2, 2024
2b2127c
agent: url params
May 2, 2024
e41b6ce
server: update tool calling, introduce system prompt for json schema
May 2, 2024
a1d64cf
openai: function call arguments must be returned stringified!
May 18, 2024
3f5a25f
Merge remote-tracking branch 'origin/master' into agent-example
May 18, 2024
5ea637e
openai: fix merge
May 21, 2024
6dadcd2
Merge remote-tracking branch 'origin/master' into agent-example
May 21, 2024
c8458fa
openai: make content optional for tool call grammar gen
May 22, 2024
a39e6e0
openai: pretty indent json response
May 22, 2024
793f4ff
agent: support OpenAI: --endpoint https://api.openai.com --auth "Bear…
May 22, 2024
a1c4aac
server: ultra basic tools, tool_choice, tool_calls support
May 22, 2024
298c098
Merge remote-tracking branch 'origin/master' into agent-example
Jun 9, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
server: update tool calling, introduce system prompt for json schema
  • Loading branch information
ochafik committed May 2, 2024
commit e41b6ceee9f0b9bbae28f5c608dff3e3f6fb4864
99 changes: 70 additions & 29 deletions common/json-schema-to-grammar.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -200,28 +200,28 @@ static std::string format_literal(const std::string & literal) {
return "\"" + escaped + "\"";
}


/*
Builds a grammar fragment matching any string that is NOT the given literal.
not_literal('a') -> '([^a]).*'
not_literal('abc') -> '([^a] | "a" ([^b] | "b" ([^c])?)?)?.*'
*/
static std::string not_literal(const std::string & literal, bool dotall = true) {
    assert(literal.size() > 0);
    std::stringstream out;
    // At each position i, either a character other than literal[i] appears,
    // or literal[i] itself appears and the mismatch must occur further on.
    std::function<void(size_t)> recurse = [&](size_t i) {
        const char c = literal[i];
        out << "[^" << c << "]";
        if (i < literal.size() - 1) {
            // BUG FIX: std::to_string(char) promotes to int and formats the
            // character code (e.g. 'a' -> "97"); quote the character itself.
            out << " | " << format_literal(std::string(1, c)) << " (";
            recurse(i + 1);
            out << ")?";
        }
    };
    out << "(";
    recurse(0);
    // DOTALL / DOT are file-level char-class constants; trailing '*' lets
    // any suffix follow once the literal has been ruled out.
    out << ")" << (dotall ? DOTALL : DOT) << "*";
    return out.str();
}
// static std::string not_literal(const std::string & literal, bool dotall = true) {
// assert(literal.size() > 0);
// std::stringstream out;
// std::function<void(int)> recurse = [&](size_t i) {
// const char & c = literal[i];
// out << "[^" << c << "]";
// out << " " << (dotall ? DOTALL : DOT) << "*";
// if (i < literal.size() - 1) {
// out << " | " << format_literal(literal.substr(i, 1)) << " (";
// recurse(i + 1);
// out << ")?";
// }
// };
// out << "(";
// recurse(0);
// out << ")";
// return out.str();
// }


class SchemaConverter {
Expand Down Expand Up @@ -625,17 +625,57 @@ class SchemaConverter {
visit_refs(schema);
}

/*
reply ::= prefix tool-call*

prefix ::= [^<] prefix
| "<" [^t] prefix
| "<t" [^o] prefix
| "<to" [^o] prefix
| "<too" [^l] prefix
| "<tool" [^_] prefix
| "<tool_" [^c] prefix
| "<tool_c" [^a] prefix
| "<tool_ca" [^l] prefix
| "<tool_cal" [^l] prefix
| "<tool_call" [^>] prefix
|

*/

std::string not_literal(const std::string & literal) {
    // Reserve a fresh rule name ("not" + literal + counter); "!!!" is a
    // sentinel body that can never collide with an existing rule.
    auto rule_name = _find_rule_name("not" + literal, "!!!");
    // Build one alternative per position: the literal's prefix up to that
    // position, followed by any character that breaks the match, then a
    // recursive reference back to this rule.
    std::string body;
    const size_t len = literal.size();
    for (size_t pos = 0; pos < len; pos++) {
        body += " | ";
        if (pos > 0) {
            body += format_literal(literal.substr(0, pos));
            body += " ";
        }
        body += "[^";
        body += literal[pos];
        body += "] ";
        body += rule_name;
    }
    _rules[rule_name] = body;
    return rule_name;
}

std::string _escape_name(const std::string & name) {
    // Replace every character that is invalid in a grammar rule name with '-'.
    const auto sanitized = regex_replace(name, INVALID_RULE_CHARS_RE, "-");
    return sanitized;
}
std::string _find_rule_name(const std::string & name, const std::string & rule) {
auto esc_name = _escape_name(name);
int i = 0;
while (_rules.find(esc_name + std::to_string(i)) != _rules.end() && _rules[esc_name + std::to_string(i)] != rule) {
i++;
}
return esc_name + std::to_string(i);
}
std::string add_rule(const std::string & name, const std::string & rule) {
std::string esc_name = regex_replace(name, INVALID_RULE_CHARS_RE, "-");
auto esc_name = _escape_name(name);
if (_rules.find(esc_name) == _rules.end() || _rules[esc_name] == rule) {
_rules[esc_name] = rule;
return esc_name;
} else {
int i = 0;
while (_rules.find(esc_name + std::to_string(i)) != _rules.end() && _rules[esc_name + std::to_string(i)] != rule) {
i++;
}
std::string key = esc_name + std::to_string(i);
auto key = _find_rule_name(esc_name, rule);
_rules[key] = rule;
return key;
}
Expand Down Expand Up @@ -789,7 +829,7 @@ std::string json_schema_to_grammar(const json & schema) {
return converter.format_grammar();
}

std::string tool_call_grammar(const json & tools) {
std::string tool_call_grammar(const json & tools, bool allow_parallel_calls) {
SchemaConverter converter([](const std::string &) { return json::object(); }, /* dotall= */ false);

std::vector<std::string> tool_rules;
Expand All @@ -814,12 +854,13 @@ std::string tool_call_grammar(const json & tools) {

converter.add_rule(
"root",
not_literal("<tool_call>") + " | "
+ converter.add_rule(
converter.not_literal("<tool_call>") + " " +
converter.add_rule(
"tool_call",
"\"<tool_call>\" "
"\"<tool_call>\" ("
+ join(tool_rules.begin(), tool_rules.end(), " | ")
+ " \"</tool_call>\""));
+ ") \"</tool_call>\""
) + (allow_parallel_calls ? "*" : "?"));

converter.check_errors();
return converter.format_grammar();
Expand Down
2 changes: 1 addition & 1 deletion common/json-schema-to-grammar.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#pragma once
#include "json.hpp"

std::string tool_call_grammar(const nlohmann::ordered_json & tools);
std::string tool_call_grammar(const nlohmann::ordered_json & tools, bool allow_parallel_calls = false);
std::string json_schema_to_grammar(const nlohmann::ordered_json& schema);
66 changes: 38 additions & 28 deletions examples/server/utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ inline bool verify_custom_template(const std::string & tmpl) {
}

// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages, const std::string & tools_tag) {
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages, const std::string & extra_system_message) {
size_t alloc_size = 0;
// vector holding all allocated string to be passed to llama_chat_apply_template
std::vector<std::string> str(messages.size() * 2);
Expand All @@ -138,18 +138,12 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
chat[i].content = str[i*2 + 1].c_str();
}

if (!tools_tag.empty()) {
alloc_size += tools_tag.size();
if (chat.empty()) {
str.resize(2);
str[0] = "user";
str[1] = tools_tag;
chat.push_back({str[0].c_str(), str[1].c_str()});
} else {
auto & content = str[str.size() - 1];
content += tools_tag;
chat[chat.size() - 1].content = content.c_str();
}
if (!extra_system_message.empty()) {
alloc_size += extra_system_message.size();

llama_chat_message msg { "system", extra_system_message.c_str() };
chat.insert(chat.begin(), msg);
// chat.push_back(msg);
}

const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str();
Expand Down Expand Up @@ -387,15 +381,42 @@ static json oaicompat_completion_params_parse(
llama_params["temperature"] = json_value(body, "temperature", 0.0);
llama_params["top_p"] = json_value(body, "top_p", 1.0);

std::string tools_tag;
if (body.contains("tools") && body["tools"].is_array()) {
std::string extra_system_message;

// Handle "response_format" field
if (body.contains("response_format")) {
json response_format = json_value(body, "response_format", json::object());
std::string response_type = json_value(response_format, "type", std::string());
if (response_type == "json_object") {
llama_params["json_schema"] = json_value(response_format, "schema", json::object());
extra_system_message = (std::stringstream()
<< "You are a helpful assistant that answers in JSON. Here's the json schema you must adhere to:\n<schema>\n"
<< llama_params["json_schema"].dump().c_str()
<< "\n</schema>"
).str();
} else if (!response_type.empty() && response_type != "text") {
throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
}
} else if (body.contains("tools") && body["tools"].is_array()) {
const auto & tools = body["tools"];
llama_params["grammar"] = tool_call_grammar(tools);
tools_tag = (std::stringstream() << "\n\n<tools>" << tools.dump(2) << "</tools>").str();

extra_system_message = (std::stringstream()
<< "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. "
<< "You may call one or more functions to assist with the user query. "
<< "Don't make assumptions about what values to plug into functions. "
<< "Here are the available tools: <tools>"
<< tools.dump().c_str()
<< "</tools>\n"
<< "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:"
<< "<tool_call>"
<< "{\"arguments\": <args-dict>, \"name\": <function-name>}"
<< "</tool_call>"
).str();
}

// Apply chat template to the list of messages
llama_params["prompt"] = format_chat(model, chat_template, body["messages"], tools_tag);
llama_params["prompt"] = format_chat(model, chat_template, body["messages"], extra_system_message);

// Handle "stop" field
if (body.contains("stop") && body["stop"].is_string()) {
Expand All @@ -404,17 +425,6 @@ static json oaicompat_completion_params_parse(
llama_params["stop"] = json_value(body, "stop", json::array());
}

// Handle "response_format" field
if (body.contains("response_format")) {
json response_format = json_value(body, "response_format", json::object());
std::string response_type = json_value(response_format, "type", std::string());
if (response_type == "json_object") {
llama_params["json_schema"] = json_value(response_format, "schema", json::object());
} else if (!response_type.empty() && response_type != "text") {
throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
}
}

// Handle "n" field
int n_choices = json_value(body, "n", 1);
if (n_choices != 1) {
Expand Down