"""Prompt templates for evidence summarization, question answering, paper selection, and citation extraction."""
from datetime import datetime
# Per-excerpt evidence summarization prompt.
# Placeholders: citation, text, question, summary_length.
summary_prompt = (
    "Summarize the excerpt below to help answer a question.\n\nExcerpt from"
    " {citation}\n\n------------\n\n{text}\n\n------------"
    "\n\nQuestion: {question}\n\nDo not directly"
    " answer the question, instead summarize to give evidence to help answer the"
    " question. Stay detailed; report specific numbers, equations, or direct quotes"
    ' (marked with quotation marks). Reply "Not applicable" if the excerpt is'
    " irrelevant. At the end of your response,"
    # Fix: leading space so "response," and "provide" don't run together.
    # Don't use 0-10 since we mention "not applicable" instead
    " provide an integer score from 1-10 on a newline indicating relevance to question."
    " Do not explain your score."
    "\n\nRelevant Information Summary ({summary_length}):"
)
# Extends the `text` variable of the above `summary_prompt` with any
# extracted markdown tables for the same excerpt.
text_with_tables_prompt_template = (
    "{text}\n\n------------\n\n"
    "Markdown tables from {citation}. If the markdown is poorly formatted,"
    " defer to the images\n\n{tables}"
)
# Bare excerpt + question prompt, used with the JSON-output system prompts below.
summary_json_prompt = (
    "Excerpt from {citation}"
    "\n\n------------\n\n{text}\n\n------------\n\n"
    "Question: {question}\n\n"
)
# The below "cannot answer" sentinel phrase should:
# 1. Lead to complete tool being called with has_successful_answer=False
# 2. Can be used for unit testing
CANNOT_ANSWER_PHRASE = "I cannot answer"
# Used when refining a previously generated answer against a (possibly new) context.
answer_iteration_prompt_template = (
    "You are iterating on a prior answer, with a potentially different"
    " context:\n\n{prior_answer}\n\nCreate a new answer only using context"
    " keys and data from the included context. You can not use context keys"
    " from the prior answer which are not also included in the above"
    " context.\n\n"
)
# Explicit formatting rules for citation keys, shown verbatim to the LLM.
# Each line intentionally ends with " \n" (space before newline), matching
# the original hand-written template.
CITATION_KEY_CONSTRAINTS = "".join(
    line + " \n"
    for line in (
        "## Valid citation examples, only use comma/space delimited parentheticals:",
        "- (pqac-d79ef6fa, pqac-0f650d59)",
        "- (pqac-d79ef6fa)",
        "## Invalid citation examples:",
        "- (pqac-d79ef6fa and pqac-0f650d59)",
        "- (pqac-d79ef6fa;pqac-0f650d59)",
        "- (pqac-d79ef6fa-pqac-0f650d59)",
        "- pqac-d79ef6fa and pqac-0f650d59",
        "- Example's work (pqac-d79ef6fa)",
        "- (pages pqac-d79ef6fa)",
    )
)
# Main answer-generation prompt; placeholders: context, question,
# example_citation, prior_answer_prompt, answer_length.
qa_prompt = (
    "Answer the question below with the context.\n\n"
    "Context:\n\n{context}\n\n------------\n\n"
    "Question: {question}\n\n"
    "Write an answer based on the context."
    " If the context provides insufficient information reply"
    f' "{CANNOT_ANSWER_PHRASE}." '
    "For each part of your answer, indicate which sources most support it"
    " via citation keys at the end of sentences, like {example_citation}."
    " Only cite from the context above and only use the citation keys from"
    f" the context. {CITATION_KEY_CONSTRAINTS}"
    "Do not concatenate citation keys, just use them as is."
    " Write in the style of a scientific article, with concise sentences"
    " and coherent paragraphs. This answer will be used directly,"
    " so do not add any extraneous information.\n\n"
    "{prior_answer_prompt}Answer ({answer_length}):"
)
# Paper-selection prompt: the model returns a comma-separated list of keys.
select_paper_prompt = (
    "Select papers that may help answer the question below."
    " Papers are listed as $KEY: $PAPER_INFO."
    " Return a list of keys, separated by commas."
    ' Return "None", if no papers are applicable.'
    " Choose papers that are relevant, from reputable sources, and timely"
    " (if the question requires timely information).\n\n"
    "Question: {question}\n\nPapers: {papers}\n\nSelected keys:"
)
# MLA citation-generation prompt. NOTE: the current year is baked in at
# module import time via datetime.now().
citation_prompt = (
    "Provide the citation for the following text in MLA Format."
    " Do not write an introductory sentence."
    " Do not fabricate a DOI such as '10.xxxx' if one cannot be found, just"
    " leave it out of the citation."
    " If reporting date accessed, the current year is"
    f" {datetime.now().year}\n\n{{text}}\n\nCitation:"
)
# Converts an MLA citation into JSON with title/authors/doi keys.
structured_citation_prompt = (
    "Extract the title, authors, and doi as a JSON from this MLA citation."
    " If any field can not be found, return it as null."
    " Use title, authors, and doi as keys, author's value should be a list"
    " of authors. {citation}\n\nCitation JSON:"
)
# Default system prompt for answer generation.
default_system_prompt = (
    "Answer in a direct and concise tone."
    " Your audience is an expert, so be highly specific."
    " If there are ambiguous terms or acronyms, first define them."
)
# NOTE: double curly braces render as literal braces under str.format,
# so the JSON example below survives templating intact.
summary_json_system_prompt = (
    "Provide a summary of the relevant information that could help answer the"
    " question based on the excerpt. Your summary, combined with many others,"
    " will be given to the model to generate an answer. Respond with the"
    " following JSON format:"
    '\n\n{{\n "summary": "...",\n "relevance_score": 0-10\n}}'
    "\n\nwhere `summary` is relevant information from the text -"
    " {summary_length} words. `relevance_score` is an integer 0-10 for the"
    " relevance of `summary` to the question.\n\nThe excerpt may or may not"
    " contain relevant information. If not, leave `summary` empty, and make"
    " `relevance_score` be 0."
)
# Multimodal variant of summary_json_system_prompt: adds a `used_images`
# boolean to the requested JSON. Double braces escape str.format.
summary_json_multimodal_system_prompt = (
    "Provide a summary of the relevant information that could help answer the"
    " question based on the excerpt. Your summary, combined with many others,"
    " will be given to the model to generate an answer. Respond with the"
    " following JSON format:"
    '\n\n{{\n "summary": "...",\n "relevance_score": 0-10,\n "used_images": "..."\n}}'
    "\n\nwhere `summary` is relevant information from the text -"
    " {summary_length} words. `relevance_score` is an integer 0-10 for the"
    " relevance of `summary` to the question. `used_images` is a boolean flag"
    " indicating if any images present in a multimodal message were used, and"
    " if no images were present it should be false.\n\nThe excerpt may or may"
    " not contain relevant information. If not, leave `summary` empty, and"
    " make `relevance_score` be 0."
)
# System prompt for the agent environment; intentionally minimal.
env_system_prompt = (
    # Matching https://github.com/langchain-ai/langchain/blob/langchain%3D%3D0.2.3/libs/langchain/langchain/agents/openai_functions_agent/base.py#L213-L215
    "You are a helpful AI assistant."
)
# First message of an agent episode; placeholders: question,
# complete_tool_name (appears twice), status.
env_reset_prompt = (
    "Use the tools to answer the question: {question}\n\n"
    "When the answer looks sufficient, you can terminate by calling the"
    " {complete_tool_name} tool. If the answer does not look sufficient, and"
    " you have already tried to answer several times with different"
    " evidence, terminate by calling the {complete_tool_name} tool. The"
    " current status of evidence/papers/cost is {status}"
)
# Prompt templates for use with LitQA.
# Renders a question with its answer `options` (presumably the lettered
# multiple-choice answers -- verify against the caller).
QA_PROMPT_TEMPLATE = "Q: {question}\n\nOptions:\n{options}"
# Grades a proposed LitQA answer by mapping it back to a single option letter
# ('0' when blank or ambiguous).
EVAL_PROMPT_TEMPLATE = (
    "Given the following question and a proposed answer to the question,"
    " return the single-letter choice in the question that matches the"
    " proposed answer. If the proposed answer is blank or an empty string, or"
    " multiple options are matched, respond with '0'."
    "\n\nQuestion: {qa_prompt}\n\nProposed Answer: {qa_answer}"
    "\n\nSingle Letter Answer:"
)
CONTEXT_OUTER_PROMPT = "{context_str}\n\nValid Keys: {valid_keys}"
EMPTY_CONTEXTS = len(CONTEXT_OUTER_PROMPT.format(context_str="", valid_keys="").strip())
CONTEXT_INNER_PROMPT_NOT_DETAILED = "{name}: {text}"
CONTEXT_INNER_PROMPT = f"{CONTEXT_INNER_PROMPT_NOT_DETAILED}\nFrom {{citation}}"