forked from MemTensor/MemOS
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm.py
More file actions
184 lines (162 loc) · 4.87 KB
/
llm.py
File metadata and controls
184 lines (162 loc) · 4.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
from memos.configs.llm import LLMConfigFactory, OllamaLLMConfig
from memos.llms.factory import LLMFactory
from memos.llms.ollama import OllamaLLM
# Scenario 1: Using LLMFactory with Ollama Backend
# This is the most recommended way! 🌟
# Build the backend-specific settings first, then validate the whole
# factory payload in one call.
ollama_settings = {
    "model_name_or_path": "qwen3:0.6b",
    "temperature": 0.8,
    "max_tokens": 1024,
    "top_p": 0.9,
    "top_k": 50,
}
config = LLMConfigFactory.model_validate({"backend": "ollama", "config": ollama_settings})
llm = LLMFactory.from_config(config)
messages = [{"role": "user", "content": "How are you? /no_think"}]
response = llm.generate(messages)
print("Scenario 1:", response)
print("==" * 20)
# Scenario 2: Using Pydantic model directly
# Identical sampling parameters to Scenario 1, but the config object is
# constructed explicitly instead of going through the factory.
sampling = dict(temperature=0.8, max_tokens=1024, top_p=0.9, top_k=50)
config = OllamaLLMConfig(model_name_or_path="qwen3:0.6b", **sampling)
ollama = OllamaLLM(config)
messages = [{"role": "user", "content": "How are you? /no_think"}]
response = ollama.generate(messages)
print("Scenario 2:", response)
print("==" * 20)
# Scenario 3: Using LLMFactory with OpenAI Backend
config = LLMConfigFactory.model_validate(
    {
        "backend": "openai",
        "config": {
            "model_name_or_path": "gpt-4.1-nano",
            "temperature": 0.8,
            "max_tokens": 1024,
            "top_p": 0.9,
            "top_k": 50,
            "api_key": "sk-xxxx",  # placeholder — supply a real key before running
            "api_base": "https://api.openai.com/v1",
        },
    }
)
llm = LLMFactory.from_config(config)
messages = [
    {"role": "user", "content": "Hello, who are you"},
]
response = llm.generate(messages)
print("Scenario 3:", response)
print("==" * 20)
# Streamed variant of the same request.
print("Scenario 3:\n")
for chunk in llm.generate_stream(messages):
    print(chunk, end="")
# Fix: chunks are printed with end="", so terminate the streamed line
# before the separator — otherwise "====..." runs onto the model output.
print()
print("==" * 20)
# Scenario 4: Using LLMFactory with Huggingface Models
# Same factory flow as Scenario 1, pointed at a local/hub HF model.
hf_settings = {
    "model_name_or_path": "Qwen/Qwen3-1.7B",
    "temperature": 0.8,
    "max_tokens": 1024,
    "top_p": 0.9,
    "top_k": 50,
}
config = LLMConfigFactory.model_validate({"backend": "huggingface", "config": hf_settings})
llm = LLMFactory.from_config(config)
messages = [{"role": "user", "content": "Hello, who are you"}]
response = llm.generate(messages)
print("Scenario 4:", response)
print("==" * 20)
# Scenario 5: Using LLMFactory with Qwen (DashScope Compatible API)
# Note:
# This example works for any model that supports the OpenAI-compatible Chat Completion API,
# including but not limited to:
# - Qwen models: qwen-plus, qwen-max-2025-01-25
# - DeepSeek models: deepseek-chat, deepseek-coder, deepseek-v3
# - Other compatible providers: MiniMax, Fireworks, Groq, OpenRouter, etc.
#
# Just set the correct `api_key`, `api_base`, and `model_name_or_path`.
config = LLMConfigFactory.model_validate(
    {
        "backend": "qwen",
        "config": {
            "model_name_or_path": "qwen-plus",  # or qwen-max-2025-01-25
            "temperature": 0.7,
            "max_tokens": 1024,
            "top_p": 0.9,
            "top_k": 50,
            "api_key": "sk-xxx",  # placeholder — supply a real key before running
            "api_base": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        },
    }
)
llm = LLMFactory.from_config(config)
messages = [
    {"role": "user", "content": "Hello, who are you"},
]
response = llm.generate(messages)
print("Scenario 5:", response)
print("==" * 20)
# Streamed variant of the same request.
print("Scenario 5:\n")
for chunk in llm.generate_stream(messages):
    print(chunk, end="")
# Fix: chunks are printed with end="", so terminate the streamed line
# before the separator — otherwise "====..." runs onto the model output.
print()
print("==" * 20)
# Scenario 6: Using LLMFactory with Deepseek-chat
cfg = LLMConfigFactory.model_validate(
    {
        "backend": "deepseek",
        "config": {
            "model_name_or_path": "deepseek-chat",
            "api_key": "sk-xxx",  # placeholder — supply a real key before running
            "api_base": "https://api.deepseek.com",
            "temperature": 0.6,
            "max_tokens": 512,
            "remove_think_prefix": False,
        },
    }
)
llm = LLMFactory.from_config(cfg)
messages = [{"role": "user", "content": "Hello, who are you"}]
resp = llm.generate(messages)
print("Scenario 6:", resp)
# Fix: separator was missing here — every other scenario ends with one.
print("==" * 20)
# Scenario 7: Using LLMFactory with Deepseek-chat + reasoning + CoT + streaming
cfg2 = LLMConfigFactory.model_validate(
    {
        "backend": "deepseek",
        "config": {
            "model_name_or_path": "deepseek-reasoner",
            "api_key": "sk-xxx",  # placeholder — supply a real key before running
            "api_base": "https://api.deepseek.com",
            "temperature": 0.2,
            "max_tokens": 1024,
            "remove_think_prefix": False,
        },
    }
)
llm = LLMFactory.from_config(cfg2)
messages = [
    {
        "role": "user",
        "content": "Explain how to solve this problem step-by-step. Be explicit in your thinking process. Question: If a train travels from city A to city B at 60 mph and returns at 40 mph, what is its average speed for the entire trip? Let's think step by step.",
    },
]
print("Scenario 7:\n")
for chunk in llm.generate_stream(messages):
    print(chunk, end="")
# Fix: chunks are printed with end="", so terminate the streamed line
# before the separator — otherwise "====..." runs onto the model output.
print()
print("==" * 20)