forked from MemTensor/MemOS
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
216 lines (198 loc) · 9.2 KB
/
.env.example
File metadata and controls
216 lines (198 loc) · 9.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
# MemOS Environment Variables (core runtime)
# Legend: [required] needed for default startup; others are optional or conditional per comments.
## Base
TZ=Asia/Shanghai
MOS_CUBE_PATH=/tmp/data_test # local data path
MEMOS_BASE_PATH=. # CLI/SDK cache path
MOS_ENABLE_DEFAULT_CUBE_CONFIG=true # enable default cube config
MOS_ENABLE_REORGANIZE=false # enable memory reorg
# MOS Text Memory Type
MOS_TEXT_MEM_TYPE=general_text # general_text | tree_text
ASYNC_MODE=sync # async/sync, used in default cube config
## User/session defaults
# Top-K for LLM in the Product API (old version)
MOS_TOP_K=50
## Chat LLM (main dialogue)
# LLM model name for the Product API
MOS_CHAT_MODEL=gpt-4o-mini
# Temperature for LLM in the Product API
MOS_CHAT_TEMPERATURE=0.8
# Max tokens for LLM in the Product API
MOS_MAX_TOKENS=2048
# Top-P for LLM in the Product API
MOS_TOP_P=0.9
# LLM for the Product API backend
MOS_CHAT_MODEL_PROVIDER=openai # openai | huggingface | vllm
OPENAI_API_KEY=sk-xxx # [required] when provider=openai
OPENAI_API_BASE=https://api.openai.com/v1 # [required] base for the key
## MemReader / retrieval LLM
MEMRADER_MODEL=gpt-4o-mini
MEMRADER_API_KEY=sk-xxx # [required] can reuse OPENAI_API_KEY
MEMRADER_API_BASE=http://localhost:3000/v1 # [required] base for the key
MEMRADER_MAX_TOKENS=5000
## Embedding & rerank
# embedding dim
EMBEDDING_DIMENSION=1024
# set default embedding backend
MOS_EMBEDDER_BACKEND=universal_api # universal_api | ollama
# set openai style
MOS_EMBEDDER_PROVIDER=openai # required when universal_api
# embedding model
MOS_EMBEDDER_MODEL=bge-m3 # siliconflow → use BAAI/bge-m3
# embedding url
MOS_EMBEDDER_API_BASE=http://localhost:8000/v1 # required when universal_api
# embedding model key
MOS_EMBEDDER_API_KEY=EMPTY # required when universal_api
OLLAMA_API_BASE=http://localhost:11434 # required when backend=ollama
# reranker config
MOS_RERANKER_BACKEND=http_bge # http_bge | http_bge_strategy | cosine_local
# reranker url
MOS_RERANKER_URL=http://localhost:8001 # required when backend=http_bge*
# reranker model
MOS_RERANKER_MODEL=bge-reranker-v2-m3 # siliconflow → use BAAI/bge-reranker-v2-m3
MOS_RERANKER_HEADERS_EXTRA= # extra headers, JSON string, e.g. {"Authorization":"Bearer your_token"}
# reranker strategy (presumably used when backend=http_bge_strategy — verify against code)
MOS_RERANKER_STRATEGY=single_turn
# External Services (for evaluation scripts)
# API key for reproducing the Zep (competitor product) evaluation
ZEP_API_KEY=your_zep_api_key_here
# API key for reproducing the Mem0 (competitor product) evaluation
MEM0_API_KEY=your_mem0_api_key_here
# API key for reproducing the MemU (competitor product) evaluation
MEMU_API_KEY=your_memu_api_key_here
# API key for reproducing the MEMOBASE (competitor product) evaluation
MEMOBASE_API_KEY=your_memobase_api_key_here
# Project URL for reproducing the MEMOBASE (competitor product) evaluation
MEMOBASE_PROJECT_URL=your_memobase_project_url_here
# LLM for evaluation
MODEL=gpt-4o-mini
# embedding model for evaluation
EMBEDDING_MODEL=nomic-embed-text:latest
## Internet search & preference memory
# Enable web search
ENABLE_INTERNET=false
# API key for BOCHA Search
BOCHA_API_KEY= # required if ENABLE_INTERNET=true
# default search mode
SEARCH_MODE=fast # fast | fine | mixture
# Strategy for the fine (slow) retrieval path; "rewrite" rewrites the query before searching
FINE_STRATEGY=rewrite # rewrite | recreate | deep_search
# Whether to enable preference memory
ENABLE_PREFERENCE_MEMORY=true
# Preference Memory Add Mode
PREFERENCE_ADDER_MODE=fast # fast | safe
# Whether to deduplicate explicit preferences based on factual memory
DEDUP_PREF_EXP_BY_TEXTUAL=false
## Reader chunking
MEM_READER_BACKEND=simple_struct # simple_struct | strategy_struct
MEM_READER_CHAT_CHUNK_TYPE=default # default | content_length
MEM_READER_CHAT_CHUNK_TOKEN_SIZE=1600 # tokens per chunk (default mode)
MEM_READER_CHAT_CHUNK_SESS_SIZE=10 # sessions per chunk (default mode)
MEM_READER_CHAT_CHUNK_OVERLAP=2 # overlap between chunks
## Scheduler (MemScheduler / API)
# Enable or disable the main switch for configuring the memory scheduler during MemOS class initialization
MOS_ENABLE_SCHEDULER=false
# Determine the number of most relevant memory entries that the scheduler retrieves or processes during runtime (such as reordering or updating working memory)
MOS_SCHEDULER_TOP_K=10
# The time interval (in seconds) for updating "Activation Memory" (usually referring to caching or short-term memory mechanisms)
MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL=300
# The size of the context window considered by the scheduler when processing tasks (such as the number of recent messages or conversation rounds)
MOS_SCHEDULER_CONTEXT_WINDOW_SIZE=5
# The maximum number of working threads allowed in the scheduler thread pool for concurrent task execution
MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS=10000
# The polling interval (in seconds) for the scheduler to consume new messages/tasks from the queue. The smaller the value, the faster the response, but the CPU usage may be higher
MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS=0.01
# Whether to enable the parallel distribution function of the scheduler to improve the throughput of concurrent operations
MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH=true
# The specific switch to enable or disable the "Activate Memory" function in the scheduler logic
MOS_SCHEDULER_ENABLE_ACTIVATION_MEMORY=false
# Control whether the scheduler instance is actually started during server initialization. If false, the scheduler object may be created but its background loop will not be started
API_SCHEDULER_ON=true
# Specifically define the window size for API search operations in OptimizedScheduler. It is passed to the scheduler API module to control the scope of the search context
API_SEARCH_WINDOW_SIZE=5
# Specify how many rounds of previous conversation (history) to retrieve and consider during the "hybrid search" (fast search + asynchronous fine search). This helps provide context-aware search results
API_SEARCH_HISTORY_TURNS=5
MEMSCHEDULER_USE_REDIS_QUEUE=false
## Graph / vector stores
# Neo4j database selection mode
NEO4J_BACKEND=neo4j-community # neo4j-community | neo4j | nebular | polardb
# Neo4j database url
NEO4J_URI=bolt://localhost:7687 # required when backend=neo4j*
# Neo4j database user
NEO4J_USER=neo4j # required when backend=neo4j*
# Neo4j database password
NEO4J_PASSWORD=12345678 # required when backend=neo4j*
# Neo4j database name
NEO4J_DB_NAME=neo4j # required for shared-db mode
# Whether MemOS shares a single Neo4j database across users (shared-db mode)
MOS_NEO4J_SHARED_DB=false
QDRANT_HOST=localhost
QDRANT_PORT=6333
# For Qdrant Cloud / remote endpoint (takes priority if set):
QDRANT_URL=your_qdrant_url
QDRANT_API_KEY=your_qdrant_key
# milvus server uri
MILVUS_URI=http://localhost:19530 # required when ENABLE_PREFERENCE_MEMORY=true
MILVUS_USER_NAME=root # same as above
MILVUS_PASSWORD=12345678 # same as above
# PolarDB endpoint/host
POLAR_DB_HOST=localhost
# PolarDB port
POLAR_DB_PORT=5432
# PolarDB username
POLAR_DB_USER=root
# PolarDB password
POLAR_DB_PASSWORD=123456
# PolarDB database name
POLAR_DB_DB_NAME=shared_memos_db
# PolarDB Server Mode:
# If set to true, use Multi-Database Mode where each user has their own independent database (physical isolation).
# If set to false (default), use Shared Database Mode where all users share one database with logical isolation via username.
POLAR_DB_USE_MULTI_DB=false
# PolarDB connection pool size
POLARDB_POOL_MAX_CONN=100
## Redis-related configuration
# The Redis message queue carries scheduling messages and synchronizes some variables
MEMSCHEDULER_REDIS_HOST= # fallback keys if not using the global ones
MEMSCHEDULER_REDIS_PORT=
MEMSCHEDULER_REDIS_DB=
MEMSCHEDULER_REDIS_PASSWORD=
MEMSCHEDULER_REDIS_TIMEOUT=
MEMSCHEDULER_REDIS_CONNECT_TIMEOUT=
## Nacos (optional config center)
# Whether to enable Nacos long-polling config watch (defaults to true; set to false to disable)
NACOS_ENABLE_WATCH=false
# Long-polling watch interval in seconds (default is 30; may be left unconfigured)
NACOS_WATCH_INTERVAL=60
# nacos server address
NACOS_SERVER_ADDR=
# nacos dataid
NACOS_DATA_ID=
# nacos group
NACOS_GROUP=DEFAULT_GROUP
# nacos namespace
NACOS_NAMESPACE=
# nacos ak
AK=
# nacos sk
SK=
# chat model for chat api
CHAT_MODEL_LIST='[{
"backend": "deepseek",
"api_base": "http://localhost:1234",
"api_key": "your-api-key",
"model_name_or_path": "deepseek-r1",
"support_models": ["deepseek-r1"]
}]'
# RabbitMQ host name for message-log pipeline
MEMSCHEDULER_RABBITMQ_HOST_NAME=
# RabbitMQ user name for message-log pipeline
MEMSCHEDULER_RABBITMQ_USER_NAME=
# RabbitMQ password for message-log pipeline
MEMSCHEDULER_RABBITMQ_PASSWORD=
# RabbitMQ virtual host for message-log pipeline
MEMSCHEDULER_RABBITMQ_VIRTUAL_HOST=memos
# Erase connection state on connect for message-log pipeline
MEMSCHEDULER_RABBITMQ_ERASE_ON_CONNECT=true
# RabbitMQ port for message-log pipeline
MEMSCHEDULER_RABBITMQ_PORT=5672