
Commit bca0bae

new version update
1 parent 67af268 commit bca0bae

File tree

10 files changed: +676 -131 lines changed


cli/cli_interface.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ def __init__(self):
         self.uploaded_file = None
         self.is_running = True
         self.processing_history = []
-        self.enable_indexing = True  # Default configuration
+        self.enable_indexing = False  # Default configuration (matching UI: fast mode by default)
 
         # Load segmentation config from the same source as UI
         self._load_segmentation_config()

cli/main_cli.py

Lines changed: 7 additions & 3 deletions
@@ -214,15 +214,17 @@ async def main():
     # Create the CLI application
     app = CLIApp()
 
-    # Set up configuration
+    # Set up configuration - disable indexing by default to speed up processing
     if args.optimized:
         app.cli.enable_indexing = False
         print(
             f"\n{Colors.YELLOW}⚡ Optimized mode enabled - indexing disabled{Colors.ENDC}"
         )
     else:
+        # Also disable indexing by default
+        app.cli.enable_indexing = False
         print(
-            f"\n{Colors.GREEN}🧠 Comprehensive mode enabled - full intelligence analysis{Colors.ENDC}"
+            f"\n{Colors.YELLOW}⚡ Fast mode enabled - indexing disabled by default{Colors.ENDC}"
         )
 
     # Configure document segmentation settings
@@ -248,7 +250,9 @@ async def main():
         if not os.path.exists(args.file):
             print(f"{Colors.FAIL}❌ File not found: {args.file}{Colors.ENDC}")
             sys.exit(1)
-        success = await run_direct_processing(app, args.file, "file")
+        # Use a file:// prefix for consistency with interactive mode, ensuring the file is copied rather than moved
+        file_url = f"file://{os.path.abspath(args.file)}"
+        success = await run_direct_processing(app, file_url, "file")
     elif args.url:
         success = await run_direct_processing(app, args.url, "url")
     elif args.chat:
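For reference, the new file handling is a one-line transformation. A minimal standalone sketch (not part of the commit; the helper name is illustrative), using only the standard library:

import os

def to_file_url(path: str) -> str:
    # Build the file:// URL handed to run_direct_processing, as in the updated
    # main_cli.py; resolving to an absolute path keeps direct invocation
    # consistent with interactive mode, where files are copied rather than moved.
    return f"file://{os.path.abspath(path)}"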

cli/workflows/cli_workflow_adapter.py

Lines changed: 94 additions & 59 deletions
@@ -4,6 +4,14 @@
 
 This adapter provides CLI-optimized interface to the latest agent orchestration engine,
 with enhanced progress reporting, error handling, and CLI-specific optimizations.
+
+Version: 2.0 (Updated to match UI version)
+Changes:
+- Default enable_indexing=False for faster processing (matching UI defaults)
+- Mode-aware progress callback with detailed stage mapping
+- Chat pipeline now accepts enable_indexing parameter
+- Improved error handling and resource management
+- Enhanced progress display for different modes (fast/comprehensive/chat)
 """
 
 import os
@@ -36,7 +44,7 @@ def __init__(self, cli_interface=None):
 
     async def initialize_mcp_app(self) -> Dict[str, Any]:
         """
-        Initialize MCP application for CLI usage.
+        Initialize MCP application for CLI usage (improved version matching UI).
 
         Returns:
             dict: Initialization result
@@ -47,7 +55,7 @@ async def initialize_mcp_app(self) -> Dict[str, Any]:
                 "🚀 Initializing Agent Orchestration Engine", 2.0
             )
 
-        # Initialize MCP application
+        # Initialize MCP application using async context manager (matching UI pattern)
         self.app = MCPApp(name="cli_agent_orchestration")
         self.app_context = self.app.run()
         agent_app = await self.app_context.__aenter__()
@@ -56,8 +64,6 @@ async def initialize_mcp_app(self) -> Dict[str, Any]:
         self.context = agent_app.context
 
         # Configure filesystem access
-        import os
-
         self.context.config.mcp.servers["filesystem"].args.extend([os.getcwd()])
 
         if self.cli_interface:
@@ -93,48 +99,75 @@ async def cleanup_mcp_app(self):
                    f"⚠️ Cleanup warning: {str(e)}", "warning"
                )
 
-    def create_cli_progress_callback(self) -> Callable:
+    def create_cli_progress_callback(self, enable_indexing: bool = True) -> Callable:
         """
-        Create CLI-optimized progress callback function.
+        Create CLI-optimized progress callback function with mode-aware stage mapping.
+
+        This matches the UI version's detailed progress mapping logic.
+
+        Args:
+            enable_indexing: Whether indexing is enabled (affects stage mapping)
 
         Returns:
            Callable: Progress callback function
        """

        def progress_callback(progress: int, message: str):
            if self.cli_interface:
-                # Map progress to CLI stages
-                if progress <= 10:
-                    self.cli_interface.display_processing_stages(1)
-                elif progress <= 25:
-                    self.cli_interface.display_processing_stages(2)
-                elif progress <= 40:
-                    self.cli_interface.display_processing_stages(3)
-                elif progress <= 50:
-                    self.cli_interface.display_processing_stages(4)
-                elif progress <= 60:
-                    self.cli_interface.display_processing_stages(5)
-                elif progress <= 70:
-                    self.cli_interface.display_processing_stages(6)
-                elif progress <= 85:
-                    self.cli_interface.display_processing_stages(7)
+                # Mode-aware stage mapping (matching UI version logic)
+                if enable_indexing:
+                    # Full workflow mapping: Initialize -> Analyze -> Download -> Plan -> References -> Repos -> Index -> Implement
+                    if progress <= 5:
+                        stage = 0  # Initialize
+                    elif progress <= 10:
+                        stage = 1  # Analyze
+                    elif progress <= 25:
+                        stage = 2  # Download
+                    elif progress <= 40:
+                        stage = 3  # Plan
+                    elif progress <= 50:
+                        stage = 4  # References
+                    elif progress <= 60:
+                        stage = 5  # Repos
+                    elif progress <= 70:
+                        stage = 6  # Index
+                    elif progress <= 85:
+                        stage = 7  # Implement
+                    else:
+                        stage = 8  # Complete
                else:
-                    self.cli_interface.display_processing_stages(8)
+                    # Fast mode mapping: Initialize -> Analyze -> Download -> Plan -> Implement
+                    if progress <= 5:
+                        stage = 0  # Initialize
+                    elif progress <= 10:
+                        stage = 1  # Analyze
+                    elif progress <= 25:
+                        stage = 2  # Download
+                    elif progress <= 40:
+                        stage = 3  # Plan
+                    elif progress <= 85:
+                        stage = 4  # Implement (skip References, Repos, Index)
+                    else:
+                        stage = 4  # Complete
+
+                self.cli_interface.display_processing_stages(stage, enable_indexing)
 
                # Display status message
                self.cli_interface.print_status(message, "processing")
 
        return progress_callback
 
    async def execute_full_pipeline(
-        self, input_source: str, enable_indexing: bool = True
+        self, input_source: str, enable_indexing: bool = False
    ) -> Dict[str, Any]:
        """
        Execute the complete intelligent multi-agent research orchestration pipeline.
+
+        Updated to match UI version: default enable_indexing=False for faster processing.
 
        Args:
            input_source: Research input source (file path, URL, or preprocessed analysis)
-            enable_indexing: Whether to enable advanced intelligence analysis
+            enable_indexing: Whether to enable advanced intelligence analysis (default: False)
 
        Returns:
            dict: Comprehensive pipeline execution result
@@ -145,16 +178,19 @@ async def execute_full_pipeline(
            execute_multi_agent_research_pipeline,
        )
 
-        # Create CLI progress callback
-        progress_callback = self.create_cli_progress_callback()
+        # Create CLI progress callback with mode awareness
+        progress_callback = self.create_cli_progress_callback(enable_indexing)
 
        # Display pipeline start
        if self.cli_interface:
-            mode = "comprehensive" if enable_indexing else "optimized"
+            if enable_indexing:
+                mode_msg = "🧠 comprehensive (with indexing)"
+            else:
+                mode_msg = "⚡ fast (indexing disabled)"
            self.cli_interface.print_status(
-                f"🚀 Starting {mode} agent orchestration pipeline...", "processing"
+                f"🚀 Starting {mode_msg} agent orchestration pipeline...", "processing"
            )
-            self.cli_interface.display_processing_stages(0)
+            self.cli_interface.display_processing_stages(0, enable_indexing)
 
        # Execute the pipeline
        result = await execute_multi_agent_research_pipeline(
@@ -166,7 +202,8 @@ async def execute_full_pipeline(
 
        # Display completion
        if self.cli_interface:
-            self.cli_interface.display_processing_stages(8)
+            final_stage = 8 if enable_indexing else 4
+            self.cli_interface.display_processing_stages(final_stage, enable_indexing)
            self.cli_interface.print_status(
                "🎉 Agent orchestration pipeline completed successfully!",
                "complete",
@@ -189,12 +226,15 @@ async def execute_full_pipeline(
            "pipeline_mode": "comprehensive" if enable_indexing else "optimized",
        }
 
-    async def execute_chat_pipeline(self, user_input: str) -> Dict[str, Any]:
+    async def execute_chat_pipeline(self, user_input: str, enable_indexing: bool = False) -> Dict[str, Any]:
        """
        Execute the chat-based planning and implementation pipeline.
+
+        Updated to match UI version: accepts enable_indexing parameter.
 
        Args:
            user_input: User's coding requirements and description
+            enable_indexing: Whether to enable indexing for enhanced code understanding (default: False)
 
        Returns:
            dict: Chat pipeline execution result
@@ -208,51 +248,42 @@ def chat_progress_callback(progress: int, message: str):
        # Create CLI progress callback for chat mode
        def chat_progress_callback(progress: int, message: str):
            if self.cli_interface:
-                # Map progress to CLI stages for chat mode
+                # Map progress to CLI stages for chat mode (matching UI logic)
                if progress <= 5:
-                    self.cli_interface.display_processing_stages(
-                        0, chat_mode=True
-                    )  # Initialize
+                    stage = 0  # Initialize
                elif progress <= 30:
-                    self.cli_interface.display_processing_stages(
-                        1, chat_mode=True
-                    )  # Planning
+                    stage = 1  # Planning
                elif progress <= 50:
-                    self.cli_interface.display_processing_stages(
-                        2, chat_mode=True
-                    )  # Setup
+                    stage = 2  # Setup
                elif progress <= 70:
-                    self.cli_interface.display_processing_stages(
-                        3, chat_mode=True
-                    )  # Save Plan
+                    stage = 3  # Save Plan
                else:
-                    self.cli_interface.display_processing_stages(
-                        4, chat_mode=True
-                    )  # Implement
+                    stage = 4  # Implement
+
+                self.cli_interface.display_processing_stages(stage, chat_mode=True)
 
                # Display status message
                self.cli_interface.print_status(message, "processing")
 
        # Display pipeline start
        if self.cli_interface:
+            indexing_note = " (with indexing)" if enable_indexing else " (fast mode)"
            self.cli_interface.print_status(
-                "🚀 Starting chat-based planning pipeline...", "processing"
+                f"🚀 Starting chat-based planning pipeline{indexing_note}...", "processing"
            )
            self.cli_interface.display_processing_stages(0, chat_mode=True)
 
-        # Execute the chat pipeline with indexing enabled for enhanced code understanding
+        # Execute the chat pipeline with configurable indexing
        result = await execute_chat_based_planning_pipeline(
            user_input=user_input,
            logger=self.logger,
            progress_callback=chat_progress_callback,
-            enable_indexing=True,  # Enable indexing for better code implementation
+            enable_indexing=enable_indexing,  # Pass through enable_indexing parameter
        )
 
        # Display completion
        if self.cli_interface:
-            self.cli_interface.display_processing_stages(
-                4, chat_mode=True
-            )  # Final stage for chat mode
+            self.cli_interface.display_processing_stages(4, chat_mode=True)
            self.cli_interface.print_status(
                "🎉 Chat-based planning pipeline completed successfully!",
                "complete",
@@ -268,17 +299,18 @@ def chat_progress_callback(progress: int, message: str):
            return {"status": "error", "error": error_msg, "pipeline_mode": "chat"}
 
    async def process_input_with_orchestration(
-        self, input_source: str, input_type: str, enable_indexing: bool = True
+        self, input_source: str, input_type: str, enable_indexing: bool = False
    ) -> Dict[str, Any]:
        """
        Process input using the intelligent agent orchestration engine.
 
        This is the main CLI interface to the latest agent orchestration capabilities.
+        Updated to match UI version: default enable_indexing=False.
 
        Args:
-            input_source: Input source (file path or URL)
-            input_type: Type of input ('file' or 'url')
-            enable_indexing: Whether to enable advanced intelligence analysis
+            input_source: Input source (file path, URL, or chat input)
+            input_type: Type of input ('file', 'url', or 'chat')
+            enable_indexing: Whether to enable advanced intelligence analysis (default: False)
 
        Returns:
            dict: Processing result with status and details
@@ -301,7 +333,10 @@ async def process_input_with_orchestration(
        # Execute appropriate pipeline based on input type
        if input_type == "chat":
            # Use chat-based planning pipeline for user requirements
-            pipeline_result = await self.execute_chat_pipeline(input_source)
+            # Pass enable_indexing to chat pipeline as well
+            pipeline_result = await self.execute_chat_pipeline(
+                input_source, enable_indexing=enable_indexing
+            )
        else:
            # Use traditional multi-agent research pipeline for files/URLs
            pipeline_result = await self.execute_full_pipeline(
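The stage thresholds above are easier to see in isolation. A minimal sketch (not part of the commit; the function name is illustrative) that reproduces the same progress-to-stage mapping for both modes:

def map_progress_to_stage(progress: int, enable_indexing: bool) -> int:
    # Mirror the thresholds used in create_cli_progress_callback: the full
    # workflow walks stages 0-8, while fast mode collapses References, Repos,
    # and Index into a single Implement stage and tops out at stage 4.
    if enable_indexing:
        thresholds = [(5, 0), (10, 1), (25, 2), (40, 3), (50, 4), (60, 5), (70, 6), (85, 7)]
        final_stage = 8
    else:
        thresholds = [(5, 0), (10, 1), (25, 2), (40, 3), (85, 4)]
        final_stage = 4
    for limit, stage in thresholds:
        if progress <= limit:
            return stage
    return final_stage

# e.g. map_progress_to_stage(65, True) -> 6 (Index); map_progress_to_stage(65, False) -> 4 (Implement)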

mcp_agent.config.yaml

Lines changed: 27 additions & 10 deletions
@@ -3,7 +3,7 @@ anthropic: null
 default_search_server: brave
 document_segmentation:
   enabled: true
-  size_threshold_chars: 3000
+  size_threshold_chars: 50000
 execution_engine: asyncio
 logger:
   level: info
@@ -25,12 +25,35 @@ mcp:
       BOCHA_API_KEY: ''
       PYTHONPATH: .
     brave:
+      # macos and linux should use this
+      # args:
+      #   - -y
+      #   - '@modelcontextprotocol/server-brave-search'
+      # command: npx
+
+      # windows should use this
       args:
-        - -y
-        - '@modelcontextprotocol/server-brave-search'
-      command: npx
+        # please use the correct path for your system
+        - C:/Users/LEGION/AppData/Roaming/npm/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js
+      command: node
       env:
         BRAVE_API_KEY: ''
+    filesystem:
+      # macos and linux should use this
+      # args:
+      #   - -y
+      #   - '@modelcontextprotocol/server-filesystem'
+      #   - .
+      # command: npx
+
+      # windows should use this
+      args:
+        # please use the correct path for your system
+        - C:/Users/LEGION/AppData/Roaming/npm/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js
+        - .
+      command: node
+
+
     code-implementation:
       args:
         - tools/code_implementation_server.py
@@ -71,12 +94,6 @@ mcp:
       command: python
       env:
         PYTHONPATH: .
-    filesystem:
-      args:
-        - -y
-        - '@modelcontextprotocol/server-filesystem'
-        - .
-      command: npx
     github-downloader:
       args:
         - tools/git_command.py
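The config now hard-codes the Windows invocation (node plus an absolute path under the author's npm prefix) and leaves the npx form for macOS/Linux commented out. If the choice were made programmatically instead, a hedged sketch (purely illustrative; the repo keeps this choice in mcp_agent.config.yaml) might look like:

import platform

def brave_search_server_invocation() -> tuple[str, list[str]]:
    # Hypothetical helper returning (command, args) for the brave-search MCP server.
    if platform.system() == "Windows":
        # Windows: run the globally installed package directly with node
        # (adjust the path to your own npm prefix, as the config comment notes).
        return "node", [
            "C:/Users/LEGION/AppData/Roaming/npm/node_modules/"
            "@modelcontextprotocol/server-brave-search/dist/index.js"
        ]
    # macOS/Linux: let npx resolve and run the package.
    return "npx", ["-y", "@modelcontextprotocol/server-brave-search"]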

mcp_agent.secrets.yaml

Lines changed: 2 additions & 2 deletions
@@ -5,5 +5,5 @@ openai:
 
 
 
-anthropic:
-  api_key: ""
+# anthropic:
+#   api_key: ""
