forked from Metaculus/forecasting-tools
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_optimizer.py
More file actions
69 lines (62 loc) · 2.57 KB
/
run_optimizer.py
File metadata and controls
69 lines (62 loc) · 2.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import asyncio
import logging
from forecasting_tools.ai_models.general_llm import GeneralLlm
from forecasting_tools.auto_optimizers.bot_optimizer import BotOptimizer
from forecasting_tools.auto_optimizers.prompt_data_models import ResearchTool, ToolName
from forecasting_tools.data_models.data_organizer import DataOrganizer
from forecasting_tools.util.custom_logger import CustomLogger
logger = logging.getLogger(__name__)
async def run_optimizer(
    metaculus_question_path: str = (
        "logs/forecasts/benchmarks/questions_v4.0.train__50qs.json"
    ),
) -> None:
    """Run the evolutionary prompt optimizer over a saved Metaculus question set.

    Loads questions from ``metaculus_question_path``, configures the research
    tools and LLMs the candidate bots may use, then delegates to
    ``BotOptimizer.optimize_a_combined_research_and_reasoning_prompt``.

    Args:
        metaculus_question_path: JSON file of saved questions to evaluate
            candidate prompts against. Defaults to the training set this
            script originally hard-coded, so existing callers are unaffected.
    """
    # ----- Settings for the optimizer -----
    questions = DataOrganizer.load_questions_from_file_path(metaculus_question_path)
    questions_batch_size = 25  # questions evaluated per benchmark batch
    # Research tools a candidate bot may invoke, each with a per-run call cap.
    # Caps skew toward the low-cost tool; the expensive reasoning search is
    # limited to a single call.
    research_tools = [
        ResearchTool(
            tool_name=ToolName.PERPLEXITY_LOW_COST,
            max_calls=7,
        ),
        ResearchTool(
            tool_name=ToolName.ASKNEWS,
            max_calls=2,
        ),
        ResearchTool(
            tool_name=ToolName.DATA_ANALYZER,
            max_calls=1,
        ),
        ResearchTool(
            tool_name=ToolName.PERPLEXITY_REASONING_PRO_SEARCH,
            max_calls=1,
        ),
    ]
    # Separate models per role: a strong model for prompt ideation, cheap
    # models for research coordination and forecasting reasoning.
    ideation_llm = "openrouter/google/gemini-2.5-pro"
    research_coordination_llm = "openrouter/openai/gpt-4.1-nano"
    reasoning_llm = GeneralLlm(model="openrouter/openai/gpt-4.1-nano", temperature=0.3)
    folder_to_save_benchmarks = "logs/forecasts/benchmarks/"
    num_iterations_per_run = 3
    # Background info is stripped so prompts are scored on research ability,
    # not on pre-supplied context — TODO confirm against BotOptimizer docs.
    remove_background_info = True
    # Evolutionary-search population parameters.
    initial_prompt_population_size = 20
    survivors_per_iteration = 5
    mutated_prompts_per_survivor = 3
    breeded_prompts_per_iteration = 5
    # ------ Run the optimizer -----
    await BotOptimizer.optimize_a_combined_research_and_reasoning_prompt(
        evaluation_questions=questions,
        research_tools_bot_can_use=research_tools,
        research_agent_llm_name=research_coordination_llm,
        reasoning_llm=reasoning_llm,
        batch_size_for_question_evaluation=questions_batch_size,
        num_iterations_per_run=num_iterations_per_run,
        ideation_llm_name=ideation_llm,
        remove_background_info_from_questions=remove_background_info,
        folder_to_save_benchmarks=folder_to_save_benchmarks,
        initial_prompt_population_size=initial_prompt_population_size,
        survivors_per_iteration=survivors_per_iteration,
        mutated_prompts_per_survivor=mutated_prompts_per_survivor,
        breeded_prompts_per_iteration=breeded_prompts_per_iteration,
    )
if __name__ == "__main__":
    # Configure project-wide logging before anything emits log records.
    CustomLogger.setup_logging()
    # One event loop for the entire optimization run.
    asyncio.run(run_optimizer())