From ab84711406d9625ec863c973365a3a49ed7c253b Mon Sep 17 00:00:00 2001 From: Vladislav Nosivskoy Date: Mon, 8 Dec 2025 15:50:25 +0300 Subject: [PATCH 1/5] add deepseek v32 chat template support Signed-off-by: Vladislav Nosivskoy --- lib/llm/src/preprocessor/prompt.rs | 1 + .../src/preprocessor/prompt/deepseek_v32.rs | 514 ++++ lib/llm/src/preprocessor/prompt/template.rs | 12 + .../tests/data/deepseek-v3.2/test_input.json | 150 + .../test_input_search_w_date.json | 732 +++++ .../test_input_search_wo_date.json | 533 ++++ .../tests/data/deepseek-v3.2/test_output.txt | 112 + .../test_output_search_w_date.txt | 2455 +++++++++++++++++ .../test_output_search_wo_date.txt | 1069 +++++++ lib/llm/tests/deepseek_v32_encoding.rs | 408 +++ 10 files changed, 5986 insertions(+) create mode 100644 lib/llm/src/preprocessor/prompt/deepseek_v32.rs create mode 100644 lib/llm/tests/data/deepseek-v3.2/test_input.json create mode 100644 lib/llm/tests/data/deepseek-v3.2/test_input_search_w_date.json create mode 100644 lib/llm/tests/data/deepseek-v3.2/test_input_search_wo_date.json create mode 100644 lib/llm/tests/data/deepseek-v3.2/test_output.txt create mode 100644 lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt create mode 100644 lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt create mode 100644 lib/llm/tests/deepseek_v32_encoding.rs diff --git a/lib/llm/src/preprocessor/prompt.rs b/lib/llm/src/preprocessor/prompt.rs index 6c31c94887..5984132eff 100644 --- a/lib/llm/src/preprocessor/prompt.rs +++ b/lib/llm/src/preprocessor/prompt.rs @@ -23,6 +23,7 @@ use minijinja::value::Value; use std::collections::HashMap; use std::sync::Arc; +pub mod deepseek_v32; mod template; pub use template::ContextMixins; diff --git a/lib/llm/src/preprocessor/prompt/deepseek_v32.rs b/lib/llm/src/preprocessor/prompt/deepseek_v32.rs new file mode 100644 index 0000000000..bd8adef5d4 --- /dev/null +++ b/lib/llm/src/preprocessor/prompt/deepseek_v32.rs @@ -0,0 +1,514 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +//! DeepSeek V3.2 native prompt formatting +//! +//! This module provides native Rust implementation of DeepSeek V3.2's chat template, +//! based on their official Python code: encoding_dsv32.py +//! +//! Reference: https://huggingface.co/deepseek-ai/DeepSeek-V3.2/tree/main/encoding + +use anyhow::{Context, Result}; +use serde_json::Value as JsonValue; + +/// Special tokens for DeepSeek V3.2 +pub mod tokens { + pub const BOS: &str = "<|begin▁of▁sentence|>"; + pub const EOS: &str = "<|end▁of▁sentence|>"; + pub const THINKING_START: &str = ""; + pub const THINKING_END: &str = ""; + pub const DSML_TOKEN: &str = "|DSML|"; + pub const USER_START: &str = "<|User|>"; + pub const ASSISTANT_START: &str = "<|Assistant|>"; +} + +/// System message template for tools +const TOOLS_SYSTEM_TEMPLATE: &str = r#"## Tools + +You have access to a set of tools you can use to answer the user's question. +You can invoke functions by writing a "<{dsml_token}function_calls>" block like the following as part of your reply to the user: +<{dsml_token}function_calls> +<{dsml_token}invoke name="$FUNCTION_NAME"> +<{dsml_token}parameter name="$PARAMETER_NAME" string="true|false">$PARAMETER_VALUE +... + +<{dsml_token}invoke name="$FUNCTION_NAME2"> +... + + + +String and scalar parameters should be specified as is without any escaping or quotes, while lists and objects should use JSON format. 
The "string" attribute should be set to "true" for string type parameters and "false" for other types (numbers, booleans, arrays, objects). + +If the thinking_mode is enabled, then after function results you should strongly consider outputting a thinking block. Here is an example: + +<{dsml_token}function_calls> +... + + + +... + + +{thinking_start_token}...thinking about results{thinking_end_token} + +Here are the functions available in JSONSchema format: + +{tool_schemas} + +"#; + +const RESPONSE_FORMAT_TEMPLATE: &str = + "## Response Format:\n\nYou MUST strictly adhere to the following schema to reply:\n{schema}"; + +const TOOL_CALL_TEMPLATE: &str = + "<{dsml_token}invoke name=\"{name}\">\n{arguments}\n"; + +#[allow(dead_code)] +const TOOL_CALLS_TEMPLATE: &str = + "<{dsml_token}function_calls>\n{tool_calls}\n"; + +const TOOL_OUTPUT_TEMPLATE: &str = "\n{content}"; + +/// Thinking mode for the model +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ThinkingMode { + Chat, + Thinking, +} + +impl ThinkingMode { + pub fn as_str(&self) -> &'static str { + match self { + ThinkingMode::Chat => "chat", + ThinkingMode::Thinking => "thinking", + } + } +} + +/// Convert value to JSON string matching Python's json.dumps() format with spaces +fn to_json(value: &JsonValue) -> String { + // Python's json.dumps() adds spaces after colons and commas + // {"name": "value", "key": "value2"} + // Rust's serde_json::to_string() produces: + // {"name":"value","key":"value2"} + // We need to match Python's format for test compatibility + + let compact = serde_json::to_string(value).unwrap_or_else(|_| "{}".to_string()); + + // Add spaces after colons and commas (but not inside strings) + let mut result = String::with_capacity(compact.len() + compact.len() / 4); + let mut in_string = false; + let mut prev_char = '\0'; + + for ch in compact.chars() { + if ch == '"' && prev_char != '\\' { + in_string = !in_string; + } + + result.push(ch); + + // Add space after ':' or ',' if not inside a string + if !in_string && (ch == ':' || ch == ',') { + result.push(' '); + } + + prev_char = ch; + } + + result +} + +/// Extract tools from OpenAI format +fn tools_from_openai_format(tools: &[JsonValue]) -> Vec { + tools + .iter() + .filter_map(|tool| tool.get("function").cloned()) + .collect() +} + +/// Render tools section for system prompt +fn render_tools(tools: &[JsonValue]) -> String { + let tools_json: Vec = tools_from_openai_format(tools) + .iter() + .map(to_json) + .collect(); + + TOOLS_SYSTEM_TEMPLATE + .replace("{tool_schemas}", &tools_json.join("\n")) + .replace("{dsml_token}", tokens::DSML_TOKEN) + .replace("{thinking_start_token}", tokens::THINKING_START) + .replace("{thinking_end_token}", tokens::THINKING_END) +} + +/// Find the last user or developer message index +fn find_last_user_index(messages: &[JsonValue]) -> Option { + messages + .iter() + .enumerate() + .rev() + .find(|(_, msg)| { + msg.get("role") + .and_then(|r| r.as_str()) + .map(|r| r == "user" || r == "developer") + .unwrap_or(false) + }) + .map(|(idx, _)| idx) +} + +/// Encode arguments to DSML parameter format +fn encode_arguments_to_dsml(tool_call: &JsonValue) -> Result { + let arguments_str = tool_call + .get("arguments") + .and_then(|a| a.as_str()) + .context("Missing or invalid 'arguments' field")?; + + let arguments: JsonValue = + serde_json::from_str(arguments_str).context("Failed to parse arguments JSON")?; + + let arguments_obj = arguments + .as_object() + .context("Arguments must be an object")?; + + let mut params = Vec::new(); + for 
(key, value) in arguments_obj { + let is_string = value.is_string(); + let value_str = if is_string { + value.as_str().unwrap().to_string() + } else { + to_json(value) + }; + + let param = format!( + "<{}parameter name=\"{}\" string=\"{}\">{}", + tokens::DSML_TOKEN, + key, + if is_string { "true" } else { "false" }, + value_str, + tokens::DSML_TOKEN + ); + params.push(param); + } + + Ok(params.join("\n")) +} + +/// Render a single message +fn render_message( + index: usize, + messages: &[JsonValue], + thinking_mode: ThinkingMode, + last_user_idx: Option, +) -> Result { + let msg = &messages[index]; + let role = msg + .get("role") + .and_then(|r| r.as_str()) + .context("Missing 'role' field")?; + + let mut prompt = String::new(); + + match role { + "system" => { + let content = msg.get("content").and_then(|c| c.as_str()).unwrap_or(""); + prompt.push_str(content); + + if let Some(tools) = msg.get("tools").and_then(|t| t.as_array()) { + prompt.push_str("\n\n"); + prompt.push_str(&render_tools(tools)); + } + + if let Some(response_format) = msg.get("response_format") { + prompt.push_str("\n\n"); + prompt.push_str( + &RESPONSE_FORMAT_TEMPLATE.replace("{schema}", &to_json(response_format)), + ); + } + } + + "user" => { + let content = msg.get("content").and_then(|c| c.as_str()).unwrap_or(""); + prompt.push_str(tokens::USER_START); + prompt.push_str(content); + prompt.push_str(tokens::ASSISTANT_START); + + if Some(index) == last_user_idx && thinking_mode == ThinkingMode::Thinking { + prompt.push_str(tokens::THINKING_START); + } else { + prompt.push_str(tokens::THINKING_END); + } + } + + "developer" => { + let content = msg + .get("content") + .and_then(|c| c.as_str()) + .context("Developer role requires content")?; + + let mut content_developer = String::new(); + + if let Some(tools) = msg.get("tools").and_then(|t| t.as_array()) { + content_developer.push_str("\n\n"); + content_developer.push_str(&render_tools(tools)); + } + + if let Some(response_format) = msg.get("response_format") { + content_developer.push_str("\n\n"); + content_developer.push_str( + &RESPONSE_FORMAT_TEMPLATE.replace("{schema}", &to_json(response_format)), + ); + } + + content_developer.push_str(&format!("\n\n# The user's message is: {}", content)); + + prompt.push_str(tokens::USER_START); + prompt.push_str(&content_developer); + prompt.push_str(tokens::ASSISTANT_START); + + if Some(index) == last_user_idx && thinking_mode == ThinkingMode::Thinking { + prompt.push_str(tokens::THINKING_START); + } else { + prompt.push_str(tokens::THINKING_END); + } + } + + "assistant" => { + // Handle reasoning content + // NOTE: If this assistant comes after last user message, the opening + // was already added in the user message. We only need to add content and closing tag. 
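+            // Illustrative sketch of the intended shape (assuming the model's
+            // <think>/</think> reasoning markers, which are defined as the
+            // THINKING_START/THINKING_END tokens above):
+            //   ...<|User|>question<|Assistant|><think>      <- opened by the user turn
+            //   reasoning_content</think>answer<|end▁of▁sentence|>   <- emitted here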
+ if thinking_mode == ThinkingMode::Thinking + && last_user_idx.is_some_and(|idx| index > idx) + && let Some(reasoning) = msg.get("reasoning_content").and_then(|r| r.as_str()) + { + // DON'T add THINKING_START - it was already added in user message + prompt.push_str(reasoning); + prompt.push_str(tokens::THINKING_END); + } + + // Handle content + if let Some(content) = msg.get("content").and_then(|c| c.as_str()) { + prompt.push_str(content); + } + + // Handle tool calls + if let Some(tool_calls) = msg.get("tool_calls").and_then(|t| t.as_array()) + && !tool_calls.is_empty() + { + prompt.push_str("\n\n"); + prompt.push_str(&format!("<{}function_calls>\n", tokens::DSML_TOKEN)); + + for tool_call in tool_calls { + let name = tool_call + .get("function") + .and_then(|f| f.get("name")) + .and_then(|n| n.as_str()) + .context("Missing tool call name")?; + + let arguments = encode_arguments_to_dsml( + tool_call.get("function").context("Missing function")?, + )?; + + let invoke = TOOL_CALL_TEMPLATE + .replace("{dsml_token}", tokens::DSML_TOKEN) + .replace("{name}", name) + .replace("{arguments}", &arguments); + + prompt.push_str(&invoke); + prompt.push('\n'); + } + + prompt.push_str(&format!("", tokens::DSML_TOKEN)); + } + + prompt.push_str(tokens::EOS); + } + + "tool" => { + // Find the previous assistant message + let mut prev_assistant_idx = None; + let mut tool_count = 0; + + for i in (0..index).rev() { + let prev_role = messages[i].get("role").and_then(|r| r.as_str()); + if prev_role == Some("tool") { + tool_count += 1; + } else if prev_role == Some("assistant") { + prev_assistant_idx = Some(i); + break; + } + } + + let tool_call_order = tool_count + 1; + + // Add opening tag for first tool result + if tool_call_order == 1 { + prompt.push_str("\n\n"); + } + + // Add result + let content = msg.get("content").and_then(|c| c.as_str()).unwrap_or(""); + prompt.push_str(&TOOL_OUTPUT_TEMPLATE.replace("{content}", content)); + + // Check if this is the last tool result + if let Some(prev_idx) = prev_assistant_idx { + let tool_calls_count = messages[prev_idx] + .get("tool_calls") + .and_then(|t| t.as_array()) + .map(|a| a.len()) + .unwrap_or(0); + + if tool_call_order == tool_calls_count { + prompt.push_str("\n"); + + if last_user_idx.is_some_and(|idx| index >= idx) + && thinking_mode == ThinkingMode::Thinking + { + prompt.push_str("\n\n"); + prompt.push_str(tokens::THINKING_START); + } else { + prompt.push_str("\n\n"); + prompt.push_str(tokens::THINKING_END); + } + } + } + } + + _ => anyhow::bail!("Unknown role: {}", role), + } + + Ok(prompt) +} + +/// Encode messages to prompt string +/// +/// # Arguments +/// * `messages` - Array of messages in OpenAI format +/// * `thinking_mode` - Whether to use thinking mode +/// * `add_bos_token` - Whether to add BOS token at start +/// +/// # Returns +/// Formatted prompt string ready for tokenization +pub fn encode_messages( + messages: &[JsonValue], + thinking_mode: ThinkingMode, + add_bos_token: bool, +) -> Result { + let mut prompt = String::new(); + + if add_bos_token { + prompt.push_str(tokens::BOS); + } + + let last_user_idx = find_last_user_index(messages); + + for (index, _) in messages.iter().enumerate() { + let msg_prompt = render_message(index, messages, thinking_mode, last_user_idx)?; + prompt.push_str(&msg_prompt); + } + + Ok(prompt) +} + +/// DeepSeek V3.2 Prompt Formatter +/// +/// Implements OAIPromptFormatter for DeepSeek V3.2 models using native Rust implementation +#[derive(Debug)] +pub struct DeepSeekV32Formatter { + thinking_mode: 
ThinkingMode, +} + +impl DeepSeekV32Formatter { + pub fn new(thinking_mode: ThinkingMode) -> Self { + Self { thinking_mode } + } + + /// Create formatter with thinking mode enabled (default for DSV3.2) + pub fn new_thinking() -> Self { + Self::new(ThinkingMode::Thinking) + } + + /// Create formatter with chat mode + pub fn new_chat() -> Self { + Self::new(ThinkingMode::Chat) + } +} + +impl super::OAIPromptFormatter for DeepSeekV32Formatter { + fn supports_add_generation_prompt(&self) -> bool { + true + } + + fn render(&self, req: &dyn super::OAIChatLikeRequest) -> Result { + // Get messages from request + let messages_value = req.messages(); + + // Convert minijinja Value to serde_json Value + let messages_json = + serde_json::to_value(&messages_value).context("Failed to convert messages to JSON")?; + + let messages_array = messages_json + .as_array() + .context("Messages is not an array")?; + + // Encode with native implementation + encode_messages( + messages_array, + self.thinking_mode, + true, // always add BOS token + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_simple_conversation() { + let messages = json!([ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ]); + + let result = + encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true).unwrap(); + + assert!(result.starts_with(tokens::BOS)); + assert!(result.contains("You are a helpful assistant.")); + assert!(result.contains(tokens::USER_START)); + assert!(result.contains("Hello!")); + assert!(result.contains(tokens::ASSISTANT_START)); + assert!(result.contains(tokens::THINKING_START)); + } + + #[test] + fn test_tools_rendering() { + let messages = json!([ + { + "role": "system", + "content": "You are helpful.", + "tools": [{ + "type": "function", + "function": { + "name": "get_weather", + "description": "Get weather", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string"} + } + } + } + }] + }, + {"role": "user", "content": "What's the weather?"} + ]); + + let result = + encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true).unwrap(); + + assert!(result.contains("## Tools")); + assert!(result.contains("get_weather")); + assert!(result.contains("")); + } +} diff --git a/lib/llm/src/preprocessor/prompt/template.rs b/lib/llm/src/preprocessor/prompt/template.rs index 8ea6f71494..1b768e8431 100644 --- a/lib/llm/src/preprocessor/prompt/template.rs +++ b/lib/llm/src/preprocessor/prompt/template.rs @@ -18,6 +18,18 @@ use tokcfg::{ChatTemplate, ChatTemplateValue}; impl PromptFormatter { pub fn from_mdc(mdc: &ModelDeploymentCard) -> Result { + // Special handling for DeepSeek-V3.2(-Speciale) which doesn't provide Jinja chat_template + let name_lower = mdc.display_name.to_lowercase(); + if name_lower.contains("deepseek") + && name_lower.contains("v3.2") + && !name_lower.contains("exp") + { + tracing::info!("Detected DeepSeek V3.2 model (non-Exp), using native Rust formatter"); + return Ok(Self::OAI(Arc::new( + super::deepseek_v32::DeepSeekV32Formatter::new_thinking(), + ))); + } + match mdc .prompt_formatter .as_ref() diff --git a/lib/llm/tests/data/deepseek-v3.2/test_input.json b/lib/llm/tests/data/deepseek-v3.2/test_input.json new file mode 100644 index 0000000000..dc22e71314 --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/test_input.json @@ -0,0 +1,150 @@ +{ + "tools": [ + { + "type": "function", + "function": { + "name": "get_datetime", + 
"description": "Get the current date and time", + "parameters": { + "type": "object", + "properties": { + "timezone": { + "type": "string", + "description": "The timezone, e.g. America/New_York, UTC" + } + }, + "required": ["timezone"] + } + } + }, + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather for a specific date and location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name, e.g. New York, San Francisco" + }, + "date": { + "type": "string", + "description": "The date in YYYY-MM-DD format" + } + }, + "required": ["location", "date"] + } + } + } + ], + "messages": [ + { + "role": "system", + "content": "You are a helpful Assistant." + }, + { + "role": "user", + "content": "What's the weather tomorrow in San Francisco and New York?" + }, + { + "role": "assistant", + "reasoning_content": "User is asking about tomorrow's weather. I need to first get the current date to calculate tomorrow's date.", + "tool_calls": [ + { + "id": "call_xK9mN3pL2qR8vT5wY6hZ1aB4", + "type": "function", + "function": { + "arguments": "{\"timezone\": \"America/New_York\"}", + "name": "get_datetime" + } + } + ] + }, + { + "tool_call_id": "call_xK9mN3pL2qR8vT5wY6hZ1aB4", + "role": "tool", + "content": "{\"current_date\": \"2024-01-15\", \"current_time\": \"14:30:00\", \"timezone\": \"America/New_York\"}" + }, + { + "role": "assistant", + "reasoning_content": "Now I know today is 2024-01-15, so tomorrow is 2024-01-16. Let me query the weather for San Francisco and New York.", + "tool_calls": [ + { + "id": "call_bN7kR9mX3pQ2wL5vY8jZ4cD6", + "type": "function", + "function": { + "arguments": "{\"location\": \"San Francisco\", \"date\": \"2024-01-16\"}", + "name": "get_weather" + } + }, + { + "id": "call_dP9mL7kX5rT4yN3wZ2hV8eF1", + "type": "function", + "function": { + "arguments": "{\"location\": \"New York\", \"date\": \"2024-01-16\"}", + "name": "get_weather" + } + } + ] + }, + { + "tool_call_id": "call_bN7kR9mX3pQ2wL5vY8jZ4cD6", + "role": "tool", + "content": "{\"location\": \"San Francisco\", \"date\": \"2024-01-16\", \"temperature_high\": \"65\", \"temperature_low\": \"55\", \"weather\": \"partly cloudy\", \"humidity\": \"70%\"}" + }, + { + "tool_call_id": "call_dP9mL7kX5rT4yN3wZ2hV8eF1", + "role": "tool", + "content": "{\"location\": \"New York\", \"date\": \"2024-01-16\", \"temperature_high\": \"28\", \"temperature_low\": \"15\", \"weather\": \"sunny\", \"humidity\": \"35%\"}" + }, + { + "role": "assistant", + "reasoning_content": "Retrieved weather data for both cities. Now I'll format a nice response for the user.", + "content": "Based on the forecast, here's tomorrow's weather (January 16, 2024):\n\n**San Francisco**:\n- Weather: Partly cloudy\n- High: 65°F\n- Low: 55°F\n- Humidity: 70%\n\n**New York**:\n- Weather: Sunny\n- High: 28°F\n- Low: 15°F\n- Humidity: 35%\n\nSan Francisco will be mild and partly cloudy, while New York will be cold but sunny. If you're in New York, dress warmly!" + }, + { + "role": "user", + "content": "Thanks! What about the day after tomorrow?" + }, + { + "role": "assistant", + "reasoning_content": "User is now asking about the day after tomorrow, which is 2024-01-17. 
I can query directly since I already know today's date.", + "tool_calls": [ + { + "id": "call_fR3nK8mV7pL4xT2yW9jB5gH3", + "type": "function", + "function": { + "arguments": "{\"location\": \"San Francisco\", \"date\": \"2024-01-17\"}", + "name": "get_weather" + } + }, + { + "id": "call_hT5pN2kY9rV6zL3wX1mD7jK8", + "type": "function", + "function": { + "arguments": "{\"location\": \"New York\", \"date\": \"2024-01-17\"}", + "name": "get_weather" + } + } + ] + }, + { + "tool_call_id": "call_fR3nK8mV7pL4xT2yW9jB5gH3", + "role": "tool", + "content": "{\"location\": \"San Francisco\", \"date\": \"2024-01-17\", \"temperature_high\": \"68\", \"temperature_low\": \"58\", \"weather\": \"light rain\", \"humidity\": \"85%\"}" + }, + { + "tool_call_id": "call_hT5pN2kY9rV6zL3wX1mD7jK8", + "role": "tool", + "content": "{\"location\": \"New York\", \"date\": \"2024-01-17\", \"temperature_high\": \"32\", \"temperature_low\": \"22\", \"weather\": \"cloudy\", \"humidity\": \"50%\"}" + }, + { + "role": "assistant", + "reasoning_content": "Got the weather data for the day after tomorrow. Let me format this for the user.", + "content": "Here's the forecast for the day after tomorrow (January 17, 2024):\n\n**San Francisco**:\n- Weather: Light rain\n- High: 68°F\n- Low: 58°F\n- Humidity: 85%\n\n**New York**:\n- Weather: Cloudy\n- High: 32°F\n- Low: 22°F\n- Humidity: 50%\n\nSan Francisco will have light rain with temperatures rising slightly, so bring an umbrella. New York will be a bit warmer but still cold and cloudy, so continue to bundle up." + } + ] +} + diff --git a/lib/llm/tests/data/deepseek-v3.2/test_input_search_w_date.json b/lib/llm/tests/data/deepseek-v3.2/test_input_search_w_date.json new file mode 100644 index 0000000000..0d79abbc98 --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/test_input_search_w_date.json @@ -0,0 +1,732 @@ +{ + "messages": [ + { + "role": "developer", + "content": "帮我调研一下,目前有哪些针对search agent的benchmark?详细介绍各自的特点、使用场景、例题。\n\n\n## Today’s Date\n2025-11-27, Thursday.\n", + "tools": [ + { + "type": "function", + "function": { + "name": "search", + "description": "Searches for information related to query and displays topn results.", + "parameters": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query string" + }, + "topn": { + "type": "integer", + "description": "Number of top results to display", + "default": 10 + }, + "source": { + "type": "string", + "description": "Source to search within", + "enum": [ + "web", + "news" + ], + "default": "web" + } + }, + "required": [ + "query" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + }, + { + "type": "function", + "function": { + "name": "open", + "description": "Opens the link id from the page indicated by cursor starting at line number loc, showing num_lines lines. Valid link ids are displayed with the formatting: 【{id}†.*】. If cursor is not provided, the most recent page is implied. If id is a string, it is treated as a fully qualified URL associated with source. If loc is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available. 
Use this function without id to scroll to a new location of an opened page.", + "parameters": { + "type": "object", + "properties": { + "id": { + "description": "Link ID (number) or fully qualified URL (string)", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "default": -1 + }, + "cursor": { + "type": "integer", + "description": "Page cursor indicator", + "default": -1 + }, + "loc": { + "type": "integer", + "description": "Starting line number", + "default": -1 + }, + "num_lines": { + "type": "integer", + "description": "Number of lines to show", + "default": -1 + }, + "view_source": { + "type": "boolean", + "description": "Whether to view source", + "default": false + }, + "source": { + "type": "string", + "description": "Source associated with the URL", + "enum": [ + "web", + "news" + ], + "default": "web" + } + }, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + }, + { + "type": "function", + "function": { + "name": "find", + "description": "Finds exact matches of pattern in the current page, or the page given by cursor.", + "parameters": { + "type": "object", + "properties": { + "pattern": { + "type": "string", + "description": "The exact pattern to search for" + }, + "cursor": { + "type": "integer", + "description": "Page cursor to search in", + "default": -1 + } + }, + "required": [ + "pattern" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + } + ] + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "用户想调研目前针对搜索智能体(search agent)的基准测试(benchmark),要求详细介绍各自的特点、使用场景和例题。我需要提供一份全面的概述,涵盖现有的基准测试,例如WebGPT、WebShop、ALFWorld、HotpotQA、FEVER、TriviaQA、Natural Questions、SearchQA、MS MARCO、BEIR、LoTTE、MIRACL、Zero-shot IR基准等。同时,需要区分这些基准测试是针对检索任务、问答任务,还是具体针对搜索智能体的评估。此外,还应考虑那些专门为评估搜索智能体(即能够与环境交互、执行搜索、浏览网页、使用工具的智能体)而设计的基准测试,例如WebGPT、WebShop、ALFWorld、Mind2Web、WebArena、VisualWebArena、SearchBench、WebVLN、WebNav等。我需要详细介绍每个基准测试的特点、使用场景,并举例说明。为了确保信息的全面性和时效性,我将同时进行两项搜索:一项是针对“search agent benchmark”的通用搜索,以获取广泛的基准测试信息;另一项是针对“搜索智能体 基准测试”的中文搜索,以获取更符合用户语言习惯和可能更具体的中文资料。", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"search agent benchmark 2024\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"搜索智能体 基准测试\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[0] search agent benchmark 2024 (Search_Results/search agent benchmark 2024)\n**viewing lines [0 - 39] of 39**\n\nL0: \nL1: URL: Search_Results/search agent benchmark 2024\nL2: # Search Results\nL3: \nL4: * 【0†HSCodeComp: A Realistic and Expert-level Benchmark for ...; publish_date:\nL5: none†arxiv.org】 Oct 22, 2025 — To fill this gap, we introduce HSCodeComp, the \nL6: first realistic, expert-level e-commerce benchmark designed to evaluate deep \nL7: search agents in ...\nL8: * 【1†open-compass/GTA - A Benchmark for General Tool Agents; publish_date: \nL9: none†github.com】 GTA is a benchmark to evaluate the tool-use capability of LLM-\nL10: based agents in real-world scenarios. 
It features three main aspects.\nL11: * 【2†Benchmarking real-time trust scoring across five AI Agent ...; \nL12: publish_date: none†cleanlab.ai】 Aug 20, 2025 — This article evaluates 5 AI Agent\nL13: architectures over the BOLAA (ICLR 2024) benchmark, and assesses the effects of\nL14: adding automated trust ...\nL15: * 【3†10 AI agent benchmarks; publish_date: none†www.evidentlyai.com】 Jul 11, \nL16: 2025 — We put together 10 AI agent benchmarks designed to assess how well \nL17: different LLMs perform as agents in real-world scenarios, ...\nL18: * 【4†A state-of-the-art search API purpose-built for agents; publish_date: \nL19: none†parallel.ai】 Jul 31, 2025 — To evaluate real-world performance of the \nL20: Parallel Search MCP Server, we created the WISER-Search benchmark which blends \nL21: WISER-Fresh (queries ...\nL22: * 【5†AI Agent Benchmarks are Broken; publish_date: none†medium.com】 We break \nL23: down the failure modes in current AI agent benchmarks and introduce a checklist \nL24: that minimizes the gamability of AI agent benchmarks.\nL25: * 【6†Benchmarks and Tree Search for Multimodal LLM Web Agents; publish_date: \nL26: none†dpfried.github.io】 2024, When is Tree Search Useful? ○ Dealing with \nL27: destructive actions. ○ Some things on the web are very difficult to undo, e.g., \nL28: ordering an item. 56.\nL29: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: \nL30: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact \nL31: with (simulated) human users and programmatic APIs while following domain-\nL32: specific policies in a consistent ...\nL33: * 【8†Browser Use = state of the art Web Agent; publish_date: none†browser-\nL34: use.com】 Dec 15, 2024 — Browser Use has achieved state-of-the-art performance on\nL35: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 \nL36: diverse web tasks.\nL37: * 【9†FutureSearch Benchmarks; publish_date: none†evals.futuresearch.ai】 Find \nL38: the original source of a given claim. Example: From , more than 8 out of 1000 \nL39: users clicked on a phishing link monthly in 2024, up 190% vs 2023.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[1] 搜索智能体 基准测试 (Search_Results/搜索智能体 基准测试)\n**viewing lines [0 - 33] of 33**\n\nL0: \nL1: URL: Search_Results/搜索智能体 基准测试\nL2: # Search Results\nL3: \nL4: * 【0†WideSearch:揭示AI 智能体缺失的「广度」能力; publish_date: none†zhuanlan.zhihu.com】 Aug \nL5: 16, 2025 — 为系统评估智能体在该任务上的能力,论文构建了第一个专门的基准测试 WideSearch ,包含200 个源于真实世界、横跨18 \nL6: 个领域的高质量任务。 通过对超过10 个 ...\nL7: * 【1†GAIA: 一个严苛的智能体基准- HuggingFace; publish_date: none†www.cnblogs.com】 Jul 9,\nL8: 2024 — 我们使用一个用库构建的代码智能体 在GAIA 基准上进行测试,这可以说是最困难、最全面的智能体基准测试……最终我们取得了第一名的成绩! \nL9: GAIA: 一个严苛的 ...\nL10: * 【2†AI搜索智能体遭遇新挑战:滑铁卢大学团队提出更公平透明的 ...; publish_date: none†www.techwalker.com】 \nL11: Aug 14, 2025 — \nL12: 目前评测AI搜索智能体主要依靠BrowseComp这样的基准测试,它就像一场实时的开卷考试,让AI在真实的网络环境中搜索信息来回答复杂问题。听起来很合理 ...\nL13: * 【3†Agentic AI基础设施实践经验系列(六):Agent质量评估 - AWS; publish_date: \nL14: none†aws.amazon.com】 Sep 19, 2025 — TAU-bench \nL15: 是一个评估AI智能体在真实世界环境中可靠性的基准测试。它评估智能体是否能够在动态的多轮对话中与用户进行交互,理解需求并完成任务。T-bench ...\nL16: * 【4†DeepAgent:能自己找工具的通用推理智能体 - 高瓴人工智能学院; publish_date: none†ai.ruc.edu.cn】 \nL17: Nov 6, 2025 — 在八大基准测试中,DeepAgent在绝大多数任务上全面领先所有基线模型。 \nL18: 开放环境优势:在最具挑战的“开放工具检索”场景下(如ToolBench),其成功率达到64%,远 ...\nL19: * 【5†BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试; publish_date: none†blog.csdn.net】 Sep \nL20: 22, 2025 — 该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。 核心特点. \nL21: 挑战性问题设计:BrowseComp的问题设计遵循严格的难度标准:. 
人类创建者确保 ...\nL22: * 【6†什么是GAIA?; publish_date: none†huggingface.co】 什么是GAIA? GAIA \nL23: 是一个用于评估AI助手在需要核心能力组合的真实世界任务上的表现的基准,这些核心能力包括推理、多模态理解、网页浏览和熟练的工具使用。\nL24: * 【7†SuperCLUE-Agent: Agent智能体中文原生任务能力测评基准; publish_date: \nL25: none†www.cluebenchmarks.com】 AI \nL26: Agent是一个能够自主理解、规划决策和执行复杂任务的智能体,有代表性的Agent比如AutoGPT、AgentGPT等。 \nL27: 现有关于Agent能力的测评,主要是在英文场景或任务的测评。目前还 ...\nL28: * 【8†Gaia2 与ARE:赋能社区的智能体评测; publish_date: none†hub.baai.ac.cn】 Sep 24, 2025 — \nL29: 快速评估任意智能体:基于真实或模拟数据,测试不同规则、工具、内容和验证方式下的表现 · 测试智能体的 工具调用与编排能力:可结合本地应用或MCP 工具 · 生成\nL30: ...\nL31: * 【9†许多AI 智能体评测基准并不可靠; publish_date: none†my.oschina.net】 Aug 13, 2025 — \nL32: 当前AI智能体基准测试存在严重缺陷,亟需更严谨的评估框架。作者提出AI智能体基准测试核查清单(ABC),包含43个项目,旨在识别基准测试潜在陷阱,构建可靠评估 \nL33: ...", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "第一个搜索结果(针对“search agent benchmark 2024”)显示了一些相关条目,例如“HSCodeComp: A Realistic and Expert-level Benchmark for ...”、“open-compass/GTA - A Benchmark for General Tool Agents”、“10 AI agent benchmarks”、“WISER-Search benchmark”、“Benchmarks and Tree Search for Multimodal LLM Web Agents”、“τ-bench”、“WebVoyager benchmark”、“FutureSearch Benchmarks”。这些可能都与搜索智能体相关。第二个搜索结果(针对“搜索智能体 基准测试”)显示了一些中文资料,例如“WideSearch”、“BrowseComp”、“GAIA”、“SuperCLUE-Agent”等。这些都可能与搜索智能体相关。为了全面了解这些基准测试,我们需要同时打开并分析其中几个最有潜力的结果。具体来说,我们将打开第一个搜索结果中的“HSCodeComp”(索引0)和“open-compass/GTA”(索引1),以及第二个搜索结果中的“WideSearch”(索引0)和“BrowseComp”(索引5)。", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 0, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 0, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 1, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 1, \"id\": 5}" + } + } + ] + }, + { + "role": "tool", + "content": "[2] (https://ar5iv.org/html/2510.19631v1)\n**viewing lines [0 - 119] of 983**\n\nL0: \nL1: URL: https://ar5iv.org/html/2510.19631v1\nL2: # HSCodeComp: A Realistic and Expert-level Benchmark for Deep Search Agents in \nL3: Hierarchical Rule Application\nL4: \nL5: Yiqian Yang† Tian Lan† Qianghuai Jia∗ Li Zhu Hui Jiang Hang Zhu Longyue Wang \nL6: Weihua Luo Kaifu Zhang\nL7: \nL8: Alibaba International Digital Commerce∗* Corresponding Author: Qianghuai Jia \nL9: (qianghuai.jqh@alibaba-inc.com)\nL10: †\\dagger Equal Contribution: Yiqian Yang\nL11: \nL12: Tian Lan\nL13: \nL14: ###### Abstract\nL15: \nL16: Abstract\nL17: \nL18: Effective deep search agents must not only access open-domain and domain-\nL19: specific knowledge but also apply complex rules—such as legal clauses, medical \nL20: manuals and tariff rules. These rules often feature vague boundaries and \nL21: implicit logic relationships, making precise application challenging for agents.\nL22: However, this critical capability is largely overlooked by current agent \nL23: benchmarks. To fill this gap, we introduce HSCodeComp, the first realistic, \nL24: expert-level e-commerce benchmark designed to evaluate deep search agents in \nL25: hierarchical rule application. In this task, the deep reasoning process of \nL26: agents is guided by these rules to predict 10-digit Harmonized System Code \nL27: (HSCode) of products with noisy but realistic descriptions. These codes, \nL28: established by the World Customs Organization, are vital for global supply chain\nL29: efficiency. 
Built from real-world data collected from large-scale e-commerce \nL30: platforms, our proposed HSCodeComp comprises 632 product entries spanning \nL31: diverse product categories, with these HSCodes annotated by several human \nL32: experts. Extensive experimental results on several state-of-the-art LLMs, open-\nL33: source, and closed-source agents reveal a huge performance gap: best agent \nL34: achieves only 46.8% 10-digit accuracy, far below human experts at 95.0%. \nL35: Besides, detailed analysis demonstrates the challenges of hierarchical rule \nL36: application, and test-time scaling fails to improve performance further.\nL37: \nL38: ## 1 Introduction\nL39: \nL40: Deep search agents have demonstrated significant value in solving complex real-\nL41: world problems, where robust external knowledge utilization constitutes a \nL42: critical capability [Wu et al., 2025, Tao et al., 2025, Li et al., 2025b]. To \nL43: evaluate this capability, numerous established benchmarks are proposed to assess\nL44: agents in utilizing open-domain data (e.g., GAIA [Mialon et al., 2023b] and \nL45: BrowseComp [Wei et al., 2025]) and domain-specific data (e.g., WebMall [Peeters \nL46: et al., 2025a], FinSearchComp [Hu et al., 2025a] and MedBrowseComp [Yu et al., \nL47: 2025b]).\nL48: \nL49: Beyond open-domain and domain-specific data, agents also need to effectively \nL50: apply rules that encode human expert knowledge, particularly in scenarios like \nL51: law, medical and e-commerce [Li et al., 2025a, Chen et al., 2025b, Yao et al., \nL52: 2022, Chollet et al., 2025]. For instance, legal case adjudication require \nL53: interpreting abstract legal provisions, and accurate e-commerce product \nL54: classification in depends on tariff rules [Grainger, 2024]. Previous works have \nL55: defined rule application as using specific logical rules with supporting facts \nL56: to derive conclusions [Wang et al., 2024, Servantez et al., 2024]. In contrast, \nL57: we define it as a core capability for deep search agents, where human-written \nL58: rules are systematically applied to guide complex reasoning and decision-making \nL59: [Sadowski and Chudziak, 2025]. Building on this observation, we categorize \nL60: knowledge data for deep search agents into three levels (Figure 1, left), with \nL61: increasing knowledge complexity: (1) Level 1: Open-domain Data - Tests \nL62: understanding and deep reasoning abilities of agents on long-form web content. \nL63: Established benchmarks include GAIA [Mialon et al., 2023b] and BrowseComp [Wei \nL64: et al., 2025]; (2) Level 2: Structured Data - Assesses agents to precisely \nL65: utilize structured data such as databases and knowledge graphs, as seen in \nL66: domain-specific benchmarks like WebMall [Peeters et al., 2025a], MedBrowseComp \nL67: [Chen et al., 2025b] and FinSearchComp [Hu et al., 2025a]; (3) Level 3: Rule \nL68: Data - Evaluates agents to apply complex and abstract rules [Chollet et al., \nL69: 2025]. This level presents two key challenges: (a) making accurate decisions \nL70: when rules contain vague natural language descriptions [Sadowski and Chudziak, \nL71: 2025]; and (b) reasoning about logical dependencies among rules, such as \nL72: exception clauses and cross-category relationships [Guha et al., 2023]. 
Despite \nL73: the importance of rule application in real-world scenarios, current agent \nL74: benchmarks largely overlook its evaluation.\nL75: \nL76: To fill this gap, we introduce HSCodeComp (short for the Harmonized System Code \nL77: (HSCode) Competition), the first realistic, expert-level e-commerce benchmark \nL78: designed to evaluate agents in predicting complete 10-digit Harmonized System \nL79: Code (HSCode) of the product, using hierarchical rules (e.g., eWTP tariff \nL80: rules111https://www.ewtp.com/web/smart/hscode). HSCodes organize products \nL81: through a hierarchical structure spanning over 5,000 distinct codes across \nL82: multiple classification levels, representing the global standard for classifying\nL83: traded international goods, established by the World Customs Organization and \nL84: implemented across more than 200 countries for customs clearance and tariff \nL85: determination [Grainger, 2024, Nath et al., 2025]. Built from the data of the \nL86: large-scale e-commerce platforms, our proposed HSCodeComp comprises 632 \nL87: carefully curated product entries, encompassing 27 unique HS chapters and 32 \nL88: distinct first-level categories. These HSCodes have been rigorously annotated by\nL89: multiple e-commerce domain experts, ensuring that HSCodeComp is expert-level. \nL90: Accurately predicting the exact 10-digit HSCode presents significant challenges:\nL91: agents must perform multi-hop hierarchical reasoning with complex tariff rules \nL92: while processing noisy but realistic product descriptions that often contain \nL93: abbreviations, language variations, or incomplete information.\nL94: \nL95: Extensive experiments on the state-of-the-art baselines, including 14 advanced \nL96: foundation models, 6 advanced open-source agent systems and 3 closed-source \nL97: agent systems, demonstrate that HSCode prediction task remains a substantial \nL98: challenge for current AI approaches. As shown in the Figure 1 (right), even the \nL99: best-performing system (SmolAgent [Roucher et al., 2025] with GPT-5) achieves \nL100: only 46.8% accuracy, substantially below the 95.0% accuracy attained by human \nL101: experts. Further detailed analysis reveals that existing agent systems lack \nL102: critical capabilities required for this complex hierarchical rule applications. \nL103: Notably, test-time scaling approach—which has proven effective in other \nL104: reasoning tasks [Guo et al., 2025, Liu et al., 2025]—fail to improve performance\nL105: on HSCodeComp. These observations demonstrate the challenging nature of our \nL106: proposed HSCodeComp, highlighting the need for more effective designs of agent \nL107: systems. To facilitate future research, we will publicly release codes and the \nL108: benchmark dataset of HSCodeComp.\nL109: \nL110: ## 2 Related Works\nL111: \nL112: ### 2.1 Previous Works in HSCode Prediction\nL113: \nL114: Previous works treat HSCode prediction as the e-commerce text classification \nL115: task [Grainger, 2024], using pre-trained BERT models [Liao et al., 2024, Shubham\nL116: et al., 2022] or Large Language Models (LLMs) prompting [Hussain and Ahmed, \nL117: 2023]. However, these approaches fail to leverage domain-specific knowledge, \nL118: especially the rules written by human experts [Hussain and Ahmed, 2023, Judy, \nL119: 2024]. 
Besides, existing HSCode benchmarks face two critical limitations [Judy, ", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://github.com/open-compass/GTA`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://zhuanlan.zhihu.com/p/1939316761775301093`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[3] (https://blog.csdn.net/qq_41472205/article/details/151974603)\n**viewing lines [0 - 167] of 187**\n\nL0: \nL1: URL: https://blog.csdn.net/qq_41472205/article/details/151974603\nL2: # BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试\nL3: \nL4: BrowseComp:AI浏览能力评估基准\nL5: \nL6: 最新推荐文章于 2025-11-12 13:40:20 发布\nL7: \nL8: 原创 于 2025-09-22 22:33:04 发布 · 1.3k 阅读\nL9: \nL10: · 9\nL11: · 25 · \nL12: CC 4.0 BY-SA版权\nL13: \nL14: 版权声明:本文为博主原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。\nL15: \nL16: ## BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试\nL17: \nL18: 在人工智能从基础聊天机器人向推理器和智能体发展的进程中,具备浏览互联网能力的人工智能模型正变得越来越重要。今天,我们将介绍一个名为BrowseComp的创新基准\nL19: 测试,它专门设计用于评估AI代理在复杂网络浏览任务中的能力。\nL20: \nL21: ### 什么是BrowseComp?\nL22: \nL23: BrowseComp(全称Browsing Competition)是一个包含1,266个挑战性问题的基准测试集,专门用于衡量AI代理在互联网上持续导航、寻找难\nL24: 以找到的纠缠信息的能力。该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。\nL25: \nL26: #### 核心特点\nL27: \nL28: 挑战性问题设计:BrowseComp的问题设计遵循严格的难度标准:\nL29: \nL30: - 人类创建者确保问题在10分钟内无法被人解决\nL31: - 现有模型(包括带浏览功能的ChatGPT和早期版本的OpenAI Deep Research)无法解决\nL32: - 通过5次简单Google搜索无法在结果首页找到答案\nL33: \nL34: 简单易验证:尽管问题极具挑战性,但答案形式简单——都是短字符串,便于自动验证模型输出的正确性。\nL35: \nL36: ### 为什么需要BrowseComp?\nL37: \nL38: #### 现有基准的局限性\nL39: \nL40: 传统的信息检索基准(如TriviaQA、HotpotQA等)主要关注易于查找的信息,随着语言模型的进步,这些基准已经趋于饱和。而BrowseComp专注于那些需\nL41: 要浏览大量网站才能解决的\"硬核\"问题。\nL42: \nL43: #### 模拟真实挑战\nL44: \nL45: BrowseComp问题通常采用\"逆向设计\"方法:创建者从一个已知事实出发,构建一个搜索空间巨大但验证简单的问题。例如:\nL46: \nL47: “找出2018-2023年间在EMNLP会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题”\nL48: \nL49: 这类问题验证简单,但解决起来需要检查数千篇论文并调查每位作者的背景。\nL50: \nL51: ### 数据集特点\nL52: \nL53: #### 主题多样性\nL54: \nL55: BrowseComp涵盖了广泛的主题领域(如图2所示),包括历史、科学、文化等。创建者被鼓励基于个人兴趣设计问题,这有助于提高数据质量和参与度。\nL56: \nL57: #### 质量保证\nL58: \nL59: 为确保答案的唯一性,创建者需要:\nL60: \nL61: - 对问题内容有足够了解,确信没有其他有效答案\nL62: - 如果不确定,则添加更多约束条件\nL63: - 接受其他创建者的验证反馈\nL64: \nL65: ### 人类表现基准\nL66: \nL67: 为了衡量BrowseComp的难度,研究人员让人类创建者尝试解决问题(不能解答自己创建的问题)。结果显示:\nL68: \nL69: - **70.8%**的问题在2小时搜索后人类选择放弃\nL70: - **29.2%**的问题被成功解决\nL71: - 在解决的问题中,**86.4%**的人类答案与参考答案一致\nL72: \nL73: 这表明BrowseComp确实极具挑战性,即使是熟悉数据集的人类专家也难以在有限时间内解决大部分问题。\nL74: \nL75: ### AI模型表现评估\nL76: \nL77: #### 各模型对比\nL78: \nL79: 研究人员评估了多种模型在BrowseComp上的表现:\nL80: \nL81: 模型 | 准确率(%) | 校准误差(%) \nL82: ---|---|---\nL83: GPT-4o | 0.6 | 69 \nL84: GPT-4o(带浏览) | 1.9 | 82 \nL85: GPT-4.5 | 0.9 | 68 \nL86: OpenAI o1 | 9.9 | 65 \nL87: Deep Research | 51.5 | 91 \nL88: \nL89: #### 关键发现\nL90: \nL91: - 基础模型表现不佳:GPT-4o和GPT-4.5准确率接近零,凸显了基准的难度\nL92: - 浏览功能带来有限提升:启用浏览功能的GPT-4o准确率略有提高,但仍很低\nL93: - 推理能力的重要性:OpenAI o1虽然没有浏览能力,但凭借更强的推理能力获得较高准确率\nL94: - 专业模型的优势:专门为持久网络浏览训练的Deep Research模型解决了约一半的问题\nL95: \nL96: #### 计算资源与性能关系\nL97: \nL98: 研究表明,BrowseComp性能随测试时计算资源的增加而平滑提升(如图1所示)。这与智能体模型的特性一致——更多计算资源允许模型浏览更多网站,从而提高找到正确\nL99: 答案的机会。\nL100: \nL101: ### 进阶策略分析\nL102: \nL103: #### 聚合策略的效果\nL104: \nL105: 通过让模型多次尝试同一问题并采用投票策略,可以显著提升性能:\nL106: \nL107: - 多数投票:选择样本中最常见的答案\nL108: - 加权投票:根据模型置信度加权投票\nL109: - 最佳选择:选择置信度最高的答案\nL110: \nL111: 这些方法将Deep Research的性能提升了15-25%,表明模型通常能够识别自己的正确答案。\nL112: \nL113: #### 任务难度分布\nL114: \nL115: 分析显示,BrowseComp中的任务难度分布广泛:\nL116: \nL117: - 16%的任务被Deep Research完美解决(100%通过率)\nL118: - 14%的任务完全失败(0%通过率)\nL119: - 其余任务处于中间难度水平\nL120: \nL121: ### BrowseComp的意义与局限性\nL122: 
\nL123: #### 作为评估工具的价值\nL124: \nL125: BrowseComp可被视为浏览智能体的\"编程竞赛\"——虽然不全面,但对核心浏览能力提供了有用的衡量。正如在编程竞赛中表现出色的模型很可能在其他编码任务中表现良\nL126: 好,能够解决BrowseComp的模型在定位难以查找信息方面应该具有很强的能力。\nL127: \nL128: #### 已知局限性\nL129: \nL130: - 不涉及真实用户查询分布:如生成长回答或解决查询歧义等挑战\nL131: - 答案唯一性假设:虽然尽力确保,但不能完全保证没有其他有效答案\nL132: - 单模态限制:当前仅关注文本信息检索,未来可扩展至图像、视频等模态\nL133: \nL134: ### 结论与展望\nL135: \nL136: BrowseComp为评估AI代理的网络浏览能力提供了一个具有挑战性且易于使用的基准。它强调的持久性、创造性和战略性浏览能力,正是未来AI智能体在实际应用中所需\nL137: 的核心技能。\nL138: \nL139: 随着更多研究团队在BrowseComp上测试他们的模型,我们期待看到浏览智能体技术的快速进步,最终使AI能够更可靠地帮助我们获取那些深藏在互联网角落的宝贵信息。\nL140: \nL141: 访问地址:BrowseComp已在GitHub上开源,地址为 https://github.com/openai/simple-evals\nL142: \nL143: 添加红包\nL144: \nL145: 请填写红包祝福语或标题\nL146: \nL147: 个\nL148: \nL149: 红包个数最小为10个\nL150: \nL151: 元\nL152: \nL153: 红包金额最低5元\nL154: \nL155: 当前余额3.43元 前往充值 >\nL156: \nL157: 需支付:10.00元\nL158: \nL159: 成就一亿技术人!\nL160: \nL161: hope_wisdom\nL162: 发出的红包 \nL163: 实付元\nL164: \nL165: 点击重新获取\nL166: \nL167: 扫码支付", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "打开的HSCodeComp页面详细介绍了这个基准测试,它是一个用于评估深度搜索智能体在分层规则应用方面能力的基准测试,特点包括:基于真实世界电商数据、任务复杂、需要多跳层次推理、使用嘈杂的产品描述,并且标注由专家完成。使用场景是评估智能体在规则应用方面的能力,例如关税代码分类。例题是预测产品的10位HS编码。打开的BrowseComp页面详细介绍了这个基准测试,它是一个用于评估浏览智能体(即能够在互联网上导航、搜索信息的智能体)的基准测试。其特点包括:包含1266个具有挑战性的问题,需要持续导航和搜索难以查找的信息;答案形式简单,便于自动验证;问题设计确保人类在10分钟内无法解决,且现有模型也无法解决;涵盖了广泛的主题领域。使用场景是评估智能体在复杂网络浏览任务中的能力。例题是:“找出2018-2023年间在EMNLP会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题”。另外两个打开的页面(open-compass/GTA和WideSearch)出现了错误,可能链接已失效或需要其他访问方式。为了进一步获取其他相关基准测试的信息,我们需要同时进行多项搜索。考虑到用户可能对搜索智能体基准测试有广泛需求,我们将同时搜索“WebVoyager benchmark”、“WebArena benchmark”、“GAIA benchmark”、“ToolBench benchmark”、“HotpotQA benchmark”、“FEVER benchmark”、“TriviaQA benchmark”、“Natural Questions benchmark”、“MS MARCO benchmark”、“BEIR benchmark”、“MIRACL benchmark”、“Zero-shot IR benchmark”、“WebGPT benchmark”、“WebShop benchmark”、“ALFWorld benchmark”、“Mind2Web benchmark”、“VisualWebArena benchmark”、“SearchBench benchmark”、“WebVLN benchmark”、“WebNav benchmark”。这些搜索将帮助我们全面了解现有基准测试的概况。", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"WebVoyager benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"WebArena benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"GAIA benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"ToolBench benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"HotpotQA benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"FEVER benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"TriviaQA benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"Natural Questions benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": 
\"MS MARCO benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"BEIR benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"MIRACL benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"Zero-shot IR benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"WebGPT benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"WebShop benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"ALFWorld benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"Mind2Web benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"VisualWebArena benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"SearchBench benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"WebVLN benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"WebNav benchmark\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[4] WebVoyager benchmark (Search_Results/WebVoyager benchmark)\n**viewing lines [0 - 40] of 40**\n\nL0: \nL1: URL: Search_Results/WebVoyager benchmark\nL2: # Search Results\nL3: \nL4: * 【0†WebVoyager Benchmark; publish_date: none†www.trykura.com】 Performance \nL5: measured across various websites in WebVoyager shows Kura consistently \nL6: outperforming other agents, with particularly strong results on e-commerce ...\nL7: * 【1†WebVoyager: Building an End-to-End Web Agent with ...; publish_date: \nL8: none†github.com】 WebVoyager is an innovative Large Multimodal Model (LMM) \nL9: powered web agent that can complete user instructions end-to-end by interacting \nL10: with real-world ...\nL11: * 【2†AI Browser Agent Leaderboard | Steel.dev; publish_date: \nL12: none†leaderboard.steel.dev】 See how various AI browser agents stack up based on \nL13: their accuracy in completing web-based tasks on the WebVoyager benchmark.\nL14: * 【3†[2401.13919] WebVoyager: Building an End-to-End Web ...; publish_date: \nL15: none†arxiv.org】 by H He · 2024 · Cited by 282 — We show that WebVoyager achieves\nL16: a 59.1% task success rate on our benchmark, significantly surpassing the \nL17: performance of both GPT-4 (All ...\nL18: * 【4†Our Agent-E SOTA Results on the WebVoyager Benchmark; publish_date: \nL19: none†www.emergence.ai】 Jul 11, 2024 — WebVoyager is a benchmark that tests an \nL20: agent's capabilities for navigation on dynamic live websites. 
It is more \nL21: representative than WebArena [4] ...\nL22: * 【5†Browser Use = state of the art Web Agent; publish_date: none†browser-\nL23: use.com】 Dec 15, 2024 — Browser Use has achieved state-of-the-art performance on\nL24: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 \nL25: diverse web tasks.\nL26: * 【6†Magnitude achieves SOTA 94% on WebVoyager benchmark; publish_date: \nL27: none†github.com】 Magnitude achieves state-of-the-art performance with 93.9% \nL28: success rate on WebVoyager, beating all other browser agents.\nL29: * 【7†WebVoyager: Autonomous Web Agent Benchmark; publish_date: \nL30: none†www.emergentmind.com】 3 days ago — WebVoyager Benchmark is a comprehensive \nL31: evaluation suite for autonomous web agents, featuring 643 tasks across 15 \nL32: popular websites.\nL33: * 【8†WebVoyager Benchmark Results; publish_date: none†www.browserable.ai】 \nL34: Browserable has achieved 90.4% on the WebVoyager benchmark. This is best-in-\nL35: class performance across all web agents. This was done across 567 web tasks \nL36: which ...\nL37: * 【9†89% achieved on WebVoyager using Anchor + Browser Use; publish_date: \nL38: none†www.reddit.com】 Thanks to the amazing work from the browser-use open-source\nL39: community and the built-in support from Anchor Browser, we've hit an 89% score \nL40: on WebVoyager.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[5] WebArena benchmark (Search_Results/WebArena benchmark)\n**viewing lines [0 - 42] of 42**\n\nL0: \nL1: URL: Search_Results/WebArena benchmark\nL2: # Search Results\nL3: \nL4: * 【0†WebArena: A Realistic Web Environment for Building ...; publish_date: \nL5: none†webarena.dev】 Our benchmark is implemented in our fully interactable \nL6: highly-realistic WebArena environment. It features diverse tasks human may \nL7: encounter in their daily ...\nL8: * 【1†[2307.13854] WebArena: A Realistic Web Environment for ...; publish_date:\nL9: none†arxiv.org】 by S Zhou · 2023 · Cited by 637 — Building upon our \nL10: environment, we release a set of benchmark tasks focusing on evaluating the \nL11: functional correctness of task completions.\nL12: * 【2†WebArena: A Realistic Web Environment for Building ...; publish_date: \nL13: none†www.cmu.edu】 WebArena introduces a benchmark on interpreting high-level \nL14: realistic natural language command to concrete web-based interactions. We \nL15: provide annotated programs ...\nL16: * 【3†GitHub - web-arena-x/webarena: Code repo for ...; publish_date: \nL17: none†github.com】 [12/20/2024] Check out our new benchmark on even more \nL18: consequential tasks, including terminal use and coding, TheAgentCompany. 
\nL19: [12/21/2023] We release the ...\nL20: * 【4†WebArena Benchmark and the State of Agentic AI; publish_date: \nL21: none†medium.com】 In short, WebArena established a new standard for realism and \nL22: complexity in web agent evaluation, forcing AI agents to operate in dynamic, \nL23: high- ...\nL24: * 【5†WebArena: A Realistic Web Environment for Building ...; publish_date: \nL25: none†huggingface.co】 Jul 25, 2023 — WebArena, a realistic and reproducible \nL26: environment, evaluates the performance of autonomous agents performing complex \nL27: tasks on websites using ...\nL28: * 【6†WebArena Benchmark: Evaluating Web Agents; publish_date: \nL29: none†www.emergentmind.com】 Jun 30, 2025 — WebArena Benchmark is a self-contained\nL30: suite that evaluates autonomous agents on realistic, multi-step web tasks using\nL31: natural language ...\nL32: * 【7†VisualWebArena is a benchmark for multimodal agents.; publish_date: \nL33: none†github.com】 VisualWebArena is a realistic and diverse benchmark for \nL34: evaluating multimodal autonomous language agents. It comprises of a set of \nL35: diverse and complex web-based ...\nL36: * 【8†WebDev Arena Leaderboard - LMArena; publish_date: none†web.lmarena.ai】 \nL37: WebDev Arena is a real-time AI coding competition where models go head-to-head \nL38: in web development challenges, developed by LMArena.\nL39: * 【9†WebArena: A Realistic Web Environment for Building ...; publish_date: \nL40: none†arxiv.org】 Apr 16, 2024 — We use this benchmark to evaluate several agents \nL41: that can follow NL command and perform web-based tasks (§4). These agents are \nL42: implemented in a ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[6] GAIA benchmark (Search_Results/GAIA benchmark)\n**viewing lines [0 - 41] of 41**\n\nL0: \nL1: URL: Search_Results/GAIA benchmark\nL2: # Search Results\nL3: \nL4: * 【0†GAIA Leaderboard - a Hugging Face Space by ...; publish_date: \nL5: none†huggingface.co】 GAIA is a benchmark which aims at evaluating next-\nL6: generation LLMs (LLMs with augmented capabilities due to added tooling, \nL7: efficient prompting, access to search ...\nL8: * 【1†[2311.12983] GAIA: a benchmark for General AI Assistants; publish_date: \nL9: none†arxiv.org】 by G Mialon · 2023 · Cited by 367 — GAIA proposes real-world \nL10: questions that require a set of fundamental abilities such as reasoning, multi-\nL11: modality handling, web browsing, and generally tool-use ...\nL12: * 【2†GAIA benchmark; publish_date: none†huggingface.co】 This is the \nL13: organisation page for all things related to GAIA, a benchmark for General AI \nL14: Assistants. 
You can find all the information and links on the GAIA ...\nL15: * 【3†GAIA: A Benchmark for General AI Assistants; publish_date: \nL16: none†ukgovernmentbeis.github.io】 This is an Inspect AI implementation of the \nL17: GAIA (General AI Assistants) benchmark, consisting of 450 questions testing tool\nL18: use on realistic assistant tasks.\nL19: * 【4†GAIA: a benchmark for general AI assistants | Research; publish_date: \nL20: none†ai.meta.com】 May 6, 2024 — GAIA proposes real-world questions that require \nL21: a set of fundamental abilities such as reasoning, multi-modality handling, web \nL22: browsing, and generally tool-use ...\nL23: * 【5†HAL: GAIA Leaderboard; publish_date: none†hal.cs.princeton.edu】 GAIA is a\nL24: benchmark for General AI Assistants that requires a set of fundamental \nL25: abilities such as reasoning, multi-modality handling, web browsing, and tool- \nL26: ...\nL27: * 【6†GAIA: The LLM Agent Benchmark Everyone's Talking About; publish_date: \nL28: none†towardsdatascience.com】 May 29, 2025 — GAIA stands for General AI \nL29: Assistants benchmark [1]. This benchmark was introduced to specifically evaluate\nL30: LLM agents on their ability to act as general- ...\nL31: * 【7†GAIA: a benchmark for General AI Assistants; publish_date: \nL32: none†openreview.net】 by G Mialon · Cited by 367 — GAIA proposes real-world \nL33: questions that require a set of fundamental abilities such as reasoning, multi-\nL34: modality handling, web browsing, and generally tool-use ...\nL35: * 【8†Rethinking AI Evaluation: Introducing the GAIA Benchmark; publish_date: \nL36: none†medium.com】 The authors introduce GAIA, a benchmark designed to assess the \nL37: robustness of AI systems across a variety of practical tasks.\nL38: * 【9†H2O.ai Tops the General AI Assistant (GAIA) Test; publish_date: \nL39: none†h2o.ai】 Mar 17, 2025 — Our h2oGPTe Agent has once again claimed the #1 spot\nL40: on the prestigious GAIA (General AI Assistants) benchmark with an impressive \nL41: 75% accuracy rate.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[7] ToolBench benchmark (Search_Results/ToolBench benchmark)\n**viewing lines [0 - 40] of 40**\n\nL0: \nL1: URL: Search_Results/ToolBench benchmark\nL2: # Search Results\nL3: \nL4: * 【0†ToolBench, an evaluation suite for LLM tool manipulation ...; \nL5: publish_date: none†github.com】 The ToolBench is a benchmark consisting of \nL6: diverse software tools for real-world tasks. We also provide easy-to-use \nL7: infrastructure in this repository.\nL8: * 【1†OpenBMB/ToolBench; publish_date: none†github.com】 [2023/7/27] New version\nL9: ToolBench is released. ✨Here is an overview of the dataset construction, \nL10: training, and evaluation. ✨✨Features:.\nL11: * 【2†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: \nL12: none†arxiv.org】 by Z Guo · 2024 · Cited by 100 — We introduce StableToolBench, a\nL13: benchmark evolving from ToolBench, proposing a virtual API server and stable \nL14: evaluation system.\nL15: * 【3†StableToolBench - Zhicheng Guo; publish_date: none†zhichengg.github.io】 \nL16: We introduce StableToolBench, a benchmark evolving from ToolBench, proposing a \nL17: virtual API server and stable evaluation system.\nL18: * 【4†ToolBench | EvalScope - Read the Docs; publish_date: \nL19: none†evalscope.readthedocs.io】 We evaluate the effectiveness of the ToolBench \nL20: benchmark: ToolBench (Qin et al., 2023b). 
The task involves integrating API \nL21: calls to complete tasks.\nL22: * 【5†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: \nL23: none†aclanthology.org】 by Z Guo · 2024 · Cited by 100 — We introduce \nL24: StableToolBench, a benchmark evolving from ToolBench, proposing a virtual API \nL25: server and stable evaluation system.\nL26: * 【6†ML-Tool-Bench: Tool-Augmented Planning for ML Tasks; publish_date: \nL27: none†openreview.net】 Sep 18, 2025 — In this work, we introduce a comprehensive \nL28: benchmark for evaluating tool-augmented ML agents using a curated set of 61 \nL29: specialized tools and 15 ...\nL30: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: \nL31: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact \nL32: with (simulated) human users and programmatic APIs while following domain-\nL33: specific policies in a consistent ...\nL34: * 【8†ToolEval Leaderboard; publish_date: none†openbmb.github.io】 ToolEval is \nL35: an automatic evaluator build for tool learning which incorporates two evaluation\nL36: metrics, Pass Rate and Win Rate(Preference).\nL37: * 【9†What is the best benchmark dataset for multi-step tool-use?; \nL38: publish_date: none†www.reddit.com】 I'm a newbie trying to evaluate the \nL39: performance of different prompts strategies for multi-step tool-using, wondering\nL40: what is the recommended benchmark dataset ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[8] HotpotQA benchmark (Search_Results/HotpotQA benchmark)\n**viewing lines [0 - 39] of 39**\n\nL0: \nL1: URL: Search_Results/HotpotQA benchmark\nL2: # Search Results\nL3: \nL4: * 【0†HotpotQA Homepage; publish_date: none†hotpotqa.github.io】 HotpotQA is a \nL5: question answering dataset featuring natural, multi-hop questions, with strong \nL6: supervision for supporting facts to enable more explainable ...See more\nL7: * 【1†HotpotQA: A Dataset for Diverse, Explainable Multi-hop ...; publish_date:\nL8: none†arxiv.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset with \nL9: 113k Wikipedia-based question-answer pairs requiring multi-document reasoning, \nL10: diverse questions, sentence-level ...\nL11: * 【2†hotpotqa/hotpot_qa · Datasets at Hugging Face; publish_date: \nL12: none†huggingface.co】 HotpotQA is a new dataset with 113k Wikipedia-based \nL13: question-answer pairs with four key features: (1) the questions require finding \nL14: and reasoning over multiple ...See more\nL15: * 【3†Why You Should Stop Using HotpotQA for AI Agents ...; publish_date: \nL16: none†qipeng.me】 Jul 1, 2025 — HotpotQA pioneered a class of AI tasks that \nL17: requires the AI system to autonomously perform multiple steps of reasoning in an\nL18: open-domain setting.See more\nL19: * 【4†hotpotqa/hotpot; publish_date: none†github.com】 A dataset for diverse, \nL20: explainable multi-hop question answering. 
This repository contains the baseline \nL21: model code, as well as the entire pipeline of running ...See more\nL22: * 【5†HotpotQA: Multi-Hop QA Benchmark; publish_date: \nL23: none†www.emergentmind.com】 Sep 10, 2025 — HotpotQA is a large-scale multi-hop \nL24: question answering benchmark featuring 112,779 Wikipedia-based Q&A pairs with \nL25: detailed, sentence-level ...See more\nL26: * 【6†HotpotQA Dataset | Papers With Code; publish_date: \nL27: none†paperswithcode.com】 HotpotQA is a question answering dataset collected on \nL28: the English Wikipedia, containing about 113K crowd-sourced questions.See more\nL29: * 【7†HotpotQA: A Dataset for Diverse, Explainable Multi-hop ...; publish_date:\nL30: none†aclanthology.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset\nL31: with 113k Wikipedia-based question-answer pairs requiring multi-document \nL32: reasoning, diverse questions, sentence-level facts, and factoid ...\nL33: * 【8†Benchmark BM25S: HotpotQA; publish_date: none†www.kaggle.com】 Explore and\nL34: run machine learning code with Kaggle Notebooks | Using data from No attached \nL35: data sources.\nL36: * 【9†mteb/hotpotqa · Datasets at Hugging Face; publish_date: \nL37: none†huggingface.co】 HotpotQA is a question answering dataset featuring natural,\nL38: multi-hop questions, with strong supervision for supporting facts to enable \nL39: more explainable ...See more", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[9] FEVER benchmark (Search_Results/FEVER benchmark)\n**viewing lines [0 - 40] of 40**\n\nL0: \nL1: URL: Search_Results/FEVER benchmark\nL2: # Search Results\nL3: \nL4: * 【0†Fever.ai; publish_date: none†fever.ai】 We are pleased to announce that \nL5: FEVER9 will be co-located with EACL 2026. In this year's workshop, we will \nL6: introduce a new shared task focused on automated fact ...\nL7: * 【1†a Large-scale Dataset for Fact Extraction and VERification; publish_date:\nL8: none†aclanthology.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we \nL9: introduce a new publicly available dataset for verification against textual \nL10: sources, FEVER: Fact Extraction.\nL11: * 【2†awslabs/fever: FEVER (Fact Extraction and VERification) ...; \nL12: publish_date: none†github.com】 In this paper we introduce a new publicly \nL13: available dataset for verification against textual sources, FEVER: Fact \nL14: Extraction and VERification.\nL15: * 【3†FEVER: Fact Extraction and VERification; publish_date: \nL16: none†www.amazon.science】 The best accuracy we achieve on labeling a claim \nL17: accompanied by the correct evidence is 31.87%, while if we ignore the evidence \nL18: we achieve 50.91%. Thus we ...\nL19: * 【4†FEVER Dataset; publish_date: none†fever.ai】 FEVER (Fact Extraction and \nL20: VERification) consists of 185,445 claims generated by altering sentences \nL21: extracted from Wikipedia and subsequently verified ...\nL22: * 【5†mteb/fever · Datasets at Hugging Face; publish_date: none†huggingface.co】\nL23: FEVER. An MTEB dataset. Massive Text Embedding Benchmark. FEVER (Fact \nL24: Extraction and VERification) consists of 185,445 claims generated by altering \nL25: sentences ...\nL26: * 【6†FEVEROUS: Fact Extraction and VERification Over ...; publish_date: \nL27: none†datasets-benchmarks-proceedings.neurips.cc】 by R Aly · Cited by 359 — In \nL28: this paper we introduce a novel dataset and benchmark, Fact Extraction and \nL29: VERification Over. 
Unstructured and Structured information (FEVEROUS), which ...\nL30: * 【7†a large-scale dataset for Fact Extraction and VERification; publish_date:\nL31: none†arxiv.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we introduce\nL32: a new publicly available dataset for verification against textual sources, \nL33: FEVER: Fact Extraction and VERification.\nL34: * 【8†FEVER Resources; publish_date: none†fever.ai】 2018 FEVER: a large-scale \nL35: dataset for Fact Extraction and VERification .bib James Thorne, Andreas Vlachos,\nL36: Christos Christodoulopoulos, Arpit Mittal\nL37: * 【9†a Large-scale Dataset for Fact Extraction and VERification; publish_date:\nL38: none†www.semanticscholar.org】 This paper introduces a new publicly available \nL39: dataset for verification against textual sources, FEVER, which consists of \nL40: 185,445 claims generated by ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[10] TriviaQA benchmark (Search_Results/TriviaQA benchmark)\n**viewing lines [0 - 35] of 35**\n\nL0: \nL1: URL: Search_Results/TriviaQA benchmark\nL2: # Search Results\nL3: \nL4: * 【0†TriviaQA; publish_date: none†nlp.cs.washington.edu】 TriviaQA is a reading\nL5: comprehension dataset containing over 650K question-answer-evidence triples. \nL6: TriviaQA includes 95K question-answer pairs authored ...\nL7: * 【1†TriviaQA: A Large Scale Distantly Supervised Challenge ...; publish_date:\nL8: none†aclanthology.org】 by M Joshi · 2017 · Cited by 3451 — We present TriviaQA,\nL9: a challenging reading comprehension dataset containing over 650K question-\nL10: answer-evidence triples. TriviaQA includes 95K question ...\nL11: * 【2†mandarjoshi/trivia_qa · Datasets at Hugging Face; publish_date: \nL12: none†huggingface.co】 TriviaqQA is a reading comprehension dataset containing \nL13: over 650K question-answer-evidence triples. TriviaqQA includes 95K question-\nL14: answer pairs authored by ...\nL15: * 【3†[1705.03551] TriviaQA: A Large Scale Distantly Supervised ...; \nL16: publish_date: none†arxiv.org】 by M Joshi · 2017 · Cited by 3451 — We present \nL17: TriviaQA, a challenging reading comprehension dataset containing over 650K \nL18: question-answer-evidence triples.\nL19: * 【4†TriviaQA; publish_date: none†epoch.ai】 An open-domain question answering \nL20: benchmark with challenging trivia questions paired with evidence documents.\nL21: * 【5†TriviaQA Leaderboard; publish_date: none†llm-stats.com】 What is the \nL22: TriviaQA benchmark? A large-scale reading comprehension dataset containing over \nL23: 650K question-answer-evidence triples. TriviaQA includes 95K ...\nL24: * 【6†Code for the TriviaQA reading comprehension dataset; publish_date: \nL25: none†github.com】 A large scale distantly supervised challenge dataset for \nL26: reading comprehension. In Association for Computational Linguistics (ACL) 2017, \nL27: Vancouver, Canada.\nL28: * 【7†TriviaQA - Model Benchmarks - The Regularizer; publish_date: \nL29: none†www.theregularizer.com】 May 4, 2025 — Compare the performance of different \nL30: AI models across standardized benchmarks. Higher scores generally indicate \nL31: better performance, but context ...\nL32: * 【8†TriviaQA: A Large Scale Distantly Supervised Challenge ...; publish_date:\nL33: none†www.cs.utexas.edu】 by M Joshi · Cited by 3445 — We present TriviaQA, a \nL34: challenging reading comprehension dataset contain- ing over 650K question-\nL35: answer-evidence triples. 
TriviaQA includes 95K question-.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[11] Natural Questions benchmark (Search_Results/Natural Questions benchmark)\n**viewing lines [0 - 39] of 39**\n\nL0: \nL1: URL: Search_Results/Natural Questions benchmark\nL2: # Search Results\nL3: \nL4: * 【0†Natural Questions: a Benchmark for Question Answering ...; publish_date: \nL5: none†research.google】 by T Kwiatkowski · Cited by 4339 — We present the Natural \nL6: Questions corpus, a question answering dataset. Questions consist of real \nL7: anonymized, aggregated queries issued to the Google search ...\nL8: * 【1†Natural Questions: A Benchmark for Question Answering ...; publish_date: \nL9: none†aclanthology.org】 by T Kwiatkowski · Cited by 4308 — Abstract. We present \nL10: the Natural Questions corpus, a question answering data set. Questions consist \nL11: of real anonymized, aggregated queries issued.\nL12: * 【2†Google's Natural Questions; publish_date: none†ai.google.com】 Natural \nL13: Questions. A Benchmark for Question Answering Research. View examples · Download\nL14: dataset. Open Domain Question Answering. A core goal in artificial ...\nL15: * 【3†google-research-datasets/natural-questions; publish_date: \nL16: none†github.com】 Natural Questions (NQ) contains real user questions issued to \nL17: Google search, and answers found from Wikipedia by annotators. NQ is designed \nL18: for the training and ...\nL19: * 【4†Natural Questions: A Benchmark for Question Answering ...; publish_date: \nL20: none†direct.mit.edu】 Aug 1, 2019 — We present the Natural Questions corpus, a \nL21: question answering data set. Questions consist of real anonymized, aggregated \nL22: queries issued to the Google search ...\nL23: * 【5†ir_datasets : Natural Questions; publish_date: none†ir-datasets.com】 \nL24: Google Natural Questions is a Q&A dataset containing long, short, and Yes/No \nL25: answers from Wikipedia. ir_datasets frames this around an ad-hoc ranking setting\nL26: ...\nL27: * 【6†sentence-transformers/natural-questions · Datasets at ...; publish_date: \nL28: none†huggingface.co】 This dataset is a collection of question-answer pairs from \nL29: the Natural Questions dataset. See Natural Questions for additional information.\nL30: * 【7†Google's Natural Questions; publish_date: none†ai.google.com】 Natural \nL31: Questions contains 307K training examples, 8K examples for development, and a \nL32: further 8K examples for testing. In the paper, we demonstrate a human ...\nL33: * 【8†A Benchmark for Question Answering Research; publish_date: \nL34: none†www.researchgate.net】 Jul 27, 2025 — We present the Natural Questions \nL35: corpus, a question answering data set. 
Questions consist of real anonymized, \nL36: aggregated queries issued to the Google search ...\nL37: * 【9†natural-questions; publish_date: none†docs.unity.rc.umass.edu】 Sep 4, \nL38: 2025 — “Natural questions: a benchmark for question answering research.” \nL39: Transactions of the Association for Computational Linguistics 7 (2019): ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[12] MS MARCO benchmark (Search_Results/MS MARCO benchmark)\n**viewing lines [0 - 41] of 41**\n\nL0: \nL1: URL: Search_Results/MS MARCO benchmark\nL2: # Search Results\nL3: \nL4: * 【0†MS MARCO - Microsoft Open Source; publish_date: none†microsoft.github.io】\nL5: The MS MARCO datasets are intended for non-commercial research purposes only to\nL6: promote advancement in the field of artificial intelligence and related areas, \nL7: ...\nL8: * 【1†microsoft/ms_marco · Datasets at Hugging Face; publish_date: \nL9: none†huggingface.co】 Starting with a paper released at NIPS 2016, MS MARCO is a \nL10: collection of datasets focused on deep learning in search. The first dataset was\nL11: a question ...\nL12: * 【2†Benchmarking Ranking Models in the Large-Data Regime; publish_date: \nL13: none†arxiv.org】 by N Craswell · 2021 · Cited by 89 — This paper uses the MS \nL14: MARCO and TREC Deep Learning Track as our case study, comparing it to the case \nL15: of TREC ad hoc ranking in the 1990s.\nL16: * 【3†Benchmarking Ranking Models in the Large-Data Regime; publish_date: \nL17: none†www.microsoft.com】 This paper uses the MS MARCO and TREC Deep Learning \nL18: Track as our case study, comparing it to the case of TREC ad hoc ranking in the \nL19: 1990s. We show how the ...\nL20: * 【4†Datasets for Document and Passage Ranking Leadboards; publish_date: \nL21: none†microsoft.github.io】 The MS MARCO document and passage ranking leaderboards\nL22: complements the TREC Deep Learning Track by providing on-going evaluation of \nL23: submissions using pre- ...\nL24: * 【5†MS MARCO: Benchmarking Ranking Models in the Large- ...; publish_date: \nL25: none†dl.acm.org】 Jul 11, 2021 — This paper uses the MS MARCO and TREC Deep \nL26: Learning Track as our case study, comparing it to the case of TREC ad hoc \nL27: ranking in the 1990s.\nL28: * 【6†ir_datasets : MSMARCO (passage); publish_date: none†ir-datasets.com】 A \nL29: passage ranking benchmark with a collection of 8.8 million passages and question\nL30: queries. Most relevance judgments are shallow.\nL31: * 【7†MS MARCO; publish_date: none†sbert.net】 MS MARCO Passage Ranking is a \nL32: large dataset to train models for information retrieval. It consists of about \nL33: 500k real search queries from Bing search engine ...\nL34: * 【8†MS MARCO: A Human Generated MAchine Reading ...; publish_date: \nL35: none†arxiv.org】 by P Bajaj · 2016 · Cited by 1151 — We introduce a large scale \nL36: MAchine Reading COmprehension dataset, which we name MS MARCO. 
The dataset \nL37: comprises of 1,010,916 anonymized ...\nL38: * 【9†MS MARCO Web Search: A Large-scale Information-rich ...; publish_date: \nL39: none†www.microsoft.com】 May 13, 2024 — MS MARCO Web Search offers a retrieval \nL40: benchmark with three web retrieval challenge tasks that demands innovations in \nL41: both machine learning and ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[13] BEIR benchmark (Search_Results/BEIR benchmark)\n**viewing lines [0 - 37] of 37**\n\nL0: \nL1: URL: Search_Results/BEIR benchmark\nL2: # Search Results\nL3: \nL4: * 【0†详细介绍文本检索基准BEIR: A Heterogeneous Benchmark ...; publish_date: \nL5: none†blog.csdn.net】 2023年1月1日 — \nL6: BEIR旨在为所有不同的检索任务提供一个一站式的零样本评估基准。为了构建一个全面的评估基准,选择方法对于收集具有理想属性的任务和数据集至关重要。对于 ...\nL7: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR is a heterogeneous \nL8: benchmark containing diverse IR tasks. It also provides a common and easy \nL9: framework for evaluation of your NLP-based retrieval models ...\nL10: * 【2†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; \nL11: publish_date: none†arxiv.org】 作者:N Thakur · 2021 · 被引用次数:1480 — We introduce \nL12: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for \nL13: information retrieval.\nL14: * 【3†BeIR; publish_date: none†huggingface.co】 BEIR (Benchmarking IR) consists \nL15: of a homogenous benchmark for diverse sentence or passage level IR tasks. It \nL16: provides a common and easy framework for the cross ...\nL17: * 【4†论文分享:BEIR A Heterogeneous Benchmark for Zero-shot ...; publish_date: \nL18: none†zhuanlan.zhihu.com】 2022年10月3日 — 分享论文,夹带个人理解的分享,建议结合原论文看。 1 研究背景. \nL19: 本论文主要关注的领域是query-document检索(下文简称qd检索),即根据query去文档库里 ...\nL20: * 【5†Benchmarking IR Information Retrieval (BEIR); publish_date: \nL21: none†zilliz.com】 BEIR is a benchmark designed for evaluating the versatility and\nL22: robustness of information retrieval models. It features 18 diverse datasets \nL23: from domains like ...\nL24: * 【6†BEIR (Benchmarking IR) - OpenDataLab; publish_date: none†opendatalab.com】\nL25: 简介-Introduction. BEIR(Benchmarking \nL26: IR)是包含不同信息检索(IR)任务的异构基准。通过BEIR,可以系统地研究多种神经检索方法的零样本泛化能力。\nL27: * 【7†What is the BEIR benchmark and how is it used?; publish_date: \nL28: none†milvus.io】 The BEIR (Benchmarking Information Retrieval) benchmark is a \nL29: standardized framework designed to evaluate the effectiveness of search and \nL30: retrieval algorithms.\nL31: * 【8†BEIR Benchmark数据集卡片; publish_date: none†www.atyun.com】 BEIR \nL32: Benchmark数据集卡片. 数据集简介. BEIR是一个异构评测基准,由18个多样化的数据集构建而成,代表了9个信息检索任务:. 
事实查证: FEVER ,\nL33: Climate-FEVER , SciFact ...\nL34: * 【9†Evaluating search relevance part 1 - The BEIR benchmark; publish_date: \nL35: none†www.elastic.co】 2024年7月16日 — Learn to evaluate your search system in the \nL36: context of better understanding the BEIR benchmark, with tips & techniques to \nL37: improve your ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[14] MIRACL benchmark (Search_Results/MIRACL benchmark)\n**viewing lines [0 - 41] of 41**\n\nL0: \nL1: URL: Search_Results/MIRACL benchmark\nL2: # Search Results\nL3: \nL4: * 【0†MIRACL | Multilingual Information Retrieval Across a ...; publish_date: \nL5: none†project-miracl.github.io】 MIRACL (Multilingual Information Retrieval Across\nL6: a Continuum of Languages) is an WSDM 2023 Cup challenge that focuses on search \nL7: across 18 different ...\nL8: * 【1†project-miracl/miracl: A large-scale multilingual dataset for ...; \nL9: publish_date: none†github.com】 A large-scale multilingual dataset for \nL10: Information Retrieval. Thorough human-annotations across 18 diverse languages.\nL11: * 【2†A Large, multilingual, visual document retrieval benchmark; publish_date:\nL12: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a \nL13: challenging, representative, multilingual evaluation benchmark for visual \nL14: retrieval pipelines and will help the community build robust ...\nL15: * 【3†miracl/miracl · Datasets at Hugging Face; publish_date: \nL16: none†huggingface.co】 MIRACL (Multilingual Information Retrieval Across a \nL17: Continuum of Languages) is a multilingual retrieval dataset that focuses on \nL18: search across 18 different ...\nL19: * 【4†MIRACL: A Multilingual Retrieval Dataset Covering 18 ...; publish_date: \nL20: none†direct.mit.edu】 by X Zhang · 2023 · Cited by 131 — MIRACL is a multilingual\nL21: dataset for ad hoc retrieval across 18 languages that collectively encompass \nL22: over three billion native speakers around the world.\nL23: * 【5†(PDF) MIRACL-VISION: A Large, multilingual, visual ...; publish_date: \nL24: none†www.researchgate.net】 May 23, 2025 — MIRACL-VISION covers 18 languages, and\nL25: is an extension of the MIRACL dataset, a popular benchmark to evaluate text-\nL26: based multilingual retrieval ...\nL27: * 【6†A Large, multilingual, visual document retrieval benchmark; publish_date:\nL28: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a \nL29: challenging, representative, multilingual evaluation benchmark for visual \nL30: retrieval pipelines and will help the community ...\nL31: * 【7†ir_datasets : MIRACL; publish_date: none†ir-datasets.com】 \nL32: \"miracl/ar/test-a\". The held-out test set (version a) for Arabic. \nL33: queriesdocsCitationMetadata. 936 queries. Language: ar. 
Query type: \nL34: GenericQuery: (namedtuple).\nL35: * 【8†Evaluate on MIRACL — BGE documentation; publish_date: none†bge-model.com】\nL36: MIRACL (Multilingual Information Retrieval Across a Continuum of Languages) is \nL37: an WSDM 2023 Cup challenge that focuses on search across 18 different languages.\nL38: * 【9†MIRACL - Alpha's Tech Garden; publish_date: \nL39: none†techgarden.alphasmanifesto.com】 MIRACL (Multilingual Information Retrieval \nL40: Across a Continuum of Languages) is a multilingual dataset we have built for the\nL41: WSDM 2023 Cup ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[15] Zero-shot IR benchmark (Search_Results/Zero-shot IR benchmark)\n**viewing lines [0 - 40] of 40**\n\nL0: \nL1: URL: Search_Results/Zero-shot IR benchmark\nL2: # Search Results\nL3: \nL4: * 【0†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; \nL5: publish_date: none†arxiv.org】 by N Thakur · 2021 · Cited by 1480 — We introduce \nL6: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for \nL7: information retrieval.See more\nL8: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR: A Heterogenous \nL9: Benchmark for Zero-shot Evaluation of Information Retrieval Models (NeurIPS \nL10: 2021, Datasets and Benchmarks Track); Resources for Brewing ...See more\nL11: * 【2†Benchmarking IR Information Retrieval (BEIR); publish_date: \nL12: none†zilliz.com】 BEIR is a tool to evaluate how well Information Retrieval \nL13: systems perform across many tasks and types of information, and is a standard \nL14: benchmark.\nL15: * 【3†BEIR: A Heterogeneous Benchmark for Zero-shot ...; publish_date: \nL16: none†datasets-benchmarks-proceedings.neurips.cc】 by N Thakur · Cited by 1480 — \nL17: BEIR is a robust, heterogeneous benchmark for information retrieval, using 18 \nL18: datasets and 9 tasks to evaluate model generalization.\nL19: * 【4†BEIR; publish_date: none†eval.ai】 BEIR is a heterogeneous zero-shot \nL20: retrieval benchmark containing 18 datasets from diverse text retrieval tasks and\nL21: domains.See more\nL22: * 【5†[2409.15763] IRSC: A Zero-shot Evaluation Benchmark for ...; \nL23: publish_date: none†arxiv.org】 by H Lin · 2024 · Cited by 2 — This paper \nL24: introduces the IRSC benchmark for evaluating the performance of embedding models\nL25: in multilingual RAG tasks.See more\nL26: * 【6†FactIR: A Real-World Zero-shot Open-Domain Retrieval ...; publish_date: \nL27: none†dl.acm.org】 May 23, 2025 — In this paper, we present a real-world retrieval\nL28: benchmark FactIR, derived from Factiverse production logs, enhanced with human \nL29: annotations. We ...See more\nL30: * 【7†UniIR: Training and Benchmarking Universal Multimodal ...; publish_date: \nL31: none†tiger-ai-lab.github.io】 At test time, we evaluated the zero-shot \nL32: performance of all fine-tuned models, as well as SoTA pre-trained retrievers on \nL33: the three held-out datasets. 
UniIR ...See more\nL34: * 【8†Zero-Shot BEIR Tasks; publish_date: none†www.emergentmind.com】 Aug 26, \nL35: 2025 — Zero-Shot BEIR Tasks are evaluation methodologies that assess IR models' \nL36: ability to generalize to unseen query domains without task-specific ...See more\nL37: * 【9†BEIR-PL: Zero Shot Information Retrieval Benchmark for ...; publish_date:\nL38: none†aclanthology.org】 by K Wojtasik · 2024 · Cited by 12 — BEIR-PL is a new \nL39: benchmark with 13 datasets for Polish Information Retrieval, created to advance \nL40: research in this area.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[16] WebGPT benchmark (Search_Results/WebGPT benchmark)\n**viewing lines [0 - 38] of 38**\n\nL0: \nL1: URL: Search_Results/WebGPT benchmark\nL2: # Search Results\nL3: \nL4: * 【0†WebGPT: Improving the factual accuracy of language ...; publish_date: \nL5: none†openai.com】 Dec 16, 2021 — Our models outperform GPT‑3 on TruthfulQA and \nL6: exhibit more favourable scaling properties. However, our models lag behind human\nL7: performance, ...\nL8: * 【1†A Simple Yet Challenging Benchmark for Browsing Agents; publish_date: \nL9: none†arxiv.org】 by J Wei · 2025 · Cited by 124 — Abstract. We present \nL10: BrowseComp, a simple yet challenging benchmark for measuring the ability for \nL11: agents to browse the web.\nL12: * 【2†openai/webgpt_comparisons · Datasets at Hugging Face; publish_date: \nL13: none†huggingface.co】 This is the dataset of all comparisons that were marked as \nL14: suitable for reward modeling by the end of the WebGPT project. There are 19,578 \nL15: comparisons in total.\nL16: * 【3†Evaluation & Limitations of WebGPT, WebVoyager & Agent-E; publish_date: \nL17: none†deepsense.ai】 Oct 14, 2024 — WebArena benchmark features 812 tasks \nL18: evaluated using metrics such as Exact Match, Must Include, and Fuzzy Match, \nL19: focusing on outcomes rather ...\nL20: * 【4†OpenAI Announces Question-Answering AI WebGPT; publish_date: \nL21: none†www.infoq.com】 Jan 25, 2022 — On the TriviaQA benchmark, WebGPT \nL22: outperformed GPT-3, producing answers that were true 75% of the time, and \"both \nL23: true and informative\" 54% of ...\nL24: * 【5†WebGPT: Improving the factual accuracy of language models ...; \nL25: publish_date: none†kargarisaac.medium.com】 The top-performing model generated \nL26: answers that were preferred over 56% of the time compared to answers produced by\nL27: human demonstrators, with ...\nL28: * 【6†Browser-assisted question-answering with human feedback; publish_date: \nL29: none†www.alphaxiv.org】 WebGPT represents a significant advancement in long-form \nL30: question answering by combining the language generation capabilities of GPT-3 \nL31: with real-time web ...\nL32: * 【7†Benchmarking Open-Source Large Language Models, GPT-4 ...; publish_date: \nL33: none†ai.nejm.org】 by S Wu · 2024 · Cited by 69 — We show that the current widely\nL34: used open-source LLMs have poor zero-shot reasoning ability in nephrology \nL35: compared with GPT-4 and Claude 2.\nL36: * 【8†0hq/WebGPT: Run GPT model on ...; publish_date: none†github.com】 WebGPT \nL37: is a vanilla JS and HTML implementation of a transformer model, intended as a \nL38: proof-of-concept as well as educational resource.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[17] WebShop benchmark (Search_Results/WebShop benchmark)\n**viewing lines [0 - 41] of 41**\n\nL0: \nL1: URL: Search_Results/WebShop benchmark\nL2: # Search Results\nL3: \nL4: * 【0†WebShop: Towards Scalable 
Real-World Web Interaction ...; publish_date: \nL5: none†arxiv.org】 by S Yao · 2022 · Cited by 710 — To bridge this gap, we develop \nL6: WebShop -- a simulated e-commerce website environment with 1.18 million real-\nL7: world products and 12,087 crowd- ...\nL8: * 【1†WebShop; publish_date: none†webshop-pnlp.github.io】 To bridge this gap, \nL9: we develop WebShop – a simulated e-commerce website environment with 1.18 \nL10: million real-world products and 12,087 crowd-sourced text ...\nL11: * 【2†princeton-nlp/WebShop; publish_date: none†github.com】 WebShop is a \nL12: simulated e-commerce website environment with 1.18 million real-world products \nL13: and 12,087 crowd-sourced text instructions. In this environment, an ...\nL14: * 【3†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: \nL15: none†papers.nips.cc】 by S Yao · 2022 · Cited by 710 — We collect over 1,600 \nL16: human trajectories to first validate the benchmark, then train and evaluate a \nL17: diverse range of agents using reinforcement learning, ...\nL18: * 【4†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: \nL19: none†proceedings.neurips.cc】 by S Yao · 2022 · Cited by 709 — We have developed \nL20: WebShop, a new web-based benchmark for sequential decision making and language \nL21: grounding, modeled on interaction with an e-commerce website.\nL22: * 【5†Webshop & Benchmark Analysis | Documentation Infinity; publish_date: \nL23: none†docs.fact-finder.com】 Aug 15, 2025 — Evaluation of your shop based on \nL24: different categories in comparison, to your competitors/industry. Recommended \nL25: when doing a shop relaunch.\nL26: * 【6†A Multi-Shop Benchmark for Evaluating Web Agents; publish_date: \nL27: none†arxiv.org】 by R Peeters · 2025 · Cited by 2 — Compared to existing \nL28: e-commerce benchmarks, such as WebShop or ShoppingBench, WebMall introduces \nL29: comparison-shopping tasks across multiple shops ...\nL30: * 【7†WebShop: towards scalable real-world web interaction with ...; \nL31: publish_date: none†dl.acm.org】 by S Yao · 2022 · Cited by 710 — To bridge this \nL32: gap, we develop WebShop - a simulated e-commerce website environment with 1.18 \nL33: million real-world products and 12, 087 crowd- ...\nL34: * 【8†[PDF] WebShop: Towards Scalable Real-World Web ...; publish_date: \nL35: none†www.semanticscholar.org】 It is shown that agents trained on WebShop exhibit\nL36: non-trivial sim-to-real transfer when evaluated on amazon.com and ebay.com, \nL37: indicating the potential ...\nL38: * 【9†X-WebAgentBench: A Multilingual Interactive Web ...; publish_date: \nL39: none†aclanthology.org】 by P Wang · 2025 · Cited by 3 — (2023) based on the \nL40: English WebShop benchmark (Yao et al., 2022), while the multilingual task scores\nL41: are ob- tained through evaluation on our own benchmark.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[18] ALFWorld benchmark (Search_Results/ALFWorld benchmark)\n**viewing lines [0 - 31] of 31**\n\nL0: \nL1: URL: Search_Results/ALFWorld benchmark\nL2: # Search Results\nL3: \nL4: * 【0†ALFWorld; publish_date: none†alfworld.github.io】 ALFWorld contains \nL5: interactive TextWorld environments (Côté et. al) that parallel embodied worlds \nL6: in the ALFRED dataset (Shridhar et. 
al).\nL7: * 【1†ALFWorld: Aligning Text and Embodied Environments for ...; publish_date: \nL8: none†arxiv.org】 by M Shridhar · 2020 · Cited by 674 — ALFWorld enables the \nL9: creation of a new BUTLER agent whose abstract knowledge, learned in TextWorld, \nL10: corresponds directly to concrete, visually grounded actions.\nL11: * 【2†ALFWorld: Aligning Text and Embodied Environments ...; publish_date: \nL12: none†github.com】 ALFWorld contains interactive TextWorld environments (Côté et. \nL13: al) that parallel embodied worlds in the ALFRED dataset (Shridhar et. al).\nL14: * 【3†alfworld - benchmark's activity; publish_date: none†huggingface.co】 MM-\nL15: IQ: Benchmarking Human-Like Abstraction and Reasoning in Multimodal Models Paper\nL16: • 2502.00698 • Published Feb 1 • 24\nL17: * 【4†Tackling AlfWorld with Action Attention and Common ...; publish_date: \nL18: none†neurips.cc】 On the Alfworld benchmark for indoor instruction following, we \nL19: achieve a significantly higher success rate (50% over the baseline) with our \nL20: novel object ...\nL21: * 【5†ALFWORLD: ALIGNING TEXT AND EMBODIED ...; publish_date: \nL22: none†openreview.net】 by M Shridhar · Cited by 674 — The ALFRED dataset (Shridhar\nL23: et al., 2020), set in the THOR simulator (Kolve et al., 2017), is a benchmark \nL24: for learning to com- plete embodied household tasks ...\nL25: * 【6†AlfWorld; publish_date: none†primo.ai】 Mar 23, 2024 — A simulator that \nL26: enables agents to learn abstract, text based policies in TextWorld (Côté et al.,\nL27: 2018) and then execute goals from the ALFRED benchmark.\nL28: * 【7†AlfWorld performance across 134 tasks showing cumulative...; \nL29: publish_date: none†www.researchgate.net】 In the AlfWorld benchmark, we defined \nL30: hallucination as the occurrence of two or more consecutive identical actions in \nL31: which the environment responded with ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[19] Mind2Web benchmark (Search_Results/Mind2Web benchmark)\n**viewing lines [0 - 40] of 40**\n\nL0: \nL1: URL: Search_Results/Mind2Web benchmark\nL2: # Search Results\nL3: \nL4: * 【0†Mind2Web: Towards a Generalist Agent for the Web; publish_date: none†osu-\nL5: nlp-group.github.io】 Mind2Web is a dataset for developing and evaluating \nL6: generalist agents for the web that can follow language instructions to complete \nL7: complex tasks on any ...\nL8: * 【1†Online-Mind2Web Leaderboard; publish_date: none†huggingface.co】 Online-\nL9: Mind2Web is a benchmark designed to evaluate the real-world performance of web \nL10: agents on live websites, featuring 300 tasks across 136 popular sites ...\nL11: * 【2†Mind2Web: Towards a Generalist Agent for the Web; publish_date: \nL12: none†github.com】 Mind2Web is the first dataset for developing and evaluating \nL13: generalist agents for the web that can follow language instructions to complete \nL14: complex tasks on any ...\nL15: * 【3†HAL: Online Mind2Web Leaderboard; publish_date: \nL16: none†hal.cs.princeton.edu】 Online Mind2Web leaderboard for evaluating AI agents'\nL17: ability to complete tasks on real, changing webpages.\nL18: * 【4†[2506.21506] Mind2Web 2: Evaluating Agentic Search with ...; \nL19: publish_date: none†arxiv.org】 by B Gou · 2025 · Cited by 11 — In this paper, we \nL20: introduce Mind2Web 2, a benchmark of 130 realistic, high-quality, and long-\nL21: horizon tasks that require real-time web browsing and extensive ...\nL22: * 【5†Mind2Web 2: Evaluating Agentic Search with Agent-as-a-Judge; 
\nL23: publish_date: none†osu-nlp-group.github.io】 We introduce Mind2Web 2, a benchmark\nL24: of 130 realistic, high-quality, long-horizon tasks that require real-time web \nL25: browsing and extensive information ...\nL26: * 【6†Mind2Web: The Benchmark for AI Agent Evaluation and ...; publish_date: \nL27: none†www.enhans.ai】 Sep 26, 2025 — Mind2Web is a globally recognized web-based \nL28: AI Agent evaluation benchmark introduced by the NLP group at Ohio State \nL29: University at NeurIPS 2023.\nL30: * 【7†Evaluating AI Web Agents: Insights from the WebCanvas ...; publish_date: \nL31: none†medium.com】 Thanks to the comprehensive WebCanvas Benchmark, which \nL32: incorporates a robust Mind2Web-Live data set of 542 live web tasks and 2,439 ...\nL33: * 【8†Mind2Web: Towards a Generalist Agent for the Web; publish_date: \nL34: none†proceedings.neurips.cc】 by X Deng · 2023 · Cited by 760 — We introduce \nL35: Mind2Web, the first dataset for developing and evaluating generalist agents for \nL36: the web that can follow language instructions to complete complex ...\nL37: * 【9†Mind2Web: Towards a Generalist Agent for the Web; publish_date: \nL38: none†openreview.net】 by X Deng · Cited by 760 — We introduce Mind2Web, the first\nL39: dataset for developing and evaluating generalist agents for the web that can \nL40: follow language instructions to complete complex ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[20] VisualWebArena benchmark (Search_Results/VisualWebArena benchmark)\n**viewing lines [0 - 38] of 38**\n\nL0: \nL1: URL: Search_Results/VisualWebArena benchmark\nL2: # Search Results\nL3: \nL4: * 【0†VisualWebArena is a benchmark for multimodal agents.; publish_date: \nL5: none†github.com】 VisualWebArena is a realistic and diverse benchmark for \nL6: evaluating multimodal autonomous language agents.\nL7: * 【1†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date:\nL8: none†arxiv.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we \nL9: introduce VisualWebArena, a benchmark designed to assess the performance of \nL10: multimodal web agents on realistic \\textit{ ...\nL11: * 【2†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date:\nL12: none†jykoh.com】 To bridge this gap, we introduce VisualWebArena, a benchmark \nL13: designed to assess the performance of multimodal web agents on realistic \nL14: visually grounded tasks.\nL15: * 【3†VisualWebArena: Evaluating Multimodal Agents on ...; publish_date: \nL16: none†arxiv.org】 VisualWebArena is a research benchmark to measure and evaluate \nL17: the progress of multimodal agents. It is primarily meant to act as a self-\nL18: contained sandbox ...\nL19: * 【4†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date:\nL20: none†aclanthology.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we\nL21: introduce VisualWebArena, a benchmark designed to assess the performance of \nL22: multimodal web agents on *realistic visually grounded tasks*.\nL23: * 【5†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date:\nL24: none†www.semanticscholar.org】 VisualWebArena: Evaluating Multimodal Agents on \nL25: Realistic Visual Web Tasks ... 
MMInA, a multihop and multimodal benchmark to \nL26: evaluate the embodied agents ...\nL27: * 【6†CMU Researchers Introduce VisualWebArena: An AI ...; publish_date: \nL28: none†www.marktechpost.com】 Feb 9, 2024 — VisualWebArena, a benchmark designed \nL29: and developed to evaluate the performance of multimodal web agents on realistic \nL30: and visually stimulating challenges.\nL31: * 【7†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date:\nL32: none†www.themoonlight.io】 The paper \"VisualWebArena: Evaluating Multimodal \nL33: Agents on Realistic Visually Grounded Web Tasks\" introduces a new benchmark, \nL34: **VisualWebArena**, ...\nL35: * 【8†WebArena: A Realistic Web Environment for Building ...; publish_date: \nL36: none†webarena.dev】 Our benchmark is implemented in our fully interactable \nL37: highly-realistic WebArena environment. It features diverse tasks human may \nL38: encounter in their daily ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[21] SearchBench benchmark (Search_Results/SearchBench benchmark)\n**viewing lines [0 - 40] of 40**\n\nL0: \nL1: URL: Search_Results/SearchBench benchmark\nL2: # Search Results\nL3: \nL4: * 【0†Talc-AI/search-bench; publish_date: none†github.com】 A practical \nL5: benchmark that focuses on every day helpfulness of LLM products, not just the \nL6: underlying models. Searchbench is a benchmark that addresses these ...\nL7: * 【1†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: \nL8: none†arxiv.org】 These capabilities are essential for robust reasoning, making \nL9: SearchBench a valuable benchmark for evaluating LLMs' reasoning capabilities as \nL10: they continue to ...\nL11: * 【2†NasimBrz/SearchBench · Datasets at Hugging Face; publish_date: \nL12: none†huggingface.co】 Dataset Summary. SearchBench is a benchmark designed to \nL13: evaluate Language Models' (LLMs) ability to solve state-based problems that \nL14: require combinatorial search ...\nL15: * 【3†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: \nL16: none†openreview.net】 2025年10月22日 — To further investigate this, we introduce a \nL17: new benchmark, SearchBench, which contains 11 unique search problems inspired by\nL18: intuitive puzzles.\nL19: * 【4†Navigating the Labyrinth: Evaluating and Enhancing LLMs' ...; \nL20: publish_date: none†hub.baai.ac.cn】 2024年6月17日 — \nL21: 论文提出了一个新的基准测试SearchBench,包含11种独特的搜索问题类型,并自动化生成任意数量的实例和分析解决方案的可行性、正确性和最优性。论文使用A* \nL22: ...\nL23: * 【5†Towards Unified Text-based Person Retrieval: A Large- ...; publish_date: \nL24: none†blog.csdn.net】 2023年10月17日 — ... Search \nL25: Benchmark(面向统一的基于文本的人物检索:一个大规模的多属性和语言搜索基准); 研究背景. 相关工作; BENCHMARK. 论文方法分析. 网络框架;\nL26: 1、APTM ...\nL27: * 【6†Desearch-ai/ai-search-benchmark; publish_date: none†github.com】 The \nL28: SearchBench repository addresses common issues with traditional benchmarks by \nL29: focusing on practical, everyday use cases rather than theoretical limits. It ...\nL30: * 【7†o1 results for 3 benchmarks: PlanBench, SearchBench, ...; publish_date: \nL31: none†www.reddit.com】 o1 results for 3 benchmarks: PlanBench, SearchBench, and \nL32: Summary of a Haystack. AI. PlanBench: Paper \"LLMs Still Can't Plan; Can LRMs? 
A \nL33: ...\nL34: * 【8†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: \nL35: none†ui.adsabs.harvard.edu】 To further investigate this, we introduce a new \nL36: benchmark, SearchBench, which contains 11 unique search problems inspired by \nL37: intuitive puzzles. Each SearchBench ...\nL38: * 【9†Introducing SearchBench; publish_date: none†www.tag1consulting.com】 \nL39: Toward this goal, over the weekend I launched a new project called SearchBench, \nL40: a Drupal module for benchmarking Drupal's search performance. As the module ...", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[22] WebVLN benchmark (Search_Results/WebVLN benchmark)\n**viewing lines [0 - 42] of 42**\n\nL0: \nL1: URL: Search_Results/WebVLN benchmark\nL2: # Search Results\nL3: \nL4: * 【0†WebVLN: Vision-and-Language Navigation on Websites; publish_date: \nL5: none†ojs.aaai.org】 by Q Chen · 2024 · Cited by 35 — the WebVLN-v1 dataset, where\nL6: the performance is far from saturation, highlighting the utility of our \nL7: WebVLN-v1 as a benchmark to assess progress in this field.\nL8: * 【1†[2312.15820] WebVLN: Vision-and-Language Navigation on Websites; \nL9: publish_date: none†ar5iv.labs.arxiv.org】 Experimental results show that WebVLN-\nL10: Net outperforms current VLN and web-related navigation methods. ... Code is \nL11: available at: https://github.com/WebVLN/WebVLN.\nL12: * 【2†WebVLN: Vision-and-Language Navigation on Websites; publish_date: \nL13: none†github.com】 Experimental results show that WebVLN-Net outperforms current \nL14: VLN and web-related navigation methods. We believe that the introduction of the \nL15: new WebVLN task ...\nL16: * 【3†Vision-and-Language Navigation in the Real-World; publish_date: \nL17: none†digital.library.adelaide.edu.au】 By leveraging our proposed WebVLN-v1 \nL18: dataset, experimental results showcase the superior performance of WebVLN-Net \nL19: compared to existing VLN and web-related ...\nL20: * 【4†WebVLN: Vision-and-Language Navigation on Websites; publish_date: \nL21: none†www.researchgate.net】 Experimental results show that WebVLN-Net outperforms\nL22: current VLN and web-related navigation methods. We believe that the \nL23: introduction of the newWebVLN task and ...\nL24: * 【5†[PDF] WebVLN: Vision-and-Language Navigation on Websites; publish_date: \nL25: none†www.semanticscholar.org】 A new task named Vision-and-Language Navigation on\nL26: Websites (WebVLN), where question-based instructions are used to train an \nL27: agent, emulating how users ...\nL28: * 【6†WebVLN: Vision-and-Language Navigation on Websites; publish_date: \nL29: none†arxiv.org】 by Q Chen · 2023 · Cited by 35 — Experimental results show that \nL30: WebVLN-Net outperforms current VLN and web-related navigation methods. We \nL31: believe that the introduction of the ...\nL32: * 【7†Human-Aware Vision-and-Language Navigation; publish_date: \nL33: none†proceedings.neurips.cc】 by H Li · 2024 · Cited by 19 — Vision-and-Language \nL34: Navigation (VLN) [2, 7, 9, 40] has emerged as a key benchmark for evaluating. 
\nL35: Sim2Real transfer [23], showing impressive performance in ...\nL36: * 【8†LiveBench; publish_date: none†livebench.ai】 Introducing LiveBench: a \nL37: benchmark for LLMs designed with test set contamination and objective evaluation\nL38: in mind.\nL39: * 【9†MG-VLN: Benchmarking Multi-Goal and Long-Horizon ...; publish_date: \nL40: none†ieeexplore.ieee.org】 by J Zhang · 2024 — This task aims to provide a \nL41: simulation benchmark to guide the design of lifelong and long-horizon navigation\nL42: robots.", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[23] WebNav benchmark (Search_Results/WebNav benchmark)\n**viewing lines [0 - 36] of 36**\n\nL0: \nL1: URL: Search_Results/WebNav benchmark\nL2: # Search Results\nL3: \nL4: * 【0†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: \nL5: none†github.com】 WebNav is a benchmark task for evaluating an agent with \nL6: abilities to understand natural language and plan on partially observed \nL7: environments.\nL8: * 【1†[1602.02261] End-to-End Goal-Driven Web Navigation; publish_date: \nL9: none†arxiv.org】 by R Nogueira · 2016 · Cited by 39 — We propose a goal-driven \nL10: web navigation as a benchmark task for evaluating an agent with abilities to \nL11: understand natural language and plan on partially ...\nL12: * 【2†nyu-dl/dl4ir-webnav; publish_date: none†github.com】 WebNav is a benchmark\nL13: task for evaluating an agent with abilities to understand natural language and \nL14: plan on partially observed environments.\nL15: * 【3†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: \nL16: none†www.researchgate.net】 We propose a goal-driven web navigation as a \nL17: benchmark task for evaluating an agent with abilities to understand natural \nL18: language and plan on partially ...\nL19: * 【4†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: \nL20: none†arxiv.org】 In this paper, we introduce WebNav, an innovative multi-modal \nL21: agent designed to automate web tasks based on natural language.\nL22: * 【5†WebCanvas: Benchmarking Web Agents in Online ...; publish_date: \nL23: none†openreview.net】 by Y Pan · Cited by 78 — TL;DR: We introduce WebCanvas, an \nL24: online evaluation framework for web agents designed to address the dynamic \nL25: nature of web interactions.\nL26: * 【6†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: \nL27: none†huggingface.co】 Mar 17, 2025 — Preliminary evaluations show that WebNav \nL28: outperforms traditional screen readers in response time and task completion \nL29: accuracy for the visually ...\nL30: * 【7†Web Bench - A new way to compare AI Browser Agents; publish_date: \nL31: none†www.skyvern.com】 May 29, 2025 — TL;DR: Web Bench is a new dataset to \nL32: evaluate web browsing agents that consists of 5,750 tasks on 452 different \nL33: websites, with 2,454 tasks ...\nL34: * 【8†Web Framework Benchmarks; publish_date: none†www.techempower.com】 In the \nL35: following tests, we have measured the performance of several web application \nL36: platforms, full-stack frameworks, and micro-frameworks.", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "我们收到了大量关于各种基准测试的搜索结果。现在,我们需要整理一份针对搜索智能体的基准测试列表。这些基准测试可以分为几类:网络浏览智能体基准测试(例如BrowseComp、WebVoyager、WebArena、VisualWebArena、Mind2Web、WebVLN、WebNav)、工具使用智能体基准测试(例如ToolBench、GTA)、问答/检索基准测试(例如HotpotQA、FEVER、TriviaQA、Natural Questions、MS 
MARCO、BEIR、MIRACL),以及特定领域的搜索基准测试(例如HSCodeComp、WebShop、ALFWorld)。此外,还有一些通用智能体基准测试,例如GAIA、SuperCLUE-Agent等。用户要求“详细介绍各自的特点、使用场景、例题”。因此,我需要为每个基准测试提供详细的描述,包括其特点、使用场景,并举例说明。为了全面获取这些信息,我将同时打开以下基准测试的相关页面:WebVoyager(索引3)、WebArena(索引1)、GAIA(索引1)、ToolBench(索引0)、HotpotQA(索引0)、FEVER(索引1)、TriviaQA(索引0)、Natural Questions(索引0)、MS MARCO(索引0)、BEIR(索引1)、MIRACL(索引0)、WebShop(索引0)、ALFWorld(索引0)、Mind2Web(索引0)、VisualWebArena(索引0)、SearchBench(索引1)、WebVLN(索引1)和WebNav(索引0)。", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 4, \"id\": 3}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 5, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 6, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 7, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 8, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 9, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 10, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 11, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 12, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 13, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 14, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 17, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 18, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 19, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 20, \"id\": 0}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 21, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 22, \"id\": 1}" + } + }, + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 23, \"id\": 0}" + } + } + ] + }, + { + "role": "tool", + "content": "[24] (https://ar5iv.org/abs/2401.13919)\n**viewing lines [0 - 129] of 825**\n\nL0: \nL1: URL: https://ar5iv.org/abs/2401.13919\nL2: # WebVoyager : Building an End-to-End Web Agent with\nL3: Large Multimodal Models\nL4: \nL5: Hongliang He1,3 , Wenlin Yao2, Kaixin Ma2, Wenhao Yu2, Yong Dai2,\nL6: \nL7: Hongming Zhang2,\nL8: Zhenzhong Lan3,\nL9: Dong Yu2\nL10: 1Zhejiang University,\nL11: 2Tencent AI Lab,\nL12: 3Westlake University\nL13: hehongliang@westlake.edu.cn, wenlinyao@global.tencent.com\nL14: Work done during the internship at Tencent AI Lab.\nL15: \nL16: ###### Abstract\nL17: \nL18: The advancement of large language models (LLMs) leads to a new era marked by the\nL19: development of autonomous applications in the real world, 
which drives \nL20: innovation in the creation of advanced web-based agents. Existing web agents \nL21: typically only handle one input modality and are evaluated only in simplified \nL22: web simulators or static web snapshots, greatly limiting their applicability in \nL23: real-world scenarios. To bridge this gap, we introduce WebVoyager, an innovative\nL24: Large Multimodal Model (LMM) powered web agent that can complete user \nL25: instructions end-to-end by interacting with real-world websites. Moreover, we \nL26: propose a new evaluation protocol for web agents to address the challenges of \nL27: automatic evaluation of open-ended web agent tasks, leveraging the robust \nL28: multimodal comprehension capabilities of GPT-4V. We create a new benchmark by \nL29: gathering real-world tasks from 15 widely used websites to evaluate our agents. \nL30: We show that WebVoyager achieves a 55.7% task success rate, significantly \nL31: surpassing the performance of both GPT-4 (All Tools) and the WebVoyager (text-\nL32: only) setups, underscoring the exceptional capability of WebVoyager in practical\nL33: applications. We found that our proposed automatic evaluation achieves 85.3% \nL34: agreement with human judgment, paving the way for further development of web \nL35: agents in a real-world setting.111Our code and data will be released at \nL36: https://github.com/MinorJerry/WebVoyager\nL37: \nL38: ## 1 Introduction\nL39: \nL40: The recent advancement of large language models (LLMs), such as ChatGPT and \nL41: GPT-4 (OpenAI, 2023), have sparked significant interest in developing LLM-based \nL42: autonomous agents (AutoGPT, 2022) for complex task execution (Qin et al., 2023; \nL43: Schick et al., 2023). Recent studies have explored the construction of text-\nL44: based web browsing environments and how to instruct large language model agents \nL45: to perform web navigation (Nakano et al., 2021; Gur et al., 2023; Zhou et al., \nL46: 2023; Lu et al., 2023). The primary challenge in these works lies in managing \nL47: complex and verbose HTML texts, and solutions include simplifying and \nL48: structuring HTML (Nakano et al., 2021; Zhou et al., 2023; Gur et al., 2023; Deng\nL49: et al., 2023).\nL50: \nL51: However, existing approaches overlook a critical functionality of browsing: \nL52: rendering HTML into visual webpages. Particularly, vision capability is crucial \nL53: for utilizing tools like web browsers, as rendered web pages are inherently \nL54: designed with user experience (UX), emphasizing intuitive information and \nL55: structured presentation. This design principle of rendering makes visual \nL56: analysis more effective than mere HTML representation. At present, large \nL57: multimodal models (LMMs), particularly GPT-4V(ision) (OpenAI, 2023) and Gemini \nL58: (Team et al., 2023), demonstrate a remarkable ability to integrate intricate \nL59: visual cues with textual information. Existing studies such as Pix2Struct (Lee \nL60: et al., 2023) and WebArena (Zhou et al., 2023), have initiated explorations into\nL61: using screenshots as inputs for decision-making in web navigation, yet these \nL62: are preliminary and do not represent a deep exploration. 
Therefore, building \nL63: multimodal web agents to leverage the environment rendered by browsers through \nL64: screenshots, thus mimicking human web browsing behavior, is now a viable \nL65: approach to enhance web navigation efficiency.\nL66: \nL67: We introduce WebVoyager, a multimodal web agent designed to handle web tasks \nL68: online in an end-to-end manner, which denotes managing the process from start to\nL69: finish autonomously without intermediate human intervention. We construct an \nL70: online environment using Selenium for WebVoyager, feeding it with screenshots \nL71: and textual content in interactive web elements. Inspired by Set-of-Mark \nL72: Prompting (Yang et al., 2023a), we mark interactive web elements on screenshots \nL73: (see Figure 2) to facilitate decision-making for WebVoyager. As a pioneer in \nL74: combining vision and text information during web navigation, we advocate that \nL75: autonomous end-to-end task completion, multimodal capabilities and online \nL76: navigation constitute the essential trajectory toward the genuine intelligence \nL77: of web agents.\nL78: \nL79: Another challenge arises when it comes to evaluating an end-to-end web agent \nL80: with online navigation. Existing benchmarks, such as Mind2Web (Deng et al., \nL81: 2023), primarily focus on stepwise and offline evaluation, where agents follow \nL82: predefined “golden” trajectory for action selection. This approach, however, may\nL83: not fully account for the variety of viable strategies to accomplish a task, as\nL84: it only reflects one possible plan. This limitation could lead to a biased \nL85: evaluation and difficulties in fairly comparing different methods. To more \nL86: accurately gauge the capabilities of web agents in end-to-end task completion, \nL87: we save screenshots throughout the online navigation process, and then use \nL88: GPT-4V to evaluate these trajectories and the final results automatically. Human\nL89: evaluations are also conducted to verify the results and confirm the \nL90: reliability of GPT-4V as the evaluator.\nL91: \nL92: We conduct evaluations on a collected dataset, which is semi-automatically \nL93: generated using a self-instruct (Wang et al., 2022) method, comprising 300 web \nL94: tasks from 15 commonly accessed websites. Additionally, we extract 90 web-\nL95: related tasks of level 1 and level 2 from the GAIA (Mialon et al., 2023) to \nL96: enrich our evaluation. We compare our WebVoyager with 1) GPT-4 (All \nL97: Tools)222GPT-4 (All Tools) is an integrated tool-based agent released by OpenAI \nL98: in Oct. 2023. See https://chat.openai.com/, and 2) WebVoyager in a text-only \nL99: setting, employing the accessibility tree proposed in WebArena (Zhou et al., \nL100: 2023) to describe web pages. The results show that WebVoyager achieves a Task \nL101: Success Rate of 55.7%, significantly outperforming GPT-4 (All Tools) with a rate\nL102: of 32.7% and the text-only setting with a rate of 39.0%, demonstrating the \nL103: effectiveness of our method. Furthermore, we report the consistency between \nL104: human-human and human-GPT4V to ensure credibility. 
Our main contributions are as\nL105: follows:\nL106: \nL107: - •\nL108: We employ a multimodal web agent that integrates textual and visual information \nL109: to address web tasks end-to-end and introduce a generalist planning approach for\nL110: navigation.\nL111: \nL112: - •\nL113: We build an online web browsing environment, offering a variety of tasks \nL114: centered on widely used websites and introducing a method for expanding these \nL115: tasks.\nL116: \nL117: - •\nL118: We conduct manual evaluations of navigation trajectories and propose an \nL119: automated evaluation protocol using GPT-4V. We present a comprehensive analysis \nL120: of the evaluation results and show that GPT-4V can serve as a reliable evaluator\nL121: for online agents.\nL122: \nL123: ## 2 Related Work\nL124: \nL125: ### 2.1 Web Agents\nL126: \nL127: Autonomous web navigation (Shi et al., 2017; Yang et al., 2023b) requires an \nL128: agent to follow instructions, construct plans, comprehend complex web \nL129: structures, and decompose tasks into step-by-step decisions (Weng, 2023). To ", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[25] (https://ar5iv.org/abs/2307.13854)\n**viewing lines [0 - 124] of 1092**\n\nL0: \nL1: URL: https://ar5iv.org/abs/2307.13854\nL2: # WebArena: A Realistic Web Environment for Building Autonomous Agents\nL3: \nL4: Shuyan Zhou Frank F. Xu11footnotemark: 1 Hao Zhu Xuhui Zhou22footnotemark: 2\nL5: \nL6: Robert Lo22footnotemark: 2 Abishek Sridhar22footnotemark: 2 Xianyi Cheng Tianyue\nL7: Ou\nL8: Yonatan Bisk Daniel Fried Uri Alon Graham Neubig\nL9: Carnegie Mellon University\nL10: {shuyanzh, fangzhex, gneubig}@cs.cmu.edu\nL11: Lead contributors.Equal contribution.\nL12: \nL13: ###### Abstract\nL14: \nL15: With advances in generative AI, there is now potential for autonomous agents to \nL16: manage daily tasks via natural language commands. However, current agents are \nL17: primarily created and tested in simplified synthetic environments, leading to a \nL18: disconnect with real-world scenarios. In this paper, we build an environment for\nL19: language-guided agents that is highly realistic and reproducible. Specifically,\nL20: we focus on agents that perform tasks on the web, and create an environment \nL21: with fully functional websites from four common domains: e-commerce, social \nL22: forum discussions, collaborative software development, and content management. \nL23: Our environment is enriched with tools (e.g., a map) and external knowledge \nL24: bases (e.g., user manuals) to encourage human-like task-solving. Building upon \nL25: our environment, we release a set of benchmark tasks focusing on evaluating the \nL26: functional correctness of task completions. The tasks in our benchmark are \nL27: diverse, long-horizon, and designed to emulate tasks that humans routinely \nL28: perform on the internet. We experiment with several baseline agents, integrating\nL29: recent techniques such as reasoning before acting. The results demonstrate that\nL30: solving complex tasks is challenging: our best GPT-4-based agent only achieves \nL31: an end-to-end task success rate of 14.41%, significantly lower than the human \nL32: performance of 78.24%. 
These results highlight the need for further development \nL33: of robust agents, that current state-of-the-art large language models are far \nL34: from perfect performance in these real-life tasks, and that WebArena can be used\nL35: to measure such progress.\nL36: \nL37: Our code, data, environment reproduction resources, and video demonstrations are\nL38: publicly available at https://webarena.dev/.\nL39: \nL40: ## 1 Introduction\nL41: \nL42: Autonomous agents that perform everyday tasks via human natural language \nL43: commands could significantly augment human capabilities, improve efficiency, and\nL44: increase accessibility. Nonetheless, to fully leverage the power of autonomous \nL45: agents, it is crucial to understand their behavior within an environment that is\nL46: both authentic and reproducible. This will allow measurement of the ability of \nL47: agents on tasks that human users care about in a fair and consistent manner.\nL48: \nL49: Current environments for evaluate agents tend to over-simplify real-world \nL50: situations. As a result, the functionality of many environments is a limited \nL51: version of their real-world counterparts, leading to a lack of task diversity \nL52: (Shi et al., 2017; Anderson et al., 2018; Gordon et al., 2018; Misra et al., \nL53: 2016; Shridhar et al., 2020; 2021; Yao et al., 2022a). In addition, these \nL54: simplifications often lower the complexity of tasks as compared to their \nL55: execution in the real world (Puig et al., 2018; Shridhar et al., 2020; Yao et \nL56: al., 2022a). Finally, some environments are presented as a static resource (Shi \nL57: et al., 2017; Deng et al., 2023) where agents are confined to accessing only \nL58: those states that were previously cached during data collection, thus limiting \nL59: the breadth and diversity of exploration. Dor evaluation, many environments \nL60: focus on comparing the textual surface form of the predicted action sequences \nL61: with reference action sequences, disregarding the functional correctness of the \nL62: executions and possible alternative solutions (Puig et al., 2018; Jernite et \nL63: al., 2019; Xu et al., 2021; Li et al., 2020; Deng et al., 2023). These \nL64: limitations often result in a discrepancy between simulated environments and the\nL65: real world, and can potentially impact the generalizability of AI agents to \nL66: successfully understand, adapt, and operate within complex real-world \nL67: situations.\nL68: \nL69: We introduce WebArena, a realistic and reproducible web environment designed to \nL70: facilitate the development of autonomous agents capable of executing tasks (§2).\nL71: An overview of WebArena is in Figure 1. Our environment comprises four fully \nL72: operational, self-hosted web applications, each representing a distinct domain \nL73: prevalent on the internet: online shopping, discussion forums, collaborative \nL74: development, and business content management. Furthermore, WebArena incorporates\nL75: several utility tools, such as map, calculator, and scratchpad, to best support\nL76: possible human-like task executions. Lastly, WebArena is complemented by an \nL77: extensive collection of documentation and knowledge bases that vary from general\nL78: resources like English Wikipedia to more domain-specific references, such as \nL79: manuals for using the integrated development tool (Fan et al., 2022). 
The \nL80: content populating these websites is extracted from their real-world \nL81: counterparts, preserving the authenticity of the content served on each \nL82: platform. We deliver the hosting services using Docker containers with gym-APIs \nL83: (Brockman et al., 2016), ensuring both the usability and the reproducibility of \nL84: WebArena.\nL85: \nL86: Along with WebArena, we release a ready-to-use benchmark with 812 long-horizon \nL87: web-based tasks (§3). Each task is described as a high-level natural language \nL88: intent, emulating the abstract language usage patterns typically employed by \nL89: humans (Bisk et al., 2019). Two example intents are shown in the upper left of \nL90: Figure 1. We focus on evaluating the functional correctness of these tasks, \nL91: i.e., does the result of the execution actually achieve the desired goal (§3.2).\nL92: For instance, to evaluate the example in Figure 2, our evaluation method \nL93: verifies the concrete contents in the designated repository. This evaluation is \nL94: not only more reliable (Zhong et al., 2017; Chen et al., 2021; Wang et al., \nL95: 2022) than comparing the textual surface-form action sequences (Puig et al., \nL96: 2018; Deng et al., 2023) but also accommodate a range of potential valid paths \nL97: to achieve the same goal, which is a ubiquitous phenomenon in sufficiently \nL98: complex tasks.\nL99: \nL100: We use this benchmark to evaluate several agents that can follow NL command and \nL101: perform web-based tasks (§4). These agents are implemented in a few-shot in-\nL102: context learning fashion with powerful large language models (LLMs) such as \nL103: GPT-4 and PALM-2. Experiment results show that the best GPT-4 agent performance \nL104: is somewhat limited, with an end-to-end task success rate of only 14.41%, while \nL105: the human performance is 78.24%. We hypothesize that the limited performance of \nL106: current LLMs stems from a lack of crucial capabilities such as active \nL107: exploration and failure recovery to successfully perform complex tasks (§5.2). \nL108: These outcomes underscore the necessity for further development towards robust \nL109: and effective agents (LeCun, 2022) in WebArena.\nL110: \nL111: ## 2 WebArena: Websites as an Environment for Autonomous Agents\nL112: \nL113: Our goal is to create a realistic and reproducible web environment. We achieve \nL114: reproducibility by making the environment standalone, without relying on live \nL115: websites. This circumvents technical challenges such as bots being subject to \nL116: CAPTCHAs, unpredictable content modifications, and configuration changes, which \nL117: obstruct a fair comparison across different systems over time. 
We achieve \nL118: realism by using open-source libraries that underlie many in-use sites from \nL119: several popular categories and importing data to our environment from their \nL120: real-world counterparts.\nL121: \nL122: ### 2.1 Controlling Agents through High-level Natural Language\nL123: \nL124: The WebArena environment is denoted asℰ\\mathcal{E} with state space ", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[26] (https://ar5iv.org/abs/2311.12983)\n**viewing lines [0 - 118] of 1207**\n\nL0: \nL1: URL: https://ar5iv.org/abs/2311.12983\nL2: 1]FAIR, Meta 2]HuggingFace 3]AutoGPT 4]GenAI, Meta\nL3: \nL4: # GAIA: A Benchmark for General AI Assistants\nL5: \nL6: Grégoire Mialon Clémentine Fourrier Craig Swift Thomas Wolf Yann LeCun Thomas \nL7: Scialom [ [ [ [ {gmialon,tscialom}@meta.com clementine@huggingface.co\nL8: \nL9: ###### Abstract\nL10: \nL11: We introduce GAIA, a benchmark for General AI Assistants that, if solved, would \nL12: represent a milestone in AI research. GAIA proposes real-world questions that \nL13: require a set of fundamental abilities such as reasoning, multi-modality \nL14: handling, web browsing, and generally tool-use proficiency. GAIA questions are \nL15: conceptually simple for humans yet challenging for most advanced AIs: we show \nL16: that human respondents obtain 92% vs. 15% for GPT-4 equipped with plugins. This \nL17: notable performance disparity contrasts with the recent trend of LLMs \nL18: outperforming humans on tasks requiring professional skills in e.g. law or \nL19: chemistry. GAIA’s philosophy departs from the current trend in AI benchmarks \nL20: suggesting to target tasks that are ever more difficult for humans. We posit \nL21: that the advent of Artificial General Intelligence (AGI) hinges on a system’s \nL22: capability to exhibit similar robustness as the average human does on such \nL23: questions. Using GAIA’s methodology, we devise 466 questions and their answer. \nL24: We release our questions while retaining answers to 300 of them to power a \nL25: leader-board hereby accessible.\nL26: \nL27: \\correspondence\nL28: \nL29: ## 1 Introduction\nL30: \nL31: Large Language Models (LLMs) arguably open the way to general purpose systems. \nL32: Indeed, the latest among them (OpenAI, 2023; Anthropic, 2023; Anil et al., 2023;\nL33: Touvron et al., 2023) are fluent, knowledgeable, aligned to some extent with \nL34: human preferences (Ouyang et al., 2022), and can be augmented (Mialon et al., \nL35: 2023) with tools such as web browsers or code interpreters in a zero or few-shot\nL36: setting (Brown et al., 2020). However, evaluating these systems is an open \nL37: problem: given their emerging new capabilities, LLMs are regularly breaking AI \nL38: benchmarks, at an ever-increasing rate (Kiela et al., 2023).\nL39: \nL40: In search for more challenging benchmarks, current trend suggests to seek tasks \nL41: that are ever more difficult for humans, and challenge LLMs with more intricate \nL42: educational assessments, for example in STEM and Law, or target more complex \nL43: realisations, such as writing a coherent book. But, tasks that are difficult for\nL44: humans are not necessarily difficult for recent systems: the challenging MMLU \nL45: or GSM8k benchmarks for example (Hendrycks et al., 2021; Cobbe et al., 2021) are\nL46: already close to be solved,111GPT4 does 86.4% on MMLU. Human non-specialist \nL47: accuracy on the benchmark is only 34.5% Expert-level human performance is \nL48: estimated at 89.8%. 
due to rapid LLM improvement possibly combined with data \nL49: contamination.222See for example the case of Hellaswag. Furthermore, open-ended \nL50: generation generally requires human or model-based evaluation (Zheng et al., \nL51: 2023). Human evaluation will become less and less feasible when increasing the \nL52: task complexity, e.g. in terms of output length or required skills: how to \nL53: evaluate a book generated by an AI, or solutions to maths problems that few \nL54: people in the world can solve? Model-based evaluations on the other hand are by \nL55: construction dependent of stronger models hence cannot evaluate new state-of-\nL56: the-art models, without mentioning potential subtle biases such as preferring \nL57: the first choice presented (Zheng et al., 2023). Overall, evaluating new AI \nL58: systems requires to rethink benchmarks (Chollet, 2019).\nL59: \nL60: Alternatively to tasks that are harder for humans, AI systems could be asked to \nL61: solve conceptually simple tasks yet that require accurate execution of complex \nL62: sequences of actions, with large combinatorial spaces. The output could only be \nL63: obtained upon successful completion of the task and be easy to validate, \nL64: analogous to the Proof of Work algorithm (Jakobsson and Juels, 1999; Dwork and \nL65: Naor, 1993), where a computer is asked to solve a complex problem whose solution\nL66: is easy to verify. Tasks for AI assistants, given their need for access to a \nL67: diverse and uncertain world, meet this criterion while being inherently rooted \nL68: in practical use cases.\nL69: \nL70: We move in that direction by proposing GAIA, a benchmark for General AI \nL71: Assistants featuring 466 carefully crafted questions and their answer, along \nL72: with the associated design methodology. Our questions are easy to create, \nL73: challenging for AI systems—for LLMs, most require complex generations—, yet \nL74: admit a unique, factual answer, allowing a simple and robust automatic \nL75: evaluation.\nL76: \nL77: GAIA attempts to avoid current pitfalls of LLMs evaluation by targeting:\nL78: \nL79: Real-world and challenging questions. For example, a LLM will typically need to \nL80: browse the open and changing web, handle multi-modality, or reason over multiple\nL81: steps to answer our questions. Conversely, many LLM benchmarks are quite \nL82: specific and/or restricted to closed and synthetic environments.\nL83: \nL84: Easy interpretability through conceptually simple tasks—non experts annotators \nL85: exhibit a near perfect score—, associated reasoning trace, and few but highly \nL86: curated questions. This is in contrast with aggregated benchmarks that can lack \nL87: efficiency and reliability (Perlitz et al., 2023).\nL88: \nL89: Non-gameability. Answering the questions requires successful completion of some \nL90: number of steps, which cannot easily be brute forced due to their diversity. The\nL91: possibility to check the reasoning trace, the accuracy required in the answers,\nL92: their absence in plain text from the internet prevent a possible data \nL93: contamination. In contrast, multiple choice answers (e.g., MMLU) make \nL94: contamination assessment more difficult since a wrong reasoning trace can more \nL95: easily get to the correct choice.\nL96: \nL97: Simplicity of use. Crucially, the answers to our questions are factoid, concise \nL98: and unambiguous. These properties allow simple, fast and factual evaluation. 
Our\nL99: questions are meant to be answered in zero shot, limiting the influence of the \nL100: evaluation setup. By opposition, many LLM benchmarks require evaluations that \nL101: are sensitive to the experimental setup such as the number and nature of prompts\nL102: (Liang et al., 2022b) (Section 8.2), or the benchmark \nL103: implementation.333https://huggingface.co/blog/evaluating-mmlu-leaderboard\nL104: \nL105: In spite of being successful at tasks that are difficult for humans, the most \nL106: capable LLMs do poorly on GAIA. Even equipped with tools, GPT4 does not exceed a\nL107: 30% success rate for the easiest of our tasks, and 0% for the hardest. In the \nL108: meantime, the average success rate for human respondents is 92%. Consequently, a\nL109: system capable of solving GAIA can be assessed in the context of t-AGI,444As \nL110: defined in https://www.alignmentforum.org/posts/BoA3agdkAzL6HQtQP/clarifying-\nL111: and-predicting-agi, a t-AGI beats, on most tasks, most human experts who are \nL112: given time t to perform the task noting that humans typically take between 6 \nL113: minutes for the simplest questions to 17 minutes for the most complex ones. From\nL114: a related perspective, such system would arguably be a competent General AI \nL115: within the framework recently proposed in Morris et al. (2023), which also \nL116: appear to be the next milestone in AI research since ChatGPT (OpenAI, 2023) is \nL117: one level below. This paper covers the composition of GAIA, its design choices, \nL118: and explain how to craft questions and the associated challenges so that the ", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://github.com/sambanova/toolbench`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://hotpotqa.github.io/`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[27] (https://aclanthology.org/N18-1074.pdf)\n**viewing lines [0 - 134] of 809**\n\nL0: \nL1: URL: https://aclanthology.org/N18-1074.pdf\nL2: ===== Page 1 =====\nL3: \nL4: # FEVER: a large-scale dataset for Fact Extraction and VERification\nL5: \nL6: James Thorne\\(^1\\), Andreas Vlachos\\(^1\\), Christos Christodoulopoulos\\(^2\\), \nL7: and Arpit Mittal\\(^2\\)\nL8: \nL9: \\(^1\\)Department of Computer Science, University of Sheffield \nL10: \\(^2\\)Amazon Research Cambridge \nL11: {j.thorne, a.vlachos}@sheffield.ac.uk \nL12: {chrchrs, mitarpit}@amazon.co.uk \nL13: \nL14: ## Abstract\nL15: \nL16: In this paper we introduce a new publicly available dataset for verification \nL17: against textual sources, FEVER: Fact Extraction and VERification. It consists of\nL18: 185,445 claims generated by altering sentences extracted from Wikipedia and \nL19: subsequently verified without knowledge of the sentence they were derived from. \nL20: The claims are classified as Supported, Refuted or NotEnoughInfo by annotators \nL21: achieving 0.6841 in Fleiss \\(\\kappa\\). For the first two classes, the annotators\nL22: also recorded the sentence(s) forming the necessary evidence for their \nL23: judgment. To characterize the challenge of the dataset presented, we develop a \nL24: pipeline approach and compare it to suitably designed oracles. The best accuracy\nL25: we achieve on labeling a claim accompanied by the correct evidence is 31.87%, \nL26: while if we ignore the evidence we achieve 50.91%. 
Thus we believe that FEVER is\nL27: a challenging testbed that will help stimulate progress on claim verification \nL28: against textual sources.\nL29: \nL30: ## 1 Introduction\nL31: \nL32: The ever-increasing amounts of textual information available combined with the \nL33: ease in sharing it through the web has increased the demand for verification, \nL34: also referred to as fact checking. While it has received a lot of attention in \nL35: the context of journalism, verification is important for other domains, e.g. \nL36: information in scientific publications, product reviews, etc.\nL37: \nL38: In this paper we focus on verification of textual claims against textual \nL39: sources. When compared to textual entailment (TE)/natural language inference \nL40: (Dagan et al., 2009; Bowman et al., 2015), the key difference is that in these \nL41: tasks the passage to verify each claim is given, and in recent years it \nL42: typically consists a single sentence, while in verification systems it is \nL43: retrieved from a large set of documents in order to form the evidence. Another \nL44: related task is question answering (QA), for which approaches have recently been\nL45: extended to handle large-scale resources such as Wikipedia (Chen et al., 2017).\nL46: However, questions typically provide the information needed to identify the \nL47: answer, while information missing from a claim can often be crucial in \nL48: retrieving refuting evidence. For example, a claim stating \"Fiji's largest \nL49: island is Kauai.\" can be refuted by retrieving \"Kauai is the oldest Hawaiian \nL50: Island.\" as evidence.\nL51: \nL52: Progress on the aforementioned tasks has benefited from the availability of \nL53: large-scale datasets (Bowman et al., 2015; Rajpurkar et al., 2016). However, \nL54: despite the rising interest in verification and fact checking among researchers,\nL55: the datasets currently used for this task are limited to a few hundred claims. \nL56: Indicatively, the recently conducted Fake News Challenge (Pomerleau and Rao, \nL57: 2017) with 50 participating teams used a dataset consisting of 300 claims \nL58: verified against 2,595 associated news articles which is orders of magnitude \nL59: smaller than those used for TE and QA.\nL60: \nL61: In this paper we present a new dataset for claim verification, FEVER: Fact \nL62: Extraction and VERification. It consists of 185,445 claims manually verified \nL63: against the introductory sections of Wikipedia pages and classified as \nL64: Supported, Refuted or NotEnoughInfo. For the first two classes, systems and \nL65: annotators need to also return the combination of sentences forming the \nL66: necessary evidence supporting or refuting the claim (see Figure 1). The claims \nL67: were generated by human annotators extracting claims from Wikipedia and mutating\nL68: them in a variety of ways, some of which were meaning-altering. The \nL69: verification of each\nL70: \nL71: 809\nL72: \nL73: Proceedings of NAACL-HLT 2018, pages 809–819\nL74: \nL75: New Orleans, Louisiana, June 1 - 6, 2018. ©2018 Association for Computational \nL76: Linguistics\nL77: \nL78: ===== Page 2 =====\nL79: \nL80: claim was conducted in a separate annotation process by annotators who were \nL81: aware of the page but not the sentence from which original claim was extracted \nL82: and thus in 31.75% of the claims more than one sentence was considered \nL83: appropriate evidence. Claims require composition of evidence from multiple \nL84: sentences in 16.82% of cases. 
Furthermore, in 12.15% of the claims, this \nL85: evidence was taken from multiple pages.\nL86: \nL87: To ensure annotation consistency, we developed suitable guidelines and user \nL88: interfaces, resulting in inter-annotator agreement of 0.6841 in Fleiss (Fleiss, \nL89: 1971) in claim verification classification, and 95.42% precision and 72.36% \nL90: recall in evidence retrieval.\nL91: \nL92: To characterize the challenges posed by FEVER we develop a pipeline approach \nL93: which, given a claim, first identifies relevant documents, then selects \nL94: sentences forming the evidence from the documents and finally classifies the \nL95: claim w.r.t. evidence. The best performing version achieves 31.87% accuracy in \nL96: verification when requiring correct evidence to be retrieved for claims \nL97: Supported or Refuted, and 50.91% if the correctness of the evidence is ignored, \nL98: both indicating the difficulty but also the feasibility of the task. We also \nL99: conducted oracle experiments in which components of the pipeline were replaced \nL100: by the gold standard annotations, and observed that the most challenging part of\nL101: the task is selecting the sentences containing the evidence. In addition to \nL102: publishing the data via our website1, we also publish the annotation interfaces2\nL103: and the baseline system3 to stimulate further research on verification.\nL104: \nL105: Footnote 1: http://fever.ai\nL106: \nL107: Footnote 2: https://github.com/awslabs/fever\nL108: \nL109: Footnote 3: https://github.com/sheffieldnlp/fever-baselines\nL110: \nL111: ## 2 Related Works\nL112: \nL113: Vlachos and Riedel (2014) constructed a dataset for claim verification \nL114: consisting of 106 claims, selecting data from fact-checking websites such as \nL115: PolitiFact, taking advantage of the labelled claims available there. However, in\nL116: order to develop claim verification components we typically require the \nL117: justification for each verdict, including the sources used. While this \nL118: information is usually available in justifications provided by the journalists, \nL119: they are not in a machine-readable form. Thus, also considering the small number\nL120: of claims, the task defined by the dataset proposed remains too challenging for\nL121: the ML/NLP methods currently available. Wang (2017) extended this approach by \nL122: including all 12.8K claims available by Politifact via its API, however the \nL123: justification and the evidence contained in it was ignored in the experiments as\nL124: it was not machine-readable. Instead, the claims were classified considering \nL125: only the text and the metadata related to the person making the claim. While \nL126: this rendered the task amenable to current NLP/ML methods, it does not allow for\nL127: verification against any sources and no evidence needs to be returned to \nL128: justify the verdicts.\nL129: \nL130: The Fake News challenge (Pomerleau and Rao, 2017) modelled verification as \nL131: stance classification: given a claim and an article, predict whether the article\nL132: supports, refutes, observes (neutrally states the claim) or is irrelevant to \nL133: the claim. It consists of 50K labelled claim-article pairs, combining 300 claims\nL134: with 2,582 articles. 
The claims and the articles were curated and labeled by ", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[28] (https://nlp.cs.washington.edu/triviaqa/)\n**viewing lines [0 - 48] of 48**\n\nL0: \nL1: URL: https://nlp.cs.washington.edu/triviaqa/\nL2: ## TriviaQA: A Large Scale Dataset for Reading Comprehension and Question \nL3: Answering\nL4: \nL5: TriviaQA is a reading comprehension dataset containing over 650K question-\nL6: answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by\nL7: trivia enthusiasts and independently gathered evidence documents, six per \nL8: question on average, that provide high quality distant supervision for answering\nL9: the questions. The details can be found in our ACL 17 paper TriviaQA: A Large \nL10: Scale Distantly Supervised Challenge Dataset for Reading Comprehension\nL11: \nL12: Mandar Joshi, Eunsol Choi, Daniel Weld, Luke Zettlemoyer. TriviaQA: A Large \nL13: Scale Distantly Supervised Challenge Dataset for Reading Comprehension\nL14: \nL15: In Association for Computational Linguistics (ACL) 2017, Vancouver, Canada.\nL16: [bib]\nL17: \nL18: ## News\nL19: \nL20: ##### Jul 2017\nL21: \nL22: The TriviaQA leaderboard is now live on Codalab. Submit your predictions for \nL23: evaluation on the test set!\nL24: \nL25: ## Data\nL26: \nL27: If you are interested in the reading comprehension task motivated in the paper, \nL28: click on the link below to download the data.\nL29: \nL30: If you are interested in open domain QA, click on the link below to download the\nL31: data. It contains the unfiltered dataset with 110K question-answer pairs. The \nL32: Wikipedia and top 10 search documents can be obtained from the RC version. The \nL33: main difference between the RC version above and the unfiltered dataset is that \nL34: not all documents (in the unfiltered set) for a given question contain the \nL35: answer string(s). This makes the unfiltered dataset more appropriate for IR-\nL36: style QA.\nL37: \nL38: The University of Washington does not own the copyright of the questions and \nL39: documents included in TriviaQA.\nL40: \nL41: ## Code\nL42: \nL43: Check out our Github repository.\nL44: \nL45: ## Contact\nL46: \nL47: For any questions about the code or data, please contact Mandar Joshi -- {first \nL48: name of the first author}90[at]cs[dot]washington[dot]edu", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://research.google/pubs/natural-questions-a-benchmark-for-question-answering-research/`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://microsoft.github.io/msmarco/`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://github.com/beir-cellar/beir`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://project-miracl.github.io/`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[29] (https://ar5iv.org/abs/2207.01206)\n**viewing lines [0 - 122] of 1392**\n\nL0: \nL1: URL: https://ar5iv.org/abs/2207.01206\nL2: # WebShop: Towards Scalable Real-World Web Interaction with Grounded Language \nL3: Agents\nL4: \nL5: Shunyu Yao111Equal contribution. Project site with code, data, and demos: \nL6: https://webshop-pnlp.github.io. Howard Chen111Equal contribution. Project site \nL7: with code, data, and demos: https://webshop-pnlp.github.io. 
John Yang Karthik \nL8: Narasimhan\nL9: \nL10: Department of Computer Science, Princeton University\nL11: {shunyuy, howardchen, jy1682, karthikn}@princeton.edu\nL12: \nL13: ###### Abstract\nL14: \nL15: Existing benchmarks for grounding language in interactive environments either \nL16: lack real-world linguistic elements, or prove difficult to scale up due to \nL17: substantial human involvement in the collection of data or feedback signals. To \nL18: bridge this gap, we develop WebShop – a simulated e-commerce website environment\nL19: with million real-world products and 1.181.18 crowd-sourced text instructions. \nL20: Given a text instruction specifying a product requirement, an agent needs to \nL21: navigate multiple types of webpages and issue diverse actions to find, \nL22: customize, and purchase an item. WebShop provides several challenges for \nL23: language grounding including understanding compositional instructions, query \nL24: (re-)formulation, comprehending and acting on noisy text in webpages, and \nL25: performing strategic exploration. We collect over 12,08712,087 human \nL26: demonstrations for the task, and train and evaluate a diverse range of agents \nL27: using reinforcement learning, imitation learning, and pre-trained image and \nL28: language models. Our best model achieves a task success rate of 1,6001,600, \nL29: which outperforms rule-based heuristics (29%29\\%) but is far lower than human \nL30: expert performance (9.6%9.6\\%). We also analyze agent and human trajectories and\nL31: ablate various model components to provide insights for developing future \nL32: agents with stronger language understanding and decision making abilities. \nL33: Finally, we show that agents trained on WebShop exhibit non-trivial sim-to-real \nL34: transfer when evaluated on amazon.com and ebay.com , indicating the potential \nL35: value of WebShop in developing practical web-based agents that can operate in \nL36: the wild.59%59\\%\nL37: \nL38: ## 1 Introduction\nL39: \nL40: Recent advances in natural language processing (NLP) and reinforcement learning \nL41: (RL) have brought about several exciting developments in agents that can perform\nL42: sequential decision making while making use of linguistic context [30, 50, 58].\nL43: On the other hand, large-scale language models like GPT-3 [6] and BERT [11] are\nL44: excelling at traditional NLP benchmarks such as text classification, \nL45: information extraction and question answering. While the former set of tasks are\nL46: limited in their set of linguistic concepts and prove difficult to scale up, \nL47: the latter tasks usually contain static, non-interactive datasets that lack \nL48: adequate grounding to extra-linguistic concepts [4]. In order to make further \nL49: progress in building grounded language models, we believe there is a need for \nL50: scalable interactive environments that contain: (1) language elements that \nL51: reflect rich, real-world usage and are collectible at scale, and (2) task \nL52: feedback that is well-defined and automatically computable to facilitate \nL53: interactive learning, without the constant need for expensive feedback from \nL54: humans.\nL55: \nL56: The world wide web (WWW) is a massive open-domain interactive environment that \nL57: inherently satisfies the first aforementioned requirement through its \nL58: interconnected set of pages with natural text, images and interactive elements. 
\nL59: By being simultaneously scalable, semantic, interactive, dynamic and realistic, \nL60: the web is uniquely different from existing environments for autonomous agents \nL61: like games or 3D navigation. Moreover, the web also provides a practical \nL62: environment to deploy trained agents, with great potential for alleviating human\nL63: efforts in tedious tasks (e.g. buying products, booking appointments). While \nL64: there has been prior work on building web-based tasks, they either lack depth in\nL65: the transition and action spaces, or prove difficult to scale up. Some \nL66: benchmarks only contain either a single classification task [39, 46, 31] or \nL67: interactions containing only a handful of different pages in each episode [43]. \nL68: Others propose tasks with longer horizons but are either limited to following \nL69: hyperlinks for web navigation [36] or require human-in-the-loop feedback due to \nL70: the lack of an automated reward function [33].\nL71: \nL72: In this paper, we introduce WebShop (Figure 1) – a large-scale interactive web-\nL73: based environment for language understanding and decision making – and train \nL74: autonomous agents to complete tasks on this benchmark. With the goals of being \nL75: scalable and containing realistic language and visual elements, WebShop emulates\nL76: the task of online shopping on an e-commerce website, where the agent’s goal is\nL77: to understand a human-provided text instruction and purchase a product to match\nL78: the specifications. To do so, the agent needs to query the website’s search \nL79: engine, choose items to explore from search results, open and read their \nL80: description and details, and select the necessary options (e.g. 32 oz., red \nL81: color) before clicking the ‘Buy’ button. In order to pick the optimal product \nL82: that matches user requirements, the agent may need to view and compare various \nL83: products (including backtracking between pages), and potentially perform \nL84: multiple searches. WebShop contains over one million products scraped from \nL85: amazon.com, over thousand crowdsourced instructions, and a diverse semantic \nL86: action space of searching text queries and choosing text buttons. It is packaged\nL87: into a convenient OpenAI Gym [5] environment and can be rendered in two modes \nL88: (HTML or simple) with parallel observation spaces that are easy for human and \nL89: model respectively. Rewards are automatically computed using a combination of \nL90: programmatic matching functions that consider the attributes, type, options and \nL91: price of the chosen product, alleviating the need for human evaluation and \nL92: providing a path to scaling up interactive learning.1212\nL93: \nL94: We develop several agents to perform this task, using both reinforcement \nL95: learning (RL) and imitation learning (IL). We also leverage the latest pre-\nL96: trained language models [26, 11] for representing and generating text. Our \nL97: modular architecture includes a factorized processing of state observations and \nL98: action choices using ResNets (visual) and Transformers (text), followed by an \nL99: attention fusion layer that helps the agent contextually score each action. Our \nL100: best agent achieves an average score of (out of 62.462.4) and successfully \nL101: completes the task 100100 of the time, significantly higher than a heuristic \nL102: baseline that achieves 28.7%28.7\\% and 45.645.6, respectively. 
While this \nL103: demonstrates the potential for IL and RL, the agents are still much lower than \nL104: human experts, who can achieve 9.6%9.6\\% and 82.182.1 on this task.*** In our \nL105: analysis (§5.3), we observe that the task requires patience and consistency, \nL106: which is lacking in some crowdsource workers, leading to lower scores. Even with\nL107: this caveat, the gap between human performance and the model remains \nL108: significant. We perform several analyses and ablation studies to identify the \nL109: cause of this gap and find several avenues for agent improvement in the future \nL110: including more robust search generation, explicit memory modules, and better \nL111: handling of noisy web text. Finally, we also demonstrate an instance of sim-to-\nL112: real transfer by deploying agents trained with WebShop to operate on amazon.com \nL113: and ebay.com, and find that they can achieve similar performances despite search\nL114: engine and product differences, and consistently outperform the rule baseline \nL115: of using the first result returned by the commercial search engines when \nL116: directly searching the instruction texts. This demonstrates the practical \nL117: potential of our work towards developing agents that can operate autonomously on\nL118: the world wide web (WWW).59.6%59.6\\%\nL119: \nL120: ## 2 Related Work\nL121: \nL122: Reinforcement learning on the web. Nogueira and Cho [36] introduced WikiNav as a", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `http://alfworld.github.io/`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://osu-nlp-group.github.io/Mind2Web/`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://github.com/web-arena-x/visualwebarena`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "[30] (https://ar5iv.org/pdf/2406.12172)\n**viewing lines [0 - 127] of 1478**\n\nL0: \nL1: URL: https://ar5iv.org/pdf/2406.12172\nL2: # Navigating the Labyrinth: Evaluating and Enhancing LLMs’ Ability to Reason \nL3: About Search Problems\nL4: \nL5: Nasim Borazjanizadeh\nL6: \nL7: Berkeley AI Research, UC Berkeley\nL8: \\AndRoei Herzig\nL9: Berkeley AI Research, UC Berkeley\nL10: \\AndTrevor Darrell\nL11: Berkeley AI Research, UC Berkeley\nL12: \\AndRogerio Feris\nL13: MIT-IBM Watson AI Lab\nL14: \\AndLeonid Karlinsky\nL15: MIT-IBM Watson AI Lab\nL16: \nL17: ###### Abstract\nL18: \nL19: Recently, Large Language Models (LLMs) attained impressive performance in math \nL20: and reasoning benchmarks. However, they still often struggle with logic problems\nL21: and puzzles that are relatively easy for humans. To further investigate this, \nL22: we introduce a new benchmark, SearchBench, containing 11 unique search problems,\nL23: each equipped with automated pipelines to generate an arbitrary number of \nL24: instances and analyze the feasibility, correctness, and optimality of LLM-\nL25: generated solutions. We show that even the most advanced LLMs fail to solve \nL26: these problems end-to-end in text, e.g., GPT4 solves only 1.4%. SearchBench \nL27: problems require considering multiple pathways to the solution as well as \nL28: backtracking, posing a significant challenge to auto-regressive models. \nL29: Instructing LLMs to generate code that solves the problem helps, but only \nL30: slightly, e.g., GPT4’s performance rises to 11.7%. 
In this work, we show that \nL31: in-context learning with A* algorithm implementations enhances performance. The \nL32: full potential of this promoting approach emerges when combined with our \nL33: proposed Multi-Stage-Multi-Try method, which breaks down the algorithm \nL34: implementation into two stages and verifies the first stage against unit tests, \nL35: raising GPT-4’s performance above 57%.\nL36: \nL37: \\doparttoc\\faketableofcontents\nL38: \nL39: ### 1 Introduction\nL40: \nL41: The advent of Large Language Models (LLMs) has revolutionized the field of \nL42: natural language processing, with models like Gemini[18], GPT-4[26] \nL43: demonstrating unprecedented performance on reasoning tasks such as GSM8k[8]. \nL44: However, these models still exhibit surprising failures on some intuitive \nL45: tasks[2, 30, 22] and struggle with multi-step compositional reasoning, \nL46: combinatorial problems, and planning [9, 40, 44]. Inspired by these observations\nL47: and to further investigate LLMs’ reasoning abilities, we offer a new benchmark \nL48: of search problems, SearchBench. The problems in SearchBench are combinatorial, \nL49: defined as tasks that involve finding an optimal object from a finite set of \nL50: objects, where the set of feasible solutions is either discrete or can be \nL51: reduced to a discrete set [43]. These problems are predominantly NP-hard and \nL52: necessitate systematic exploration of action paths and backtracking to \nL53: intermediate feasible states; thus, SearchBench implicitly investigates the \nL54: LLM’s capacity for non-linear reasoning.\nL55: \nL56: SearchBench has five distinct problem categories: (i) pathfinding, (ii) puzzles,\nL57: (iii) subset sum, (iv) sorting, and (v) under-determined systems; further \nL58: divided into 11 unique problem types. Each problem type is inspired by known \nL59: puzzles and combinatorial problems but augmented with modified rules and \nL60: constraints to ensure substantial differences from similar problems LLMs \nL61: encountered during their training. And the solution to each problem is a \nL62: sequence of actions leading from the initial state to the goal state, while \nL63: optimizing a cost. We generate100 instances of varying difficulty per problem \nL64: type using an automatic pipeline, resulting in 1107 problem instances total. \nL65: Each problem type in SearchBench is equipped with an automatic pipeline that \nL66: evaluates LLM-generated solutions on three dimensions: feasibility, correctness,\nL67: and optimality. Feasibility checks whether the actions taken follow the \nL68: problem’s rules; correctness verifies if a feasible solution reaches the goal \nL69: state; and optimality checks if the least cost solution was found.∼\\sim\nL70: \nL71: SearchBench is challenging to LLMs due to several factors. Firstly, natural \nL72: language is less suited for describing or updating accurate representations of \nL73: complex intermediate states. Secondly, our experiments show LLMs struggle with \nL74: exploring a combinatorial exponentially exploding state-space. Despite the fact \nL75: that some methods were developed for long-context reasoning [4, 13, 50], \nL76: SearchBench problems cannot be easily summarized [4], reasoned about [13], or \nL77: processed in parallel due to their size [50, 45]. 
Our findings show that even \nL78: the strongest LLMs [26] almost completely fail to solve SearchBench problems in \nL79: text-only mode.\nL80: \nL81: To provide further insights, we show that LLMs’ performance on SearchBench \nL82: improves by prompting the models to solve the problems using the A* search \nL83: algorithm [11]. A* is a heuristic-based graph traversal algorithm known for its \nL84: time efficiency and provable optimality guarantees, making it the most suitable \nL85: search algorithm for solving the problems in our benchmark. This method \nL86: leverages A*’s correctness and optimality, while offloading some of the non-\nL87: linear computations involved in searching the state-space to code execution. \nL88: Additionally, to improve the quality of generated A* codes, motivated that \nL89: ensembling helps generation quality[41, 47, 21], we introduce the Multi-Stage-\nL90: Multi-Try (MSMT) inference strategy. In the \"Multi-Try\" aspect of MSMT, before \nL91: evaluating the solution returned by the code, we first verify whether the code \nL92: generated by the model satisfies a set of unit tests: (i) it is executable; (ii)\nL93: it returns a list as output; and (iii) data type of list elements is correct. \nL94: If the code fails any of the tests, MSMT re-runs the LLM until a valid code is \nL95: generated or allowed number of attempts is exhausted. The \"Multi-Stage\" aspect \nL96: of MSMT generates the code in two steps: (i) ‘A* Implementation’ - the \nL97: implementation of an instance-agnostic A* algorithm for the problem type; and \nL98: (ii) Initialization - the instantiation of initial conditions and state \nL99: variables of the problem instance. In MSMT ’Initialization’ is generated \nL100: conditioned on the ‘A* Implementation’ (which is generated separately first and \nL101: provided in ‘Intitialization’ prompt). We demonstrate that our MSMT A* method \nL102: (Fig. 2) significantly enhances the LLMs’ ability to solve search problems, \nL103: outperforming all other prompting strategies we used to evaluate models on \nL104: SearchBench, including 0-shot text, 4-shot Chain-of-Thought (CoT)[42] text, \nL105: 0-shot code generation, and 4-shot A* prompting with the naive greedy decoding \nL106: strategy.\nL107: \nL108: To summarize, our main contributions are as follows: (i) We contribute the \nL109: SearchBench benchmark designed to assess the capability of LLMs in solving \nL110: state-based problems requiring combinatorial search; (ii) We introduce the A* \nL111: Multi-Stage-Multi-Try (MSMT) framework that leverages the flexibility and \nL112: natural language comprehension capabilities of LLMs, reliability and structured \nL113: nature of the A* algorithm, along with unitests to verify LLMs’ code generation \nL114: in order to address search problems. (iii) We show a strong advantage of MSMT A*\nL115: prompting for solving search problems robustly with LLMs, suggesting \nL116: interesting future work directions and yet leaving significant room for future \nL117: improvement on SearchBench.\nL118: \nL119: ### 2 SearchBench Benchmark\nL120: \nL121: SearchBench includes five categories of problems: puzzles, subset sum, sorting, \nL122: pathfinding, and under-determined systems, further subdivided into 11 problem \nL123: types and instantiated as 1107 natural language problem instances. Each problem \nL124: type requires a different algorithmic approach to solve. 
Our problems are \nL125: inspired by well-known (mostly NP-hard) puzzles and search problems, though \nL126: their rules were modified to ensure a solved instance of a problem similar to \nL127: any SearchBench problem was not observed during LLMs’ massive internet-scale ", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://ar5iv.labs.ar5iv.org/html/2312.15820`", + "tool_call_id": null + }, + { + "role": "tool", + "content": "Error fetching URL `https://github.com/jxwuyi/WebNav`", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "针对搜索智能体(Search Agent)的评估,已涌现出多个侧重点不同的基准测试。以下将分类介绍这些基准,涵盖其核心特点、典型使用场景及例题。\n\n---\n\n## 🔍 1. 网页浏览/导航类基准\n这类基准要求智能体在真实或模拟的网页环境中执行多步操作,以完成复杂任务。\n\n| 基准 | 核心特点 | 使用场景 | 例题 |\n|------|----------|----------|------|\n| **BrowseComp**(OpenAI)【3†L16-L35】 | - 1,266 个挑战性问题,需持续导航、查找难以直接搜索的信息
- 问题设计保证人类在 10 分钟内无法解决,且现有模型(包括带浏览的 ChatGPT)也难以完成
- 答案为短字符串,便于自动验证 | 评估智能体在复杂网络浏览任务中的持久性、创造性搜索能力 | “找出 2018‑2023 年间在 EMNLP 会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题”【3†L45-L49】 |\n| **WebVoyager**【24†L18-L33】 | - 多模态(视觉+文本)端到端网页智能体基准
- 包含 300 个真实任务,覆盖 15 个常用网站(如 Amazon、Wikipedia)
- 使用 GPT‑4V 进行自动评估,与人工评估一致性达 85.3% | 评估智能体在真实网站上的端到端任务完成能力(多模态输入) | “在 Amazon 上找到‘无线蓝牙耳机’并按价格从低到高排序,然后选择最便宜的一款加入购物车” |\n| **WebArena**【25†L15-L35】 | - 完全可交互的、高度真实的网页环境(4 个领域:电商、论坛、协作开发、内容管理)
- 812 项长视野任务,评估功能正确性而非表面动作匹配
- 最佳 GPT‑4 智能体成功率仅 14.41%,远低于人类的 78.24% | 评估自主智能体在真实网页环境中的复杂任务执行能力 | “在电商网站中购买一双‘黑色运动鞋’,价格低于 $80,并将收货地址修改为纽约”【25†L86-L92】 |\n| **Mind2Web**【19†L4-L14】 | - 首个面向通用网页智能体的数据集,任务来自真实网站
- 支持在线评估(Online‑Mind2Web),包含 300 个实时任务、136 个流行网站
- 强调多步指令跟随与复杂交互 | 评估智能体在任意网站上的通用任务完成能力 | “在 GitHub 上创建一个名为 ‘test‑repo’ 的公开仓库,并添加一个 README.md 文件” |\n| **VisualWebArena**【20†L4-L10】 | - 多模态网页智能体基准,任务依赖视觉信息(截图)
- 包含丰富的视觉接地任务,如识别图片、按钮位置等
- 评估智能体在真实视觉网页环境中的表现 | 评估多模态智能体在视觉丰富的网页任务中的能力 | “在纽约时报网站找到今天头条新闻的配图,并描述图中主要物体” |\n| **WebVLN**【22†L4-L11】 | - 网站上的视觉‑语言导航任务,智能体需根据问题指令在网站中导航
- 数据集 WebVLN‑v1 包含多种网站导航场景
- 评估导航成功率与任务完成度 | 评估智能体在网站中基于视觉与语言指令的导航能力 | “找到‘联系我们’页面,并提取其中的邮箱地址” |\n| **WebNav**【23†L4-L11】 | - 目标驱动的网页导航基准,环境为部分观测
- 智能体需理解自然语言指令并在网页中规划动作序列
- 早期经典任务,常用于测试基础导航能力 | 评估智能体在部分观测网页环境中的规划与指令跟随能力 | “导航到 Wikipedia 上关于‘人工智能’的页面,并摘录第一段内容” |\n\n---\n\n## 🛠️ 2. 工具使用/通用助手类基准\n这类基准测试智能体调用外部工具(如搜索、计算、API)来解决实际问题的能力。\n\n| 基准 | 核心特点 | 使用场景 | 例题 |\n|------|----------|----------|------|\n| **GAIA**【26†L10-L25】 | - 466 个真实世界问题,需要推理、多模态处理、网页浏览、工具使用等综合能力
- 对人类简单(92% 准确率),但对最强 GPT‑4 仅 15%
- 答案唯一、事实性,支持自动评估 | 评估通用 AI 助手在需要多步工具使用的真实任务中的表现 | “2022 年法国总统选举的获胜者出生在哪个城市?”【26†L10-L25】 |\n| **ToolBench**(OpenBMB)【7†L4-L10】 | - 包含多样软件工具的真实任务基准
- 提供易用的基础设施(虚拟 API 服务器等)
- 评估 LLM 调用工具完成复杂任务的能力 | 评估智能体在工具调用与组合上的熟练度 | “使用计算器 API 计算 (3.14 * 7.2)^2,然后使用搜索 API 查找‘圆周率’的最新精确值” |\n| **HSCodeComp**【2†L16-L35】 | - 首个针对深度搜索智能体在分层规则应用上的专家级电商基准
- 任务:根据嘈杂的商品描述预测 10 位 HS 编码(关税规则)
- 632 个商品条目,标注由多位电商专家完成,最佳智能体准确率仅 46.8%(人类 95%) | 评估智能体在复杂规则(如关税分类)下的深层推理与搜索能力 | “给定商品描述‘男士纯棉针织T恤,领口为V领’,预测其 10 位 HS 编码”【2†L24-L32】 |\n\n---\n\n## ❓ 3. 问答/检索类基准\n这类基准主要评估智能体在开放域或特定领域的信息检索与问答能力。\n\n| 基准 | 核心特点 | 使用场景 | 例题 |\n|------|----------|----------|------|\n| **HotpotQA**【8†L4-L10】 | - 113k 个基于 Wikipedia 的多跳问答对
- 需要跨多个文档推理,并提供句子级支持事实
- 涵盖多样的问题类型(比较、列举、因果等) | 评估模型在需要多跳推理的开放域问答中的能力 | “《了不起的盖茨比》的作者还写过哪些小说?” |\n| **FEVER**【27†L16-L28】 | - 185,445 个基于 Wikipedia 的声明,需分类为 Supported/Refuted/NotEnoughInfo
- 要求提供证据句子(可多句、多页面)
- 挑战性高(最佳系统仅 31.87% 准确率) | 评估系统在事实核查与证据检索上的能力 | “斐济最大的岛屿是考艾岛。”【27†L48-L50】 |\n| **TriviaQA**【28†L5-L10】 | - 超过 650k 个问答‑证据三元组,问题由琐事爱好者编写
- 每个问题平均提供 6 篇证据文档,适合远程监督阅读
- 包含阅读理解和开放域 QA 两种任务设置 | 评估模型在开放域琐事问答中的检索与阅读理解能力 | “哪位演员在《星球大战:原力觉醒》中扮演凯洛·伦?” |\n| **Natural Questions**【11†L4-L8】 | - 来自 Google 搜索的真实用户问题,答案来自 Wikipedia
- 包含长答案(段落)和短答案(实体/日期等)
- 训练集 307k,开发/测试集各 8k | 评估开放域问答系统对真实用户查询的响应能力 | “谁写了《傲慢与偏见》?”(答案:“简·奥斯汀”) |\n| **MS MARCO**【12†L4-L10】 | - 基于 Bing 真实搜索查询的检索/问答基准
- 包含段落排序、文档排序、问答等任务
- 数据规模大(约 500k 查询,880 万段落) | 训练与评估检索模型在真实搜索场景下的表现 | “如何冲泡咖啡?”(系统需检索相关段落回答) |\n| **BEIR**【13†L4-L10】 | - 异构零样本检索基准,包含 18 个数据集、9 种任务(事实核查、QA、对话检索等)
- 评估模型在未见任务上的泛化能力
- 已成为检索模型的标准评估套件 | 评估检索模型在零样本设置下的跨任务泛化能力 | 给定查询“全球变暖的主要原因”,在文档集合中检索相关文档 |\n| **MIRACL**【14†L4-L10】 | - 多语言检索基准,覆盖 18 种语言(包括中文、阿拉伯语等)
- 专注于 ad‑hoc 检索,所有查询与文档均经过人工标注
- 提供多语言检索挑战(WSDM 2023 Cup) | 评估多语言检索模型在不同语言下的检索效果 | 中文查询:“全球变暖的原因”,检索相关中文文档 |\n\n---\n\n## 🧩 4. 组合搜索/推理类基准\n这类基准专门测试智能体在组合搜索问题上的逻辑推理与规划能力。\n\n| 基准 | 核心特点 | 使用场景 | 例题 |\n|------|----------|----------|------|\n| **SearchBench**【30†L22-L36】 | - 11 种独特的搜索问题类型(路径查找、谜题、子集和、排序、欠定系统等)
- 自动生成任意数量实例,并评估解决方案的可行性、正确性、最优性
- GPT‑4 在纯文本模式下仅解决 1.4%,通过 A* 代码生成提升至 11.7% | 评估 LLM 在组合搜索问题上的推理与规划能力 | “在一个 5x5 迷宫中,从起点 (0,0) 到终点 (4,4) 的最短路径是什么?(障碍物位置已知)”【30†L56-L64】 |\n\n---\n\n## 🛒 5. 特定领域/仿真环境基准\n这类基准针对特定领域(如电商、家庭环境)设计,测试智能体在仿真环境中的交互能力。\n\n| 基准 | 核心特点 | 使用场景 | 例题 |\n|------|----------|----------|------|\n| **WebShop**【29†L15-L30】 | - 模拟电商网站环境,包含 118 万真实商品、12,087 条众包指令
- 智能体需根据文本指令搜索、筛选、购买商品
- 奖励自动计算,最佳模型成功率 29%(人类 59%) | 评估语言智能体在电商任务中的导航、搜索与决策能力 | “找到一款价格低于 $50 的无线蓝牙耳机,评分至少 4 星,并加入购物车”【29†L20-L25】 |\n| **ALFWorld**【18†L4-L10】 | - 将文本环境(TextWorld)与具身环境(ALFRED)对齐的基准
- 智能体先在文本中学习抽象策略,然后在视觉环境中执行
- 包含 134 个家庭任务(如“在厨房找到杯子并拿到客厅”) | 评估智能体在文本与具身环境之间的迁移学习与规划能力 | “在厨房找到‘杯子’,把它拿到客厅放在桌子上” |\n\n---\n\n## 📊 总结\n以上基准覆盖了搜索智能体的不同能力维度:从**网页浏览、工具使用、问答检索**,到**组合搜索推理**以及**特定领域仿真**。选择合适的基准取决于您要评估的具体能力(如多模态交互、工具调用、多跳推理等)。这些基准大多提供公开的数据集与评估代码,可直接用于实验。\n\n> 注:本文基于 2025 年 11 月前的公开资料整理,部分基准可能已有更新版本,建议访问对应官网获取最新信息。", + "reasoning_content": "在同时打开多个基准测试相关页面后,我们获得了大量信息。WebVoyager 是一个多模态网页智能体基准测试,专注于在真实网站上进行端到端任务执行,其特点包括多模态输入(屏幕截图和文本)、在15个常用网站上的300个任务,以及使用GPT-4V进行自动评估。使用场景是评估网页智能体在真实世界中的导航和任务完成能力。例题可能是在亚马逊上找到特定产品并将其加入购物车。\n\nWebArena 是一个真实且可复现的网页环境,用于评估自主智能体。其特点包括四个功能完整的网站(电子商务、论坛、协作开发、内容管理)、812项长视野任务,以及专注于功能正确性的评估。使用场景是评估智能体在复杂网页任务中的表现。例题可能是在电子商务网站上购买符合特定标准的商品。\n\nGAIA 是一个针对通用AI助手的基准测试,要求具备推理、多模态处理、网页浏览和工具使用等能力。其特点包括466个问题,对人类来说概念简单但对AI具有挑战性,答案简洁明确,支持自动评估。使用场景是评估AI助手在真实世界问题中的表现。例题可能是“找出2022年法国总统选举中获胜者的出生城市”。\n\nToolBench 是一个用于评估LLM工具操作能力的基准测试。其特点包括多样化的软件工具、真实世界任务,以及易于使用的基础设施。使用场景是评估智能体使用工具完成任务的能力。例题可能是使用计算器API计算表达式,或使用搜索API查找信息。\n\nHotpotQA 是一个多跳问答基准测试,其特点包括基于Wikipedia的113k个问题-答案对,需要多文档推理,并提供句子级别的支持事实。使用场景是评估模型在回答需要多跳推理的问题时的能力。例题可能是“《了不起的盖茨比》的作者还写过哪些小说?”。\n\nFEVER 是一个事实提取和验证基准测试,其特点包括185,445个基于Wikipedia的声明,需要分类为支持、反驳或信息不足,并需要提供证据句子。使用场景是评估系统验证声称事实的能力。例题可能是“斐济最大的岛屿是考艾岛”,系统需要检索证据来支持或反驳这一说法。\n\nTriviaQA 是一个阅读理解基准测试,其特点包括超过65万个问题-答案-证据三元组,问题由琐事爱好者编写,并附带证据文档。使用场景是评估模型在开放域问答中的能力。例题可能是“哪位演员在《星球大战:原力觉醒》中扮演凯洛·伦?”。\n\nNatural Questions 是一个问答基准测试,其特点包括来自谷歌搜索的真实问题,答案来自Wikipedia,并包含长答案和短答案。使用场景是评估开放域问答系统。例题可能是“谁写了《傲慢与偏见》?”,答案可能是“简·奥斯汀”。\n\nMS MARCO 是一个用于深度学习搜索的基准测试,其特点包括来自Bing的真实搜索查询、人工生成的答案,以及用于段落排序和文档排序的任务。使用场景是训练和评估检索模型。例题可能是“如何冲泡咖啡?”,系统需要检索相关段落来回答。\n\nBEIR 是一个用于零样本评估的异构信息检索基准测试,其特点包括18个数据集、9项任务,涵盖事实核查、问答、对话检索等。使用场景是评估检索模型在零样本设置下的泛化能力。例题可能是给定一个查询,在文档集合中检索相关文档。\n\nMIRACL 是一个多语言信息检索基准测试,其特点涵盖18种语言,专注于ad-hoc检索,并包含人工标注的相关性判断。使用场景是评估多语言检索模型。例题可能是用中文查询“全球变暖的原因”,并检索相关文档。\n\nWebShop 是一个模拟电子商务网站环境,用于评估语言智能体。其特点包括118万个真实产品、12,087条众包文本指令,以及需要导航、搜索和购买的任务。使用场景是评估智能体在电子商务任务中的表现。例题可能是“找到一款价格低于50美元的无线蓝牙耳机”。\n\nALFWorld 是一个将文本和具身环境对齐的基准测试,用于在文本世界中训练智能体,然后将其转移到具身环境中。其特点包括基于文本的环境,与ALFRED数据集并行,并专注于指令跟随。使用场景是评估智能体在家庭任务中的规划能力。例题可能是“在厨房里找到一个杯子,把它拿到客厅”。\n\nMind2Web 是一个用于评估网页通用智能体的基准测试,其特点包括来自真实网站的任务、需要多步交互,并专注于语言指令跟随。使用场景是评估智能体在复杂网页任务中的表现。例题可能是“在GitHub上创建一个新的仓库,命名为‘test-repo’”。\n\nVisualWebArena 是一个用于多模态智能体的基准测试,其特点包括真实视觉网页任务、需要理解视觉和文本元素,并评估功能正确性。使用场景是评估多模态智能体在视觉接地网页任务中的表现。例题可能是“在纽约时报网站上找到今天头条新闻的图片”。\n\nSearchBench 是一个用于评估LLM解决搜索问题能力的基准测试,其特点包括11种搜索问题类型、自动生成实例,并评估可行性、正确性和最优性。使用场景是评估LLM在组合搜索问题中的推理能力。例题可能是“在一个迷宫中找到从起点到终点的最短路径”。\n\nWebVLN 是一个网站上的视觉语言导航基准测试,其特点包括基于问题的指令、需要导航网站,并评估导航成功率。使用场景是评估智能体在网站中基于视觉和语言指令进行导航的能力。例题可能是“找到‘联系我们’页面并提取电子邮件地址”。\n\nWebNav 是一个用于目标驱动网页导航的基准测试,其特点包括部分观察环境、需要规划和理解自然语言指令。使用场景是评估智能体在网页导航中的能力。例题可能是“导航到维基百科上关于‘人工智能’的页面”。\n\n现在,我需要整理一份针对搜索智能体的基准测试列表。考虑到用户可能更关注中文资料,我会优先考虑那些有中文描述的基准测试,例如BrowseComp、HSCodeComp、GAIA、WebVoyager、WebArena、Mind2Web、WebShop、ALFWorld、HotpotQA、FEVER、TriviaQA、Natural Questions、MS MARCO、BEIR、MIRACL、SearchBench、WebVLN、WebNav。我会为每个基准测试提供特点、使用场景和例题。同时,我会引用相关来源,例如HSCodeComp论文、BrowseComp博客文章、WebVoyager论文、WebArena论文、GAIA论文、WebShop论文、SearchBench论文等。我的回答将用中文呈现,并确保内容全面、结构清晰。现在开始撰写回答。", + "tool_calls": [] + } + ] +} \ No newline at end of file diff --git a/lib/llm/tests/data/deepseek-v3.2/test_input_search_wo_date.json b/lib/llm/tests/data/deepseek-v3.2/test_input_search_wo_date.json new file mode 100644 index 0000000000..66b76cba06 --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/test_input_search_wo_date.json @@ -0,0 +1,533 @@ +{ + "messages": [ + { + "role": "developer", + "content": "In a year before 2023, between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a 
country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered. That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%. Who was the emeritus board member that year? Name only, no titles.", + "tools": [ + { + "type": "function", + "function": { + "name": "search", + "description": "Searches for information related to query and displays topn results.", + "parameters": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query string" + }, + "topn": { + "type": "integer", + "description": "Number of top results to display", + "default": 10 + }, + "source": { + "type": "string", + "description": "Source to search within", + "enum": [ + "web", + "news" + ], + "default": "web" + } + }, + "required": [ + "query" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + }, + { + "type": "function", + "function": { + "name": "open", + "description": "Opens the link id from the page indicated by cursor starting at line number loc, showing num_lines lines. Valid link ids are displayed with the formatting: 【{id}†.*】. If cursor is not provided, the most recent page is implied. If id is a string, it is treated as a fully qualified URL associated with source. If loc is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available. Use this function without id to scroll to a new location of an opened page.", + "parameters": { + "type": "object", + "properties": { + "id": { + "description": "Link ID (number) or fully qualified URL (string)", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string" + } + ], + "default": -1 + }, + "cursor": { + "type": "integer", + "description": "Page cursor indicator", + "default": -1 + }, + "loc": { + "type": "integer", + "description": "Starting line number", + "default": -1 + }, + "num_lines": { + "type": "integer", + "description": "Number of lines to show", + "default": -1 + }, + "view_source": { + "type": "boolean", + "description": "Whether to view source", + "default": false + }, + "source": { + "type": "string", + "description": "Source associated with the URL", + "enum": [ + "web", + "news" + ], + "default": "web" + } + }, + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + }, + { + "type": "function", + "function": { + "name": "find", + "description": "Finds exact matches of pattern in the current page, or the page given by cursor.", + "parameters": { + "type": "object", + "properties": { + "pattern": { + "type": "string", + "description": "The exact pattern to search for" + }, + "cursor": { + "type": "integer", + "description": "Page cursor to search in", + "default": -1 + } + }, + "required": [ + "pattern" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + }, + { + "type": "function", + "function": { + "name": "python", + "description": "Use this tool to execute Python code. The code will not be shown to the user. 
This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files). When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds.", + "parameters": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The python code to execute in the jupyter environment." + } + }, + "required": [ + "code" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + } + ] + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "The user asks: \"In a year before 2023, between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered. That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%. Who was the emeritus board member that year? Name only, no titles.\"\n\nWe need to parse the puzzle.\n\nWe have clues about a press release about vaccine doses for a country, dated between Dec 1-15 inclusive, reporting as of 5th of that month total doses received 16,201,670 and administered 7,583,134. That is likely a COVID-19 vaccine update for some country. Which country? Might be a smaller country. Could be something like \"Uganda\", \"Zimbabwe\", \"Kenya\", \"Philippines\", etc. Let's search.\n\nSearch for \"16,201,670 vaccine doses\" \"7,583,134 administered\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"\\\"16,201,670\\\" vaccine doses\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[0] \"16,201,670\" vaccine doses (Search_Results/\"16,201,670\" vaccine doses)\n**viewing lines [0 - 38] of 38**\n\nL0: \nL1: URL: Search_Results/\"16,201,670\" vaccine doses\nL2: # Search Results\nL3: \nL4: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: \nL5: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had \nL6: received a total of 16,201,670 vaccines, with 7,583,134 administered. While \nL7: vaccine acceptance is ...\nL8: * 【1†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec \nL9: 1, 2021 — Total Doses Received 16,201,670. Table 10 gives the total vaccines \nL10: received since the start of Covid -19 vaccination exercise in the country.\nL11: * 【2†Output Result Page; publish_date: none†open.unicef.org】 ... 16,201,670 \nL12: doses of multiple vaccines nationwide and full vaccination of 15.5 per cent with\nL13: two doses of COVID-19 vaccine as of 31 December 2021.\nL14: * 【3†rebased GDP; publish_date: none†documents1.worldbank.org】 Dec 7, 2021 — \nL15: As of December 5, 2021,. Kenya had received a total of 16,201,670 vaccines, \nL16: with. 7,583,134 administered. 
Vaccine acceptance is reportedly high.\nL17: * 【4†Integrated Annual Report; publish_date: none†www.co-opbank.co.ke】 May 27,\nL18: 2022 — ... doses of Covid-19 vaccines and administered close to 17 million ... \nL19: 16,201,670, huku 7,583,134 zikiwa tayari zimedungwa watu. Bado kuna ...\nL20: * 【5†World Bank lifts Kenya's growth prospect to 5% in 2021; publish_date: \nL21: none†www.africa-press.net】 ... 16,201,670 vaccines, with 7,583,134 administered.\nL22: While vaccine acceptance is reportedly high there is still a long way to go \nL23: towards the government's ...\nL24: * 【6†2020 Annual Report United States; publish_date: none†www.hi-us.org】 \nL25: including medicine, vaccines, Covid-19 prevention kits, and kits of ... \nL26: 16,201,670. 9,630,584. Prepaid expenses. 56,707. 22,601. Total current assets.\nL27: * 【7†HOSPITAL CENTRAL DE LA DEFENSA; publish_date: \nL28: none†contrataciondelestado.es】 16201670 X. URPA-URPA. 0. HCD-000393/00. KONTRON.\nL29: 7000 ... LASER SHOT LBP-1120. CNEUR-C. NEUROFISIOLOGÍA. 0 ... VICTOREEN CT HEAD\nL30: DOSE PHANTON. 43650-014.\nL31: * 【8†body metal concentrations: Topics by ...; publish_date: \nL32: none†www.science.gov】 PMID:16201670. Design Characteristics and Tobacco Metal \nL33: Concentrations in Filtered Cigars. PubMed. Caruso, Rosalie V; O'Connor, Richard \nL34: J; Travers, Mark J ...\nL35: * 【9†Public Offices, Personal Demands: Capability in ...; publish_date: \nL36: none†dokumen.pub】 ... (16201670).9 The Institutiones Politicae consists of two \nL37: books. Book one deals with the origin, nature and goal of the commonwealth on \nL38: the one hand, and the ...", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Result shows Kenya. So the press release is about Kenya's vaccine doses as of Dec 5, 2021. The press release likely by World Bank or something. The search result shows \"World Bank lifts Kenya's growth prospect...\" maybe a press release dated Dec 14, 2021. But the clue says \"between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered.\" That matches Kenya on Dec 5, 2021. So the year is 2021.\n\nNow the next clue: \"That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services.\"\n\nThis sounds like an environmental or conservation organization. \"Nearly 2.2 million members and supporters contributed over $49 million.\" That might be something like \"The Nature Conservancy\", \"World Wildlife Fund\", \"Sierra Club\", \"Environmental Defense Fund\", \"Natural Resources Defense Council\", etc. 
Let's search.\n\nSearch for \"nearly 2.2 million members and supporters contributed over $49 million\" \"12% of total revenue coming from contributed services\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"\\\"nearly 2.2 million members\\\" \\\"contributed over $49 million\\\"\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[1] \"nearly 2.2 million members\" \"contributed over $49 million\" (Search_Results/\"nearly 2.2 million members\" \"contributed over $49 million\")\n**viewing lines [0 - 41] of 41**\n\nL0: \nL1: URL: Search_Results/\"nearly 2.2 million members\" \"contributed over $49 million\"\nL2: # Search Results\nL3: \nL4: * 【0†The Global Good Fund, Microsoft and Goodwill join forces ...; \nL5: publish_date: none†globalgoodfund.org】 Sep 28, 2022 — ... more than 2.2 million \nL6: people. They additionally contribute close to $49 billion in local, state and \nL7: federal tax revenues. When COVID hit ...\nL8: * 【1†Almost 22 billion American tax dollars spent to wipe out a ...; \nL9: publish_date: none†www.facebook.com】 US military funding for Israel's war crimes\nL10: in Lebanon and Gaza has now cost US taxpayers over $22 billion. When millions \nL11: struggle to afford the ...\nL12: * 【2†Corporate America has largely abandoned its post-January ...; \nL13: publish_date: none†www.citizensforethics.org】 Jul 29, 2025 — Since the January 6\nL14: insurrection, over 2,000 corporate and industry group PACs have given over $174\nL15: million to members of the Sedition ...\nL16: * 【3†Audit shows millions in questionable taxpayer spending at ...; \nL17: publish_date: none†www.aol.com】 18 hours ago — ... nearly doubled from 1.3 \nL18: million to about 2.2 million. That is more than one in four Washington state \nL19: residents receiving Medicaid, and the ...\nL20: * 【4†Incarceration and Poverty in the United States - AAF; publish_date: \nL21: none†www.americanactionforum.org】 Jun 30, 2020 — The United States currently \nL22: incarcerates 2.2 million people, nearly half of whom are non-violent drug \nL23: offenders, accused people held pre ...\nL24: * 【5†How the NRA Spends Revenue (2023); publish_date: none†paddockpost.com】 \nL25: Apr 25, 2025 — In 2023, the NRA reported total revenue of $176 million (compared\nL26: to $211 million in 2022, $227 million in 2021, $282 million in 2020, $292 ...\nL27: * 【6†NCA Newsroom; publish_date: none†www.ncausa.org】 ... 2.2 million U.S. \nL28: jobs—operating in every U.S. state and territory and contributing nearly $350 \nL29: billion to the U.S. economy every year. For media inquiries ...\nL30: * 【7†The Early Childhood Educator Workforce; publish_date: \nL31: none†cscce.berkeley.edu】 About 2.2 million adults are paid to care for and \nL32: educate more than 9.7 million children between birth and age five in center-\nL33: based and home-based settings.\nL34: * 【8†Bringing Out the Worst in Congress: CRA By the Numbers ...; publish_date:\nL35: none†progressivereform.org】 Jul 14, 2025 — In fact, the top ten recipients (out\nL36: of 25) account for almost $7 million, showing how these contributions are \nL37: targeted and strategic. Real ...\nL38: * 【9†Club 100: Over 100 Americans gave more than $100000 to ...; publish_date:\nL39: none†publicintegrity.org】 Jan 7, 2022 — Through Internet investments during the\nL40: 1990s, he attracted 2.2 million online accounts valued at $174 million. 
Schwab \nL41: has been a vocal ...", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Not that. Try search for \"2.2 million members and supporters contributed over $49 million\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"\\\"2.2 million members\\\" \\\"contributed over $49\\\"\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[2] \"2.2 million members\" \"contributed over $49\" (Search_Results/\"2.2 million members\" \"contributed over $49\")\n**viewing lines [0 - 2] of 2**\n\nL0: \nL1: URL: Search_Results/\"2.2 million members\" \"contributed over $49\"\nL2: # Search Results", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Maybe it's an annual report of a non-profit. Search for \"annual report 2.2 million members contributed $49 million\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"\\\"2.2 million members\\\" \\\"annual report\\\" $49 million\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[3] \"2.2 million members\" \"annual report\" $49 million (Search_Results/\"2.2 million members\" \"annual report\" $49 million)\n**viewing lines [0 - 36] of 36**\n\nL0: \nL1: URL: Search_Results/\"2.2 million members\" \"annual report\" $49 million\nL2: # Search Results\nL3: \nL4: * 【0†20-F; publish_date: none†www.sec.gov】 ANNUAL REPORT PURSUANT TO SECTION \nL5: ... Our membership grew from 2.1 million members as at December 31, 2023 to 2.2 \nL6: million members as at December 31, 2024.\nL7: * 【1†Oportun Reports Fourth Quarter and Full Year 2023 ...; publish_date: \nL8: none†investor.oportun.com】 Mar 12, 2024 — Oportun (Nasdaq: OPRT) is a mission-\nL9: driven fintech that puts its 2.2 million members' financial goals within reach. \nL10: ... annual report on ...\nL11: * 【2†2 0 21; publish_date: none†www.annualreports.com】 ANNUAL REPORT. 2. 0. \nL12: 21. 2. 0. 21. Page 2. 2. DEFENDERS OF WILDLIFE. 2. 0. 21. 2. 0. 21 ... In 2021, \nL13: Defenders of Wildlife's nearly 2.2 million members and.\nL14: * 【3†Annual report and accounts 2020; publish_date: none†www.3i.com】 \nL15: Disclaimer. The Annual report and accounts have been prepared solely to provide \nL16: information to shareholders. ... 2.2 million members. In December 2019, we sold \nL17: ...\nL18: * 【4†united states securities and exchange commission; publish_date: \nL19: none†s21.q4cdn.com】 Dec 14, 2018 — ☒ ANNUAL REPORT ... approximately 5.7 million\nL20: Members, including approximately 2.2 million Members from the acquisition of \nL21: Best Doctors.\nL22: * 【5†securities and exchange commission; publish_date: none†www.sec.gov】 ... \nL23: Annual Report on Form 10-K filed with the SEC on March 10, 2020. The ... 2.2 \nL24: million members at June 30, 2020. The following table highlights the ...\nL25: * 【6†2022-23 Annual Comprehensive Financial Report - CalPERS; publish_date: \nL26: none†www.calpers.ca.gov】 Nov 17, 2023 — ... annual report is consistent with \nL27: these accounting principles ... 2.2 million members and health benefit services \nL28: to over 1.5 million ...\nL29: * 【7†Medical Centers Report; publish_date: \nL30: none†regents.universityofcalifornia.edu】 Oct 8, 2019 — which will allow its 2.2 \nL31: million members to receive advanced and ... 
annual report of the University of \nL32: California. For the Medical ...\nL33: * 【8†Working TogeTher to make HealtH care better; publish_date: \nL34: none†www.unitedhealthgroup.com】 Feb 8, 2012 — ANNUAL REPORT PURSUANT TO SECTION \nL35: 13 OR 15(d) OF THE ... approximately 2.2 million members enrolled in its \nL36: Medicare. Advantage ...", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Result 2 shows \"Defenders of Wildlife\". That might be the organization. Let's open.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 3, \"id\": 2}" + } + } + ] + }, + { + "role": "tool", + "content": "[4] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf)\n**viewing lines [0 - 53] of 261**\n\nL0: \nL1: URL: https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-\nL2: wildlife_2021.pdf\nL3: ===== Page 1 ===== 2021 ANNUAL REPORT ===== Page 2 ===== # DEFENDERS OF WILDLIFE\nL4: made important progress for imperiled species and vital landscapes across the \nL5: United States in 2021. \\--- **LAWYERS** immediately **challenged** the premature\nL6: and reckless decision to strip **gray wolves** of federal **Endangered Species \nL7: Act (ESA)** protections. For many decades, Defenders has led the effort to \nL8: protect and restore the gray wolf, and we will continue to fight the \nL9: unscientific and hostile anti-wolf policies that impede conservation progress \nL10: and will carry on our unrelenting battle to restore federal protections for this\nL11: iconic keystone species. \\--- **LOBBYISTS** worked around the clock to keep \nL12: wildlife and climate priorities in the **Infrastructure Investment and Jobs \nL13: Act**. We also continue fighting to keep important wildlife and habitat funding \nL14: in relevant **appropriations bills**. \\--- 2 DEFENDERS OF WILDLIFE ===== Page 3 \nL15: ===== POLICY EXPERTS pushed forward on the urgent need for a National \nL16: Biodiversity Strategy (NBS), an all-of-government approach to address the \nL17: unprecedented loss of wildlife and habitat we are experiencing. We have coupled \nL18: this with our new campaign to expand the National Wildlife Refuge System to \nL19: preserve our nation’s only lands set aside for wildlife. By defending, funding \nL20: and expanding our national wildlife refuges, we will directly address \nL21: biodiversity loss and climate change while promoting increased equitable access \nL22: to nature. FIELD TEAMS were on the ground helping to recover imperiled species. \nL23: From panthers and sea turtles in Florida to wolves, bison and black-footed \nL24: ferrets in Montana, Defenders’ conservation experts were in the field saving \nL25: wildlife all over the country. CONSERVATION INNOVATION EXPERTS provided \nL26: comprehensive analyses to guide policy and inform conservation strategies to \nL27: reach the goal of protecting 30% of our terrestrial and marine systems by 2030 \nL28: (“30x30”). Defenders’ Center for Conservation Innovation (CCI) produced a report\nL29: which details actions we need to take to achieve 30x30 while protecting \nL30: biodiversity and addressing the climate crisis. DEFENDERS.ORG ===== Page 4 =====\nL31: WE HAVE ACCOMPLISHED MUCH THIS YEAR WORKING WITH AN ADMINISTRATION THAT VALUES \nL32: SCIENCE AND CONSERVATION. 
Our many successes include the return of protections \nL33: to the Tongass National Forest in Alaska and the suspension of oil and gas \nL34: leasing permits for the Arctic National Wildlife Refuge. Defenders also played a\nL35: lead role in successfully defending the Migratory Bird Protection Act from \nL36: attack and securing critical protections for migratory birds. Throughout 2021 \nL37: Defenders made important progress for the wildlife and wild places we all love, \nL38: yet our nation’s wildlife still face unprecedented challenges. More and more \nL39: species, including manatees, Mexican gray wolves, polar bears and North Atlantic\nL40: right whales, face the very real threat of extinction because of climate \nL41: change, habitat loss, pollution and inadequate protections. In our work we \nL42: continue to demonstrate success by following the science, building partnerships,\nL43: leading with innovative solutions and focusing on collaborative and practical \nL44: approaches in defense of nature. Together we can make a lasting difference for \nL45: wildlife and habitats. Your support is essential to our work and we couldn’t do \nL46: it without you. Thank you for being an important part of our efforts to protect \nL47: imperiled wildlife across our nation. Jamie Rappaport Clark President and CEO \nL48: John Dayton Board Chair OUR NEW TELEVISION SERIES, WILDLIFE NATION WITH JEFF \nL49: CORWIN, premiered in October and airs every weekend on ABC stations nationwide. \nL50: This groundbreaking series takes Jeff across the country to meet Defenders’ \nL51: experts along with our science, tribal and community partners to educate and \nL52: inspire viewers to join with Defenders to protect wildlife. While highlighting \nL53: some of our important conservation work and raising awareness of important ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Search within PDF for \"2.2 million members\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "find", + "arguments": "{\"cursor\": 4, \"pattern\": \"2.2\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[5] Find results for text: `2.2` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf/find?pattern=2.2)\n**viewing lines [0 - 4] of 4**\n\nL0: # 【0†match at L187】\nL1: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters \nL2: contributed more than $49 million for wildlife and wildlife habitat. This figure\nL3: includes all those who generously donated their time and expertise. The \nL4: steadfast support of our donors allows Defenders to sustain our program and ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Open around L180.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 4, \"loc\": 170}" + } + } + ] + }, + { + "role": "tool", + "content": "[6] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf)\n**viewing lines [170 - 215] of 261**\n\nL170: into the wild. **10 DEFENDERS OF WILDLIFE** ===== Page 11 ===== AFTER ANOTHER \nL171: SUCCESSFUL DEFENDERS LAWSUIT ON BEHALF OF THE CRITICALLY ENDANGERED RED WOLF, \nL172: FWS reversed its decision to limit the recovery area and committed to a robust \nL173: release strategy. 
After years of inaction, FWS released eight wolves to the wild\nL174: in North Carolina and plan to release nine more wolves in the spring of 2022. \nL175: This is an incredible win for this critically endangered species whose \nL176: population has dwindled down to single digits in the wild because of \nL177: mismanagement, vehicle strikes and poaching. DEFENDERS CONTINUED TO LEAD EFFORTS\nL178: TO PROTECT THE FLORIDA MANATEE, a beloved species that suffered the deadliest \nL179: year on record in 2021, tragically surpassing 1,000 deaths because of water \nL180: pollution and lack of warm water habitat. Defenders led advocacy and education \nL181: aimed at restoring the natural flow of the dammed Ocklawaha River, which would \nL182: provide critical warm-water habitat that manatees need to survive. Defenders’ \nL183: legal team continued to fight for manatees in the courts, holding government \nL184: agencies accountable for protecting critical habitat and addressing the \nL185: devastating water pollution that is killing the seagrass and causing manatees to\nL186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In \nL187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters \nL188: contributed more than $49 million for wildlife and wildlife habitat. This figure\nL189: includes all those who generously donated their time and expertise. The \nL190: steadfast support of our donors allows Defenders to sustain our program and \nL191: public education efforts in the field, the courts and on Capitol Hill. 2021 \nL192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split \nL193: interests $7,692 Income from investments, annuity reserve funds and trusts \nL194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total \nL195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency \nL196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total \nL197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 \nL198: Net Assets, End of the Year $45,144 Dollars are in thousands. 12 DEFENDERS OF \nL199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, \nL200: trusts and split interests 15% Royalties and other income 7% Contributed \nL201: services 12% Program and support services 97% Fundraising 3% ===== Page 13 =====\nL202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl \nL203: Sachs* Vice Chair Connecticut Whitfield Bailey* Secretary Tennessee Mark Caylor*\nL204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. \nL205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela \nL206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia \nL207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. \nL208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* \nL209: California Terry L. Root, Ph.D Florida BOARD EMERITUS Terry C. Pelster, Esq. New\nL210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS \nL211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, \nL212: Ph.D Arizona State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D\nL213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State \nL214: University Terry L. 
Root, Ph.D Stanford University Mark Schwartz, Ph.D \nL215: University of California-Davis David S. Wilcox, Ph.D Princeton University Rae ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Scrolling further for \"Contributed services 12%\". Already seen. So the annual report is Defenders of Wildlife 2021 annual report.\n\nNext clue: \"The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%.\"\n\nThus, the following year (2022) annual report of Defenders of Wildlife includes mention of rescinding plan to open most of 22 million acres of NPR-A to oil and gas drilling. Also contributed services decreased to 10% (from 12% in 2021). So we need to find the 2022 annual report for Defenders of Wildlife.\n\nSearch for \"Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[7] Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska (Search_Results/Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska)\n**viewing lines [0 - 37] of 37**\n\nL0: \nL1: URL: Search_Results/Defenders of Wildlife 2022 annual report National Petroleum \nL2: Reserve Alaska\nL3: # Search Results\nL4: \nL5: * 【0†CELEBRATING YEARS; publish_date: none†www.annualreports.com】 With less \nL6: than 340 right whales left in the wild, Defenders is fighting tirelessly to end \nL7: deadly lobster gear entanglements and vessel strikes that are driving ...\nL8: * 【1†Financials; publish_date: none†defenders.org】 We invite you to explore \nL9: the reports below to learn more about our activities and accomplishments, and \nL10: how we put your money to work for wildlife.\nL11: * 【2†Alaska Program Looks Back on 2022; publish_date: none†defenders.org】 Feb \nL12: 9, 2023 — Thanks to a lawsuit joined by Defenders, seven million acres were \nL13: returned to protection within the National Petroleum Reserve-Alaska (NPR-A), ...\nL14: * 【3†Defenders-of-Wildlife-2022-Financial-Statement. ...; publish_date: \nL15: none†defenders.org】 We have audited the accompanying consolidated financial \nL16: statements of Defenders of Wildlife and Affiliated Defenders of Wildlife Action \nL17: Fund (collectively, ...\nL18: * 【4†2022 Annual Report; publish_date: none†alaskaconservation.org】 Jun 13, \nL19: 2023 — In 2022, we focused on three landscapes: the Arctic. National Wildlife \nL20: Refuge, Bristol Bay, and the Tongass National Forest. In March 2022,.\nL21: * 【5†Assessment of ecological and cultural values within the ...; \nL22: publish_date: none†www.blm.gov】 This document was written to provide technical \nL23: information regarding the ecological importance of the National Petroleum \nL24: Reserve – Alaska (NPR-A). Several ...\nL25: * 【6†Accomplishments Report; publish_date: none†defenders.org】 National \nL26: Petroleum Reserve-Alaska. 
Identified for exceptional wildlife and cultural \nL27: values, including critical habitat for polar bears and other species ...\nL28: * 【7†2022 annual report; publish_date: none†dory-\nL29: plantain-s2zc.squarespace.com】 These projects are made possible through \nL30: collaborations with Defenders of. Wildlife, Cook Inletkeeper, Trustees for \nL31: Alaska,. Environmental Investigation Agency, ...\nL32: * 【8†23IMPACT REPORT; publish_date: none†www.annualreports.com】 Defenders of \nL33: Wildlife made incredible progress protecting wildlife and wild places in 2023, \nL34: helping shape a brighter future for imperiled species and vital ...\nL35: * 【9†From Leasing to Land Protections; publish_date: none†defenders-cci.org】 \nL36: Abstract. When you hear the name, National Petroleum Reserve - Alaska (NPR-A, or\nL37: Reserve), you might think of a barren land filled with oil wells and ...", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Open result 0.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 7, \"id\": 0}" + } + } + ] + }, + { + "role": "tool", + "content": "[8] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf)\n**viewing lines [0 - 53] of 289**\n\nL0: \nL1: URL: https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-\nL2: wildlife_2022.pdf\nL3: ===== Page 1 ===== 2022 Impact Report C E L E B R A T I N G Y E A R S ===== Page\nL4: 2 ===== 2022 Defenders of Wildlife made important progress for imperiled \nL5: species and vital landscapes across the United States in 2022. GRAY WOLF | JIM \nL6: GUMMERAL MAY STOCK PRIOR Lawyers successfully challenged the previous \nL7: administration’s decision to delist the gray wolf and restored critical federal \nL8: protections under the Endangered Species Act. This latest triumph in court is \nL9: part of our ongoing battle to protect and restore gray wolves throughout their \nL10: historical range and shield them from persecution by extremist legislators in \nL11: Idaho, Montana and Wyoming. TWO MORE FATALIZED GRAY SWALLETS TO SEA TO SHARE \nL12: ALLIANCE Lobbyists worked around the clock to expand funding for wildlife \nL13: conservation in the FY2022 federal spending bill, which included $31 million (a \nL14: 44% increase) for the Bureau of Land Management’s Threatened and Endangered \nL15: Species Program, $2.5 million (an 81% increase) for the U.S. Department of \nL16: Agriculture Wildlife Services’ Nonlethal Initiative to prevent human-wildlife \nL17: conflicts and $21 million (a 320% increase) for North Atlantic right whale \nL18: conservation. 2 DEFENDERS OF WILDLIFE ===== Page 3 ===== **Policy Experts** \nL19: played a crucial role in securing international trade protections for 100 \nL20: species of sharks and rays, all 158 species of glass frogs and 73 species of \nL21: reptiles, including 21 species of desert horned lizards, at the Convention on \nL22: International Trade in Endangered Species (CITES) in Panama. \\--- **Field \nL23: Teams** worked tirelessly to protect and restore imperiled species across the \nL24: country. From Florida manatees and red wolves in the Southeast to belugas and \nL25: grizzly bears in Alaska, Defenders’ conservation experts were on the ground \nL26: saving species that need our help to survive and thrive. 
\\--- **Conservation \nL27: Innovation Experts** published more than 10 peer-reviewed studies on topics that\nL28: include the Cook Inlet beluga whale, golden-cheeked warbler, global parrot \nL29: biodiversity, the Endangered Species Act, the effects of mountaintop removal \nL30: mining on endangered species, the ecological importance of panthers and the \nL31: implementation of “30x30” – the globally recognized goal to which President \nL32: Biden committed the U.S. to conserve 30% of our imperiled lands and waters by \nL33: 2030. \\--- **DEFENDERS.ORG** ===== Page 4 ===== THANK YOU Defenders celebrated \nL34: our 75th anniversary in 2022—an exciting milestone that we attribute to our \nL35: unwavering dedication to our wildlife conservation mission. From helping to pass\nL36: the Endangered Species Act and other bedrock conservation laws to leading the \nL37: advocacy on the reintroduction of wolves to Yellowstone National Park and other \nL38: parts of the West, Defenders is proud to inspire, lead and ultimately celebrate \nL39: some of the most consequential conservation wins for wildlife and wild places. \nL40: It is wonderfully fitting that we also mark 2022 as a year when we successfully \nL41: defended, yet again, the original focal species and symbol of our organization, \nL42: the gray wolf. While the future for gray wolves and many other species remains \nL43: uncertain, we will never stop fighting to protect the imperiled wildlife that \nL44: need us and to restore vibrant and balanced ecosystems. Our mission is more \nL45: important than ever due to the escalating biodiversity and climate crises. \nL46: Defenders is determined to build on our accomplishments and seize new \nL47: opportunities to address these challenges through our multifaceted approach to \nL48: conservation. We focus on developing innovative and science-based solutions, \nL49: coordinating on-the-ground species and habitat restoration, promoting human-\nL50: wildlife coexistence, protecting and strengthening our bedrock environmental \nL51: laws, investing in impactful collaborations and partnerships, building a more \nL52: diverse and powerful conservation movement and challenging attacks on wildlife \nL53: through advocacy, policy and litigation. Defenders’ continued success is made ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Search within PDF for \"National Petroleum Reserve\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "find", + "arguments": "{\"cursor\": 8, \"pattern\": \"Petroleum\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[9] Find results for text: `petroleum` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf/find?pattern=petroleum)\n**viewing lines [0 - 4] of 4**\n\nL0: # 【0†match at L201】\nL1: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas \nL2: drilling and removed protections for important wildlife habitat. 
Protections \nL3: have now been restored for nearly half of the reserve’s pristine lands, which \nL4: are vital habitat for shorebirds, denning polar bears and tens of thousands of ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Open around L190.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 8, \"loc\": 180}" + } + } + ] + }, + { + "role": "tool", + "content": "[10] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf)\n**viewing lines [180 - 227] of 289**\n\nL180: the sixth successful transfer of bison to the Assiniboine and Sioux Tribes of \nL181: Fort Peck since 2019. \\--- **SWIFT FIX KITS | © ROSIMA PAELARINTSKIMMA MADDIAL \nL182: 200 AND CONSERVATION BIOLOGY INSTITUTE** \\--- **Celebrating our third year** of \nL183: a collaborative program with the Aaniih and Nakoda Tribes and others to restore \nL184: swift foxes to the Fort Belknap Indian Reservation in Montana, Defenders helped \nL185: with the release of 28 more swift foxes. With over 100 foxes reintroduced \nL186: through this program, monitoring efforts show that they are reproducing in the \nL187: wild—a critical measure of success for a self-sustaining population. \\--- \nL188: **Defenders continued to lead the way** for conserving and recovering the \nL189: endangered black-footed ferret, supporting the black-footed ferret survey for \nL190: the Fort Belknap Indian community. Thirty-six ferrets were vaccinated against \nL191: sylvatic plague and two dozen kits were released in the wild. \\--- **10 \nL192: DEFENDERS OF WILDLIFE** ===== Page 11 ===== Defenders helped to bring hope for \nL193: recovery for the endangered military macaw, adding 11 fledglings to a growing \nL194: wild population in Puerta Vallarta, Mexico, that is under pressure from habitat \nL195: loss and poachers for the illegal pet trade. Accord- ing to our recent report, \nL196: the 2008 parrot trade ban that Defenders fought to achieve is working. \nL197: Preventing more than 30,000 parrots from being illegally trapped each year, the \nL198: trade ban has resulted in a 47% decrease in the illegal trade of parrots and an \nL199: 88% decrease in U.S. seizures of Mexican parrots. As a result of a Defenders \nL200: lawsuit, BLM rescinded the previous administration’s plan that opened most of \nL201: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas \nL202: drilling and removed protections for important wildlife habitat. Protections \nL203: have now been restored for nearly half of the reserve’s pristine lands, which \nL204: are vital habitat for shorebirds, denning polar bears and tens of thousands of \nL205: caribou in the Teshekpuk caribou herd. Our new storymap illustrates why the \nL206: reserve is so important to wildlife. Defenders marked the 20th anniversary of \nL207: our Sea Otter Awareness Week. In celebration of this milestone, we sponsored \nL208: state legislation, signed by Gov. Gavin Newsom, that formally recognizes the \nL209: event’s anniversary and acknowledges the vital role that sea otters play in \nL210: California’s nearshore ecosystems. This annual celebration provides a unique \nL211: opportunity to raise awareness of the important role sea otters play in \nL212: maintaining ecosystem health, our need to coexist with sea otters and the \nL213: efforts of Defenders and others in advancing sea otter conservation. 
\nL214: DEFENDERS.ORG ===== Page 12 ===== FINANCIAL REPORT In 2022, Defenders of \nL215: Wildlife’s nearly 2.2 million members and supporters contributed more than $41 \nL216: million for wildlife and wildlife habitat. This figure includes all those who \nL217: generously donated their time and expertise. The steadfast support of our donors\nL218: allows Defenders to sustain our program and public education efforts in the \nL219: field, the courts and on Capitol Hill. 2022 SOURCES OF FUNDS Grants and \nL220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from \nL221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other \nL222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS \nL223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising \nL224: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets\nL225: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433\nL226: Dollars are in thousands. 12 DEFENDERS OF WILDLIFE Grants and contributions 59%\nL227: Contributed services 10% Royalties and other income 2% Bequests, trusts and ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Scrolling a bit more for \"Contributed services 10%\". Already seen. So indeed the 2022 annual report shows contributed services 10%, down from 12% in 2021.\n\nThus the organization is Defenders of Wildlife.\n\nThe question: \"Who was the emeritus board member that year? Name only, no titles.\"\n\nWe need to identify the emeritus board member listed in the 2022 annual report. From the 2021 annual report, we saw \"BOARD EMERITUS Terry C. Pelster, Esq.\" However, that is for 2021. The question asks \"Who was the emeritus board member that year?\" That year refers to the following year (2022). So we need to check the 2022 annual report for the emeritus board member.\n\nOpen the 2022 PDF near the board list. Search for \"Emeritus\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "find", + "arguments": "{\"cursor\": 8, \"pattern\": \"Emeritus\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[11] Find results for text: `emeritus` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf/find?pattern=emeritus)\n**viewing lines [0 - 10] of 10**\n\nL0: # 【0†match at L237】\nL1: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New \nL2: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela \nL3: Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, Ph.D Arizona\nL4: State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D Universidad\nL5: \nL6: # 【1†match at L243】\nL7: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David \nL8: S. 
Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic \nL9: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of \nL10: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Open around L230.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 8, \"loc\": 220}" + } + } + ] + }, + { + "role": "tool", + "content": "[12] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf)\n**viewing lines [220 - 267] of 289**\n\nL220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from \nL221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other \nL222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS \nL223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising \nL224: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets\nL225: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433\nL226: Dollars are in thousands. 12 DEFENDERS OF WILDLIFE Grants and contributions 59%\nL227: Contributed services 10% Royalties and other income 2% Bequests, trusts and \nL228: split interests 29% TOTAL REVENUE Income from investments -15% Program and \nL229: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== \nL230: DEFENDERS.ORG 13 ===== Page 14 ===== BOARD OF DIRECTORS John Dayton, Esq.* Chair\nL231: Texas Sheryl Sachs* Vice Chair Connecticut Dinah Bear, Esq.* Secretary Arizona \nL232: Mark Caylor* Treasurer District of Columbia Susan Alexander Connecticut Sylvia \nL233: Baca* California Whitefield Bailey Tennessee Donald Barry, Esq. Oregon Kim \nL234: O’Keefe Beck, Ph.D California Gabriela Chavarria, Ph.D Washington Jamie \nL235: Rappaport Clark* President & CEO Virginia John Donovan, Esq.* Montana and \nL236: California Caroline D. Gabel Maryland Terry L. Root, Ph.D New Mexico Thomas D. \nL237: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New \nL238: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela \nL239: Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, Ph.D Arizona\nL240: State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D Universidad\nL241: Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State University Terry \nL242: L. Root, Ph.D Stanford University Mark Schwartz, Ph.D University of California-\nL243: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David \nL244: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic \nL245: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of \nL246: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice \nL247: President, Conservation Law Matt Braughler Vice President, Philanthropy Rachel \nL248: Brittin Vice President, Communications Shawn Cantrell Vice President, Field \nL249: Conservation Programs Robert L. 
Dewey Vice President, Government Relations & \nL250: External Affairs Monica Goldberg Vice President, Landscape Conservation Sharon \nL251: Morris-Smith Vice President of Human Resources Elizabeth Nichols Vice President,\nL252: Finance/CFO Anthony Pegues Vice President, Diversity, Equity, Inclusion, \nL253: Justice Lindsay Rosa, Ph.D Vice President, Conservation Research and Innovation \nL254: Brandace Swimley Vice President, Integrated Marketing 14 DEFENDERS OF WILDLIFE \nL255: ===== Page 15 ===== There are many ways to support Defenders of Wildlife and \nL256: protect our imperiled wildlife. To learn more or donate, contact us at \nL257: 1-800-385-9712, visit Defenders.org/donate or choose one of the following \nL258: options. ➤ Transfer appreciated assets directly to Defenders and maximize \nL259: potential tax benefits. Defenders.org/ donate-stocks-help-save-wildlife ➤ Become\nL260: a Leadership Council member with a single donation of $1,000 or more and enjoy \nL261: more personal involvement with Defenders. Defenders.org/leadershipcouncil ➤ If \nL262: you are 70.5 or older, roll over a portion of your IRA to benefit wildlife. \nL263: Remember, these gifts must come directly from your IRA account. \nL264: Defenders.org/planned-giving/gifts-your-retirement-plan ➤ Get started on a \nL265: contribution from your donor-advised fund Defenders.org/donor-advised-funds ➤ \nL266: Check with your personnel office or that of your spouse/partner about your \nL267: company’s Corporate Matching Gifts program or visit us at ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Thus the emeritus board member is Terry C. Pelster. However, the question might ask for \"Who was the emeritus board member that year?\" Possibly referring to the year of the annual report that mentioned the rescinded plan and contributed services decrease. That is 2022. So the answer is Terry C. Pelster.\n\nBut let's verify that the 2022 annual report indeed includes the mention of contributed services decreasing to 10%. We saw that. So answer is Terry C. Pelster.\n\nBut maybe the question is tricky: \"Who was the emeritus board member that year?\" Could be the emeritus board member listed in the 2021 annual report? However, the question says \"That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%.\"\n\nThus the \"following year\" annual report is 2022. The question likely asks: \"Who was the emeritus board member that year?\" Meaning the emeritus board member listed in that annual report (2022). So answer: Terry C. Pelster.\n\nBut let's confirm that the 2021 annual report also shows emeritus board member Terry C. Pelster. Indeed we saw that. But perhaps the 2022 report might have a different emeritus board member. We saw it's the same. So answer is Terry C. Pelster.\n\nHowever, the question might be about a different organization. Let's verify the details.\n\nFirst clue: Press release about Kenya vaccine doses on Dec 5, 2021. That is likely a World Bank press release. 
Search for \"World Bank Kenya vaccine doses December 2021 press release\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[13] As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines (Search_Results/As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines)\n**viewing lines [0 - 41] of 41**\n\nL0: \nL1: URL: Search_Results/As of December 5, 2021, Kenya had received a total of \nL2: 16,201,670 vaccines\nL3: # Search Results\nL4: \nL5: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: \nL6: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had \nL7: received a total of 16,201,670 vaccines, with 7,583,134 administered. While \nL8: vaccine acceptance is ...\nL9: * 【1†Unmet need for COVID-19 vaccination coverage in Kenya - PMC; \nL10: publish_date: none†pmc.ncbi.nlm.nih.gov】 by SK Muchiri · 2022 · Cited by 42 — As\nL11: of December 2021, six counties had a vaccination coverage of less than 5%. \nL12: These counties include Garissa, Mandera, Marsabit, Tana River, Turkana, and ...\nL13: * 【2†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec \nL14: 1, 2021 — • Total Covid-19 Vaccines Received to date- 16,201,670 ... Table 10: \nL15: Vaccine Logistics Received in the Country as at 5th, December 2021.\nL16: * 【3†COVID-19 vaccination refusal trends in Kenya over 2021 - PMC; \nL17: publish_date: none†pmc.ncbi.nlm.nih.gov】 by RT Rego · 2023 · Cited by 21 — We \nL18: assessed vaccine refusal over time in Kenya, and characterized factors \nL19: associated with changes in vaccination refusal.\nL20: * 【4†Ciheb-Kenya on the Front Lines of the COVID-19 Vaccine ...; publish_date:\nL21: none†www.ciheb.org】 Of the 721,509 persons vaccinated, 409,711 are aged 58 \nL22: years and above, 143,050 are healthcare workers, 109,593 are teachers, and \nL23: 59,155 are security officers.\nL24: * 【5†proportion of fully vaccinated adults 23.3pc.; publish_date: \nL25: none†www.facebook.com】 COVID-19: KENYA administers 150632 new vaccinations; \nL26: total jabs given now 13782963; proportion of fully vaccinated adults 23.3pc.\nL27: * 【6†National COVID-19 Vaccines Deployment and ...; publish_date: \nL28: none†www.parliament.go.ke】 Kenya has a well-established immunization program and\nL29: a robust disease surveillance and response unit (DSRU) which serves as an early\nL30: warning system, to.\nL31: * 【7†Kenya: Vaccine Requirements Violate Rights; publish_date: \nL32: none†www.hrw.org】 Dec 13, 2021 — Kenya, with an estimated adult population of \nL33: 27.2 million and a total population of 55 million, has received approximately 23\nL34: million doses as ...\nL35: * 【8†Government gives 20 million a month to get vaccinated; publish_date: \nL36: none†www.bbc.com】 Nov 22, 2021 — Although less than 10% of Kenyans are \nL37: vaccinated, the government wants to avoid a surge over Christmas.\nL38: * 【9†ministry of health kenya covid-19 immunization status ...; publish_date: \nL39: none†www.health.go.ke】 May 15, 2022 — By the end of 2021 Kenya had administered \nL40: 10.1 million doses of 5 types of Covid-19 vaccines. 
During this period 4.2 \nL41: million people were ...", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Open result 0.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 13, \"id\": 0}" + } + } + ] + }, + { + "role": "tool", + "content": "[14] www.worldbank.org (https://www.worldbank.org/en/news/press-release/2021/12/14/kenya-s-economy-is-showing-resilience-as-output-rises-above-pre-pandemic-levels-driven-by-a-rebound-in-the-services-sect)\n**viewing lines [0 - 52] of 61**\n\nL0: \nL1: URL: https://www.worldbank.org/en/news/press-release/2021/12/14/kenya-s-economy-\nL2: is-showing-resilience-as-output-rises-above-pre-pandemic-levels-driven-by-a-\nL3: rebound-in-the-services-sect\nL4: NAIROBI, December 14, 2021 — Kenya’s economy has demonstrated resilience to the \nL5: COVID-19 shock, with output in the first half of the year rising above pre-\nL6: pandemic levels. In 2021 as a whole, gross domestic product (GDP) is expected to\nL7: grow by 5%, one of the faster recoveries among Sub-Saharan African countries. \nL8: Overall economic performance is expected to be robust at 4.9% per year in \nL9: 2022-23, similar to the pre-pandemic pace (5% average annual growth from 2010 to\nL10: 2019). According to the 24th edition of the Kenya Economic Update, “From \nL11: Recovery to Better Jobs,” growth has been supported by rebounds in industry and,\nL12: especially, services. Agricultural output, however, fell by 0.5% year on year \nL13: in the first half of 2021 following a particularly strong performance in 2020, \nL14: partly due to below-average rains. Demand-side recovery has been supported by a \nL15: revival in private consumption, against a backdrop of improving employment \nL16: conditions and household incomes. “Kenya’s economy has shown considerable \nL17: resilience to the enormous shock of the pandemic, and this year is expected to \nL18: post one of the stronger growth rebounds in the region thanks to diversified \nL19: sources of growth and sound economic policies and management,” said Keith \nL20: Hansen, World Bank Country Director for Kenya. “However, poverty has increased, \nL21: and the buffers and coping mechanisms of households, firms, and the public \nL22: finances have been depleted.” Economic activity in Kenya has continued to adapt \nL23: to the pandemic and associated restrictions. A mix of containment measures, such\nL24: as a nightly curfew, were in effect through most of 2021, while more \nL25: economically disruptive measures such as lockdowns and travel restrictions were \nL26: phased, limiting the impact on economic activities. The vaccine rollout, which \nL27: had a slow start due to supply constraints, has picked up as new shipments of \nL28: vaccines have arrived, particularly since September. This has supported economic\nL29: recovery and growth through the third quarter of 2021. As of December 5, 2021, \nL30: Kenya had received a total of 16,201,670 vaccines, with 7,583,134 administered. \nL31: While vaccine acceptance is reportedly high there is still a long way to go \nL32: towards the government’s target of fully inoculating the adult population of \nL33: about 30 million by the end of 2022.As of December 6, 2021, about 10% of adults \nL34: (2.9 million people) had been fully vaccinated while another 16% (4.9 million \nL35: people) had received their first dose. The report also projects robust growth in\nL36: the medium-term. 
This outlook takes into account that some sub-sectors, such as\nL37: education, have bounced back strongly, but others such as international \nL38: tourism, have only partially and face a much more protracted recovery. The \nL39: anticipated further recovery of hotels and restaurants, trade, transport, and \nL40: other services, depends on substantial vaccination progress to help prevent new \nL41: waves of infections and associated containment measures. “Recent economic \nL42: performance has been strong and the outlook is positive, but in Kenya, as \nL43: everywhere, the future course of the pandemic remains a major source of \nL44: uncertainty, said Alex Sienaert, Senior Economist for Kenya. “To support further\nL45: recovery, reduce debt distress risks, and rebuild space for social and \nL46: development spending, it is critical that Kenya continues to implement medium-\nL47: term fiscal consolidation plans.” In addition to pandemic-related risks to the \nL48: outlook, a second key domestic risk factor stems from the drought conditions \nL49: which are affecting parts of the country and already causing severe hardship. \nL50: Should the drought intensify or spread, this would weigh on the near-term \nL51: economic outlook. Weaker global growth, higher-than-anticipated energy prices, \nL52: and tighter external financing conditions are the primary external risks. As ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Thus first clue confirmed.\n\nSecond clue: Defenders of Wildlife 2021 annual report. Confirmed.\n\nThird clue: Defenders of Wildlife 2022 annual report. Confirmed.\n\nThus the answer is Terry C. Pelster.\n\nBut let's verify that the 2022 annual report indeed shows \"Contributed services 10%\". We saw that. Also confirm that 2021 report shows 12%. We saw that.\n\nThus the question: \"Who was the emeritus board member that year?\" Refers to 2022. Answer: Terry C. Pelster.\n\nHowever, the question might be asking for the emeritus board member in the 2021 report? Let's read the question carefully:\n\n\"In a year before 2023, between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered. That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%. Who was the emeritus board member that year? Name only, no titles.\"\n\nThe phrase \"that year\" could refer to \"the following year\" i.e., the year of the annual report that mentioned the rescinded plan. Or could refer to \"that same year\" (the year of the press release and the first annual report). However, the phrase \"that year\" appears at the end: \"Who was the emeritus board member that year?\" It likely refers to the year of the annual report that is being discussed in the preceding sentence. The preceding sentence talks about the following year's annual report. So indeed \"that year\" is the following year (2022).\n\nThus answer: Terry C. 
Pelster.\n\nBut to be thorough, we should check the 2022 annual report for any other emeritus board members. The search only showed \"BOARD EMERITUS Terry C. Pelster, Esq.\" Maybe there are multiple emeritus board members. But likely only one.\n\nCheck the 2021 annual report for emeritus board member. It also shows Terry C. Pelster. So maybe the question is asking for the emeritus board member in the 2021 report. But why would they ask that? The question is about connecting the clues to identify the organization and then find the emeritus board member. The clues point to Defenders of Wildlife. The question likely asks for the emeritus board member as of the 2022 report.\n\nNevertheless, we should confirm that the 2022 report is indeed the one that mentions the rescinded plan. We saw that. Also confirm that contributed services decreased from 12% to 10%. We saw that.\n\nThus answer is Terry C. Pelster.\n\nBut let's search for \"Defenders of Wildlife emeritus board member 2022 Terry Pelster\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "search", + "arguments": "{\"query\": \"Terry C. Pelster Defenders of Wildlife emeritus board member\", \"topn\": 10, \"source\": \"web\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[15] Terry C. Pelster Defenders of Wildlife emeritus board member (Search_Results/Terry C. Pelster Defenders of Wildlife emeritus board member)\n**viewing lines [0 - 31] of 31**\n\nL0: \nL1: URL: Search_Results/Terry C. Pelster Defenders of Wildlife emeritus board member\nL2: # Search Results\nL3: \nL4: * 【0†2024 impact report; publish_date: none†defenders.org】 Terry Root, Ph.D. \nL5: New Mexico. Thomas D. Sisk, Ph.D. British Columbia, Canada. BOARD EMERITUS. \nL6: Terry C. Pelster, Esq. New York. SCIENCE ADVISORS. Leah Gerber, Ph ...\nL7: * 【1†Defenders of Wildlife; publish_date: none†www.wrongkindofgreen.org】 \nL8: Directors ; Victor M. Sher*, Chair – California ; Terry C. Pelster*, Vice Chair \nL9: – New York ; Richard Kopcho*, Treasurer – California ; Adelaide P. Gomer*, \nL10: Secretary ...\nL11: * 【2†Exhibit 12; publish_date: none†www.uschamber.com】 DECLARATION OF TERRY \nL12: PELSTER. I, Terry C. Pelster, declare as follows: 1. I am a current member of \nL13: Defenders of Wildlife (“Defenders”) and have been a member.\nL14: * 【3†2020 ANNUAL REPORT; publish_date: none†defenders.org】 Terry L. Root. \nL15: Florida. BOARD. EMERITUS. Terry C. Pelster. New York. Alan Steinberg. Florida. \nL16: SCIENCE. ADVISORS. Gabriela Chavarria,. Ph.D. Denver Museum of.\nL17: * 【4†2019 Annual Report; publish_date: none†defenders.org】 BOARD EMERITUS. \nL18: Terry C. Pelster. New York. Alan Steinberg. Florida. SCIENCE ADVISORS. Leah \nL19: Gerber, Ph.D. Arizona State University. Kathayoon Khalil, Ph.D.\nL20: * 【5†annual report; publish_date: none†defenders.org】 * Executive Committee \nL21: member. BoARD EMERITuS. Terry C. Pelster. New York. Alan Steinberg. Florida. \nL22: PRESIDENT. EMERITuS. Rodger Schlickeisen. Virginia. SCIENCE ...\nL23: * 【6†BEING THERE FOR BELUGAS; publish_date: none†wendeenicole.com】 Mar 2, 2023\nL24: — Defenders of Wildlife is a national, nonprofit member- ship ... Terry C. \nL25: Pelster, Esq. New York. *Executive Committee member outfront.\nL26: * 【7†Annual Reports; publish_date: none†www.annualreports.com】 WAYS TO GIVE \nL27: There are many ways to support Defenders of Wildlife and protect our imperiled \nL28: wildlife. ... Terry C. Pelster. New York. Alan Steinberg. 
Florida.\nL29: * 【8†2014 Annual Report; publish_date: none†defenders.org】 BOARD OF DIRECTORS \nL30: defenders of wildlife is a national, nonprofit ... BOARD EMERITUS. Terry C. \nL31: Pelster. New York. Alan Steinberg. Florida. Page 3. PO.", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Thus Terry C. Pelster is indeed the emeritus board member.\n\nNow the question might want just \"Terry C. Pelster\". Or maybe \"Terry Pelster\". Or \"Terry C. Pelster, Esq.\" but instructions say \"Name only, no titles.\" So likely \"Terry C. Pelster\". However, maybe they want \"Terry Pelster\". Better to include full name as listed: \"Terry C. Pelster\".\n\nLet's read the question again: \"Who was the emeritus board member that year? Name only, no titles.\" So we should answer just the name, no \"Esq.\" or \"Board Emeritus\". So \"Terry C. Pelster\". Or maybe \"Terry Pelster\". But the document shows \"Terry C. Pelster, Esq.\" So the name is \"Terry C. Pelster\". \n\nBetter to verify that the 2022 annual report indeed lists \"BOARD EMERITUS Terry C. Pelster, Esq.\" We saw that line.\n\nThus answer: Terry C. Pelster.\n\nBut let's double-check that the question might refer to the 2021 annual report. The phrase \"that year\" could be ambiguous. Let's parse the timeline:\n\n- Year 1: 2021. Press release in Dec 2021 about Kenya vaccine doses. Same year (2021) annual report of Defenders of Wildlife shows 12% contributed services.\n\n- Year 2: 2022. Annual report mentions rescinded plan and contributed services decreased to 10%.\n\nThe question says \"The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan ... and that contributed services had decreased by 2% to 10%.\" This is year 2.\n\nThen asks \"Who was the emeritus board member that year?\" That likely refers to year 2.\n\nThus answer: Terry C. Pelster.\n\nHowever, perhaps the emeritus board member changed between 2021 and 2022. We need to check 2022 annual report for any other emeritus board member. We saw only Terry C. Pelster. Also check 2021 annual report: same. So likely unchanged.\n\nThus answer is Terry C. Pelster.\n\nNevertheless, we should confirm that the 2022 annual report is indeed the one that includes the phrase \"BLM rescinded the previous administration’s plan that opened most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling\". We saw that in line 200-202.\n\nThus answer is Terry C. Pelster.\n\nNow we need to provide the answer in the final message. Only the name.\n\nThus final answer: Terry C. 
Pelster.\n\nBut let's be thorough and search for \"Defenders of Wildlife 2022 annual report contributed services 10%\".", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "find", + "arguments": "{\"cursor\": 8, \"pattern\": \"Contributed services\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[16] Find results for text: `contributed services` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf/find?pattern=contributed services)\n**viewing lines [0 - 10] of 10**\n\nL0: # 【0†match at L222】\nL1: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS \nL2: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising \nL3: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets\nL4: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433\nL5: \nL6: # 【1†match at L227】\nL7: Contributed services 10% Royalties and other income 2% Bequests, trusts and \nL8: split interests 29% TOTAL REVENUE Income from investments -15% Program and \nL9: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== \nL10: DEFENDERS.ORG 13 ===== Page 14 ===== BOARD OF DIRECTORS John Dayton, Esq.* Chair", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Thus confirmed.\n\nNow we should verify that the 2021 annual report shows contributed services 12%.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "find", + "arguments": "{\"cursor\": 4, \"pattern\": \"Contributed services\"}" + } + } + ] + }, + { + "role": "tool", + "content": "[17] Find results for text: `contributed services` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf/find?pattern=contributed services)\n**viewing lines [0 - 4] of 4**\n\nL0: # 【0†match at L194】\nL1: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total \nL2: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency \nL3: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total \nL4: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "", + "reasoning_content": "Open around L190-200 to see percentage.", + "tool_calls": [ + { + "id": null, + "type": "function", + "function": { + "name": "open", + "arguments": "{\"cursor\": 4, \"loc\": 185}" + } + } + ] + }, + { + "role": "tool", + "content": "[18] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf)\n**viewing lines [185 - 232] of 261**\n\nL185: devastating water pollution that is killing the seagrass and causing manatees to\nL186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In \nL187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters \nL188: contributed more than $49 million for wildlife and wildlife habitat. This figure\nL189: includes all those who generously donated their time and expertise. The \nL190: steadfast support of our donors allows Defenders to sustain our program and \nL191: public education efforts in the field, the courts and on Capitol Hill. 
2021 \nL192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split \nL193: interests $7,692 Income from investments, annuity reserve funds and trusts \nL194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total \nL195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency \nL196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total \nL197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 \nL198: Net Assets, End of the Year $45,144 Dollars are in thousands. 12 DEFENDERS OF \nL199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, \nL200: trusts and split interests 15% Royalties and other income 7% Contributed \nL201: services 12% Program and support services 97% Fundraising 3% ===== Page 13 =====\nL202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl \nL203: Sachs* Vice Chair Connecticut Whitfield Bailey* Secretary Tennessee Mark Caylor*\nL204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. \nL205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela \nL206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia \nL207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. \nL208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* \nL209: California Terry L. Root, Ph.D Florida BOARD EMERITUS Terry C. Pelster, Esq. New\nL210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS \nL211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, \nL212: Ph.D Arizona State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D\nL213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State \nL214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D \nL215: University of California-Davis David S. Wilcox, Ph.D Princeton University Rae \nL216: Wynn-Grant, Ph.D National Geographic Society SENIOR STAFF Jamie Rappaport Clark \nL217: President & CEO Thu Pham Chief of Staff Renee Stone Senior Vice President, \nL218: Conservation Programs & General Counsel James Stefan Senior Vice President, \nL219: Operations Matt Braughler Vice President, Philanthropy Rachel Brittin Vice \nL220: President, Communications Shawn Cantrell Vice President, Field Conservation \nL221: Programs Robert L. Dewey Vice President, Government Relations & External Affairs\nL222: Monica Goldberg Vice President, Landscape Conservation Sharon Morris-Smith Vice\nL223: President of Human Resources Elizabeth Nichols Vice President, Finance/CFO \nL224: Anthony Pegues Vice President, Diversity, Equity, Inclusion, Justice Lindsay \nL225: Rosa, Ph.D Director, Center for Conservation Innovation Michael P. Senatore Vice\nL226: President, Conservation Law Brandace Swimley Vice President, Integrated \nL227: Marketing 14 DEFENDERS OF WILDLIFE ===== Page 15 ===== THERE ARE MANY WAYS TO \nL228: SUPPORT DEFENDERS OF WILDLIFE and protect our imperiled wildlife. To learn more \nL229: or donate, contact us at 1-800-385-9712, visit defenders.org/donate or choose \nL230: one of the following options. ➤ Transfer appreciated assets directly to \nL231: Defenders and maximize potential tax benefits. 
Defenders.org/donate-stocks-help-\nL232: save-wildlife ➤ Become a Leadership Council member with a single donation of ", + "tool_call_id": null + }, + { + "role": "assistant", + "content": "Terry C. Pelster", + "reasoning_content": "Thus everything matches.\n\nThus answer: Terry C. Pelster.\n\nNow we need to output only the name.\n\nThus final answer: Terry C. Pelster.\n\nBut perhaps the question expects \"Terry Pelster\". However, the document includes middle initial \"C.\" It might be better to include the full name as listed: \"Terry C. Pelster\". The instruction \"Name only, no titles\" suggests we should not include \"Esq.\" or \"Board Emeritus\". But including middle initial is likely okay.\n\nTo be safe, we could output \"Terry C. Pelster\". Let's see similar puzzles: often they expect the full name as appears in the document.\n\nThus answer: Terry C. Pelster.\n\nNow we will provide final answer.", + "tool_calls": [] + } + ] +} \ No newline at end of file diff --git a/lib/llm/tests/data/deepseek-v3.2/test_output.txt b/lib/llm/tests/data/deepseek-v3.2/test_output.txt new file mode 100644 index 0000000000..54b2fb8f3e --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/test_output.txt @@ -0,0 +1,112 @@ +<|begin▁of▁sentence|>You are a helpful Assistant. + +## Tools + +You have access to a set of tools you can use to answer the user's question. +You can invoke functions by writing a "<|DSML|function_calls>" block like the following as part of your reply to the user: +<|DSML|function_calls> +<|DSML|invoke name="$FUNCTION_NAME"> +<|DSML|parameter name="$PARAMETER_NAME" string="true|false">$PARAMETER_VALUE +... + +<|DSML|invoke name="$FUNCTION_NAME2"> +... + + + +String and scalar parameters should be specified as is without any escaping or quotes, while lists and objects should use JSON format. The "string" attribute should be set to "true" for string type parameters and "false" for other types (numbers, booleans, arrays, objects). + +If the thinking_mode is enabled, then after function results you should strongly consider outputting a thinking block. Here is an example: + +<|DSML|function_calls> +... + + + +... + + +...thinking about results + +Here are the functions available in JSONSchema format: + +{"name": "get_datetime", "description": "Get the current date and time", "parameters": {"type": "object", "properties": {"timezone": {"type": "string", "description": "The timezone, e.g. America/New_York, UTC"}}, "required": ["timezone"]}} +{"name": "get_weather", "description": "Get the weather for a specific date and location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city name, e.g. New York, San Francisco"}, "date": {"type": "string", "description": "The date in YYYY-MM-DD format"}}, "required": ["location", "date"]}} + +<|User|>What's the weather tomorrow in San Francisco and New York?<|Assistant|>
+ +<|DSML|function_calls> +<|DSML|invoke name="get_datetime"> +<|DSML|parameter name="timezone" string="true">America/New_York + +<|end▁of▁sentence|> + + +{"current_date": "2024-01-15", "current_time": "14:30:00", "timezone": "America/New_York"} + + + + +<|DSML|function_calls> +<|DSML|invoke name="get_weather"> +<|DSML|parameter name="location" string="true">San Francisco +<|DSML|parameter name="date" string="true">2024-01-16 + +<|DSML|invoke name="get_weather"> +<|DSML|parameter name="location" string="true">New York +<|DSML|parameter name="date" string="true">2024-01-16 + +<|end▁of▁sentence|> + + +{"location": "San Francisco", "date": "2024-01-16", "temperature_high": "65", "temperature_low": "55", "weather": "partly cloudy", "humidity": "70%"} +{"location": "New York", "date": "2024-01-16", "temperature_high": "28", "temperature_low": "15", "weather": "sunny", "humidity": "35%"} + + +Based on the forecast, here's tomorrow's weather (January 16, 2024): + +**San Francisco**: +- Weather: Partly cloudy +- High: 65°F +- Low: 55°F +- Humidity: 70% + +**New York**: +- Weather: Sunny +- High: 28°F +- Low: 15°F +- Humidity: 35% + +San Francisco will be mild and partly cloudy, while New York will be cold but sunny. If you're in New York, dress warmly!<|end▁of▁sentence|><|User|>Thanks! What about the day after tomorrow?<|Assistant|>User is now asking about the day after tomorrow, which is 2024-01-17. I can query directly since I already know today's date. + +<|DSML|function_calls> +<|DSML|invoke name="get_weather"> +<|DSML|parameter name="location" string="true">San Francisco +<|DSML|parameter name="date" string="true">2024-01-17 + +<|DSML|invoke name="get_weather"> +<|DSML|parameter name="location" string="true">New York +<|DSML|parameter name="date" string="true">2024-01-17 + +<|end▁of▁sentence|> + + +{"location": "San Francisco", "date": "2024-01-17", "temperature_high": "68", "temperature_low": "58", "weather": "light rain", "humidity": "85%"} +{"location": "New York", "date": "2024-01-17", "temperature_high": "32", "temperature_low": "22", "weather": "cloudy", "humidity": "50%"} + + +Got the weather data for the day after tomorrow. Let me format this for the user.Here's the forecast for the day after tomorrow (January 17, 2024): + +**San Francisco**: +- Weather: Light rain +- High: 68°F +- Low: 58°F +- Humidity: 85% + +**New York**: +- Weather: Cloudy +- High: 32°F +- Low: 22°F +- Humidity: 50% + +San Francisco will have light rain with temperatures rising slightly, so bring an umbrella. New York will be a bit warmer but still cold and cloudy, so continue to bundle up.<|end▁of▁sentence|> diff --git a/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt b/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt new file mode 100644 index 0000000000..6c5c7519f0 --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt @@ -0,0 +1,2455 @@ +<|begin▁of▁sentence|><|User|> + +## Tools + +You have access to a set of tools you can use to answer the user's question. +You can invoke functions by writing a "<|DSML|function_calls>" block like the following as part of your reply to the user: +<|DSML|function_calls> +<|DSML|invoke name="$FUNCTION_NAME"> +<|DSML|parameter name="$PARAMETER_NAME" string="true|false">$PARAMETER_VALUE +... + +<|DSML|invoke name="$FUNCTION_NAME2"> +... + + + +String and scalar parameters should be specified as is without any escaping or quotes, while lists and objects should use JSON format. 
The "string" attribute should be set to "true" for string type parameters and "false" for other types (numbers, booleans, arrays, objects). + +If the thinking_mode is enabled, then after function results you should strongly consider outputting a thinking block. Here is an example: + +<|DSML|function_calls> +... + + + +... + + +...thinking about results + +Here are the functions available in JSONSchema format: + +{"name": "search", "description": "Searches for information related to query and displays topn results.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The search query string"}, "topn": {"type": "integer", "description": "Number of top results to display", "default": 10}, "source": {"type": "string", "description": "Source to search within", "enum": ["web", "news"], "default": "web"}}, "required": ["query"], "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} +{"name": "open", "description": "Opens the link id from the page indicated by cursor starting at line number loc, showing num_lines lines. Valid link ids are displayed with the formatting: 【{id}†.*】. If cursor is not provided, the most recent page is implied. If id is a string, it is treated as a fully qualified URL associated with source. If loc is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available. Use this function without id to scroll to a new location of an opened page.", "parameters": {"type": "object", "properties": {"id": {"description": "Link ID (number) or fully qualified URL (string)", "anyOf": [{"type": "integer"}, {"type": "string"}], "default": -1}, "cursor": {"type": "integer", "description": "Page cursor indicator", "default": -1}, "loc": {"type": "integer", "description": "Starting line number", "default": -1}, "num_lines": {"type": "integer", "description": "Number of lines to show", "default": -1}, "view_source": {"type": "boolean", "description": "Whether to view source", "default": false}, "source": {"type": "string", "description": "Source associated with the URL", "enum": ["web", "news"], "default": "web"}}, "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} +{"name": "find", "description": "Finds exact matches of pattern in the current page, or the page given by cursor.", "parameters": {"type": "object", "properties": {"pattern": {"type": "string", "description": "The exact pattern to search for"}, "cursor": {"type": "integer", "description": "Page cursor to search in", "default": -1}}, "required": ["pattern"], "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} + + + +# The user's message is: 帮我调研一下,目前有哪些针对search agent的benchmark?详细介绍各自的特点、使用场景、例题。 + + +## Today’s Date +2025-11-27, Thursday. 
+<|Assistant|>用户想调研目前针对搜索智能体(search agent)的基准测试(benchmark),要求详细介绍各自的特点、使用场景和例题。我需要提供一份全面的概述,涵盖现有的基准测试,例如WebGPT、WebShop、ALFWorld、HotpotQA、FEVER、TriviaQA、Natural Questions、SearchQA、MS MARCO、BEIR、LoTTE、MIRACL、Zero-shot IR基准等。同时,需要区分这些基准测试是针对检索任务、问答任务,还是具体针对搜索智能体的评估。此外,还应考虑那些专门为评估搜索智能体(即能够与环境交互、执行搜索、浏览网页、使用工具的智能体)而设计的基准测试,例如WebGPT、WebShop、ALFWorld、Mind2Web、WebArena、VisualWebArena、SearchBench、WebVLN、WebNav等。我需要详细介绍每个基准测试的特点、使用场景,并举例说明。为了确保信息的全面性和时效性,我将同时进行两项搜索:一项是针对“search agent benchmark”的通用搜索,以获取广泛的基准测试信息;另一项是针对“搜索智能体 基准测试”的中文搜索,以获取更符合用户语言习惯和可能更具体的中文资料。 + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">search agent benchmark 2024 +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">搜索智能体 基准测试 +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[0] search agent benchmark 2024 (Search_Results/search agent benchmark 2024) +**viewing lines [0 - 39] of 39** + +L0: +L1: URL: Search_Results/search agent benchmark 2024 +L2: # Search Results +L3: +L4: * 【0†HSCodeComp: A Realistic and Expert-level Benchmark for ...; publish_date: +L5: none†arxiv.org】 Oct 22, 2025 — To fill this gap, we introduce HSCodeComp, the +L6: first realistic, expert-level e-commerce benchmark designed to evaluate deep +L7: search agents in ... +L8: * 【1†open-compass/GTA - A Benchmark for General Tool Agents; publish_date: +L9: none†github.com】 GTA is a benchmark to evaluate the tool-use capability of LLM- +L10: based agents in real-world scenarios. It features three main aspects. +L11: * 【2†Benchmarking real-time trust scoring across five AI Agent ...; +L12: publish_date: none†cleanlab.ai】 Aug 20, 2025 — This article evaluates 5 AI Agent +L13: architectures over the BOLAA (ICLR 2024) benchmark, and assesses the effects of +L14: adding automated trust ... +L15: * 【3†10 AI agent benchmarks; publish_date: none†www.evidentlyai.com】 Jul 11, +L16: 2025 — We put together 10 AI agent benchmarks designed to assess how well +L17: different LLMs perform as agents in real-world scenarios, ... +L18: * 【4†A state-of-the-art search API purpose-built for agents; publish_date: +L19: none†parallel.ai】 Jul 31, 2025 — To evaluate real-world performance of the +L20: Parallel Search MCP Server, we created the WISER-Search benchmark which blends +L21: WISER-Fresh (queries ... +L22: * 【5†AI Agent Benchmarks are Broken; publish_date: none†medium.com】 We break +L23: down the failure modes in current AI agent benchmarks and introduce a checklist +L24: that minimizes the gamability of AI agent benchmarks. +L25: * 【6†Benchmarks and Tree Search for Multimodal LLM Web Agents; publish_date: +L26: none†dpfried.github.io】 2024, When is Tree Search Useful? ○ Dealing with +L27: destructive actions. ○ Some things on the web are very difficult to undo, e.g., +L28: ordering an item. 56. +L29: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: +L30: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact +L31: with (simulated) human users and programmatic APIs while following domain- +L32: specific policies in a consistent ... 
+L33: * 【8†Browser Use = state of the art Web Agent; publish_date: none†browser- +L34: use.com】 Dec 15, 2024 — Browser Use has achieved state-of-the-art performance on +L35: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 +L36: diverse web tasks. +L37: * 【9†FutureSearch Benchmarks; publish_date: none†evals.futuresearch.ai】 Find +L38: the original source of a given claim. Example: From , more than 8 out of 1000 +L39: users clicked on a phishing link monthly in 2024, up 190% vs 2023. +[1] 搜索智能体 基准测试 (Search_Results/搜索智能体 基准测试) +**viewing lines [0 - 33] of 33** + +L0: +L1: URL: Search_Results/搜索智能体 基准测试 +L2: # Search Results +L3: +L4: * 【0†WideSearch:揭示AI 智能体缺失的「广度」能力; publish_date: none†zhuanlan.zhihu.com】 Aug +L5: 16, 2025 — 为系统评估智能体在该任务上的能力,论文构建了第一个专门的基准测试 WideSearch ,包含200 个源于真实世界、横跨18 +L6: 个领域的高质量任务。 通过对超过10 个 ... +L7: * 【1†GAIA: 一个严苛的智能体基准- HuggingFace; publish_date: none†www.cnblogs.com】 Jul 9, +L8: 2024 — 我们使用一个用库构建的代码智能体 在GAIA 基准上进行测试,这可以说是最困难、最全面的智能体基准测试……最终我们取得了第一名的成绩! +L9: GAIA: 一个严苛的 ... +L10: * 【2†AI搜索智能体遭遇新挑战:滑铁卢大学团队提出更公平透明的 ...; publish_date: none†www.techwalker.com】 +L11: Aug 14, 2025 — +L12: 目前评测AI搜索智能体主要依靠BrowseComp这样的基准测试,它就像一场实时的开卷考试,让AI在真实的网络环境中搜索信息来回答复杂问题。听起来很合理 ... +L13: * 【3†Agentic AI基础设施实践经验系列(六):Agent质量评估 - AWS; publish_date: +L14: none†aws.amazon.com】 Sep 19, 2025 — TAU-bench +L15: 是一个评估AI智能体在真实世界环境中可靠性的基准测试。它评估智能体是否能够在动态的多轮对话中与用户进行交互,理解需求并完成任务。T-bench ... +L16: * 【4†DeepAgent:能自己找工具的通用推理智能体 - 高瓴人工智能学院; publish_date: none†ai.ruc.edu.cn】 +L17: Nov 6, 2025 — 在八大基准测试中,DeepAgent在绝大多数任务上全面领先所有基线模型。 +L18: 开放环境优势:在最具挑战的“开放工具检索”场景下(如ToolBench),其成功率达到64%,远 ... +L19: * 【5†BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试; publish_date: none†blog.csdn.net】 Sep +L20: 22, 2025 — 该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。 核心特点. +L21: 挑战性问题设计:BrowseComp的问题设计遵循严格的难度标准:. 人类创建者确保 ... +L22: * 【6†什么是GAIA?; publish_date: none†huggingface.co】 什么是GAIA? GAIA +L23: 是一个用于评估AI助手在需要核心能力组合的真实世界任务上的表现的基准,这些核心能力包括推理、多模态理解、网页浏览和熟练的工具使用。 +L24: * 【7†SuperCLUE-Agent: Agent智能体中文原生任务能力测评基准; publish_date: +L25: none†www.cluebenchmarks.com】 AI +L26: Agent是一个能够自主理解、规划决策和执行复杂任务的智能体,有代表性的Agent比如AutoGPT、AgentGPT等。 +L27: 现有关于Agent能力的测评,主要是在英文场景或任务的测评。目前还 ... +L28: * 【8†Gaia2 与ARE:赋能社区的智能体评测; publish_date: none†hub.baai.ac.cn】 Sep 24, 2025 — +L29: 快速评估任意智能体:基于真实或模拟数据,测试不同规则、工具、内容和验证方式下的表现 · 测试智能体的 工具调用与编排能力:可结合本地应用或MCP 工具 · 生成 +L30: ... +L31: * 【9†许多AI 智能体评测基准并不可靠; publish_date: none†my.oschina.net】 Aug 13, 2025 — +L32: 当前AI智能体基准测试存在严重缺陷,亟需更严谨的评估框架。作者提出AI智能体基准测试核查清单(ABC),包含43个项目,旨在识别基准测试潜在陷阱,构建可靠评估 +L33: ... 
+ + +第一个搜索结果(针对“search agent benchmark 2024”)显示了一些相关条目,例如“HSCodeComp: A Realistic and Expert-level Benchmark for ...”、“open-compass/GTA - A Benchmark for General Tool Agents”、“10 AI agent benchmarks”、“WISER-Search benchmark”、“Benchmarks and Tree Search for Multimodal LLM Web Agents”、“τ-bench”、“WebVoyager benchmark”、“FutureSearch Benchmarks”。这些可能都与搜索智能体相关。第二个搜索结果(针对“搜索智能体 基准测试”)显示了一些中文资料,例如“WideSearch”、“BrowseComp”、“GAIA”、“SuperCLUE-Agent”等。这些都可能与搜索智能体相关。为了全面了解这些基准测试,我们需要同时打开并分析其中几个最有潜力的结果。具体来说,我们将打开第一个搜索结果中的“HSCodeComp”(索引0)和“open-compass/GTA”(索引1),以及第二个搜索结果中的“WideSearch”(索引0)和“BrowseComp”(索引5)。 + +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">0 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">0 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">1 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">1 +<|DSML|parameter name="id" string="false">5 + +<|end▁of▁sentence|> + + +[2] (https://ar5iv.org/html/2510.19631v1) +**viewing lines [0 - 119] of 983** + +L0: +L1: URL: https://ar5iv.org/html/2510.19631v1 +L2: # HSCodeComp: A Realistic and Expert-level Benchmark for Deep Search Agents in +L3: Hierarchical Rule Application +L4: +L5: Yiqian Yang† Tian Lan† Qianghuai Jia∗ Li Zhu Hui Jiang Hang Zhu Longyue Wang +L6: Weihua Luo Kaifu Zhang +L7: +L8: Alibaba International Digital Commerce∗* Corresponding Author: Qianghuai Jia +L9: (qianghuai.jqh@alibaba-inc.com) +L10: †\dagger Equal Contribution: Yiqian Yang +L11: +L12: Tian Lan +L13: +L14: ###### Abstract +L15: +L16: Abstract +L17: +L18: Effective deep search agents must not only access open-domain and domain- +L19: specific knowledge but also apply complex rules—such as legal clauses, medical +L20: manuals and tariff rules. These rules often feature vague boundaries and +L21: implicit logic relationships, making precise application challenging for agents. +L22: However, this critical capability is largely overlooked by current agent +L23: benchmarks. To fill this gap, we introduce HSCodeComp, the first realistic, +L24: expert-level e-commerce benchmark designed to evaluate deep search agents in +L25: hierarchical rule application. In this task, the deep reasoning process of +L26: agents is guided by these rules to predict 10-digit Harmonized System Code +L27: (HSCode) of products with noisy but realistic descriptions. These codes, +L28: established by the World Customs Organization, are vital for global supply chain +L29: efficiency. Built from real-world data collected from large-scale e-commerce +L30: platforms, our proposed HSCodeComp comprises 632 product entries spanning +L31: diverse product categories, with these HSCodes annotated by several human +L32: experts. Extensive experimental results on several state-of-the-art LLMs, open- +L33: source, and closed-source agents reveal a huge performance gap: best agent +L34: achieves only 46.8% 10-digit accuracy, far below human experts at 95.0%. +L35: Besides, detailed analysis demonstrates the challenges of hierarchical rule +L36: application, and test-time scaling fails to improve performance further. 
+L37: +L38: ## 1 Introduction +L39: +L40: Deep search agents have demonstrated significant value in solving complex real- +L41: world problems, where robust external knowledge utilization constitutes a +L42: critical capability [Wu et al., 2025, Tao et al., 2025, Li et al., 2025b]. To +L43: evaluate this capability, numerous established benchmarks are proposed to assess +L44: agents in utilizing open-domain data (e.g., GAIA [Mialon et al., 2023b] and +L45: BrowseComp [Wei et al., 2025]) and domain-specific data (e.g., WebMall [Peeters +L46: et al., 2025a], FinSearchComp [Hu et al., 2025a] and MedBrowseComp [Yu et al., +L47: 2025b]). +L48: +L49: Beyond open-domain and domain-specific data, agents also need to effectively +L50: apply rules that encode human expert knowledge, particularly in scenarios like +L51: law, medical and e-commerce [Li et al., 2025a, Chen et al., 2025b, Yao et al., +L52: 2022, Chollet et al., 2025]. For instance, legal case adjudication require +L53: interpreting abstract legal provisions, and accurate e-commerce product +L54: classification in depends on tariff rules [Grainger, 2024]. Previous works have +L55: defined rule application as using specific logical rules with supporting facts +L56: to derive conclusions [Wang et al., 2024, Servantez et al., 2024]. In contrast, +L57: we define it as a core capability for deep search agents, where human-written +L58: rules are systematically applied to guide complex reasoning and decision-making +L59: [Sadowski and Chudziak, 2025]. Building on this observation, we categorize +L60: knowledge data for deep search agents into three levels (Figure 1, left), with +L61: increasing knowledge complexity: (1) Level 1: Open-domain Data - Tests +L62: understanding and deep reasoning abilities of agents on long-form web content. +L63: Established benchmarks include GAIA [Mialon et al., 2023b] and BrowseComp [Wei +L64: et al., 2025]; (2) Level 2: Structured Data - Assesses agents to precisely +L65: utilize structured data such as databases and knowledge graphs, as seen in +L66: domain-specific benchmarks like WebMall [Peeters et al., 2025a], MedBrowseComp +L67: [Chen et al., 2025b] and FinSearchComp [Hu et al., 2025a]; (3) Level 3: Rule +L68: Data - Evaluates agents to apply complex and abstract rules [Chollet et al., +L69: 2025]. This level presents two key challenges: (a) making accurate decisions +L70: when rules contain vague natural language descriptions [Sadowski and Chudziak, +L71: 2025]; and (b) reasoning about logical dependencies among rules, such as +L72: exception clauses and cross-category relationships [Guha et al., 2023]. Despite +L73: the importance of rule application in real-world scenarios, current agent +L74: benchmarks largely overlook its evaluation. +L75: +L76: To fill this gap, we introduce HSCodeComp (short for the Harmonized System Code +L77: (HSCode) Competition), the first realistic, expert-level e-commerce benchmark +L78: designed to evaluate agents in predicting complete 10-digit Harmonized System +L79: Code (HSCode) of the product, using hierarchical rules (e.g., eWTP tariff +L80: rules111https://www.ewtp.com/web/smart/hscode). 
HSCodes organize products +L81: through a hierarchical structure spanning over 5,000 distinct codes across +L82: multiple classification levels, representing the global standard for classifying +L83: traded international goods, established by the World Customs Organization and +L84: implemented across more than 200 countries for customs clearance and tariff +L85: determination [Grainger, 2024, Nath et al., 2025]. Built from the data of the +L86: large-scale e-commerce platforms, our proposed HSCodeComp comprises 632 +L87: carefully curated product entries, encompassing 27 unique HS chapters and 32 +L88: distinct first-level categories. These HSCodes have been rigorously annotated by +L89: multiple e-commerce domain experts, ensuring that HSCodeComp is expert-level. +L90: Accurately predicting the exact 10-digit HSCode presents significant challenges: +L91: agents must perform multi-hop hierarchical reasoning with complex tariff rules +L92: while processing noisy but realistic product descriptions that often contain +L93: abbreviations, language variations, or incomplete information. +L94: +L95: Extensive experiments on the state-of-the-art baselines, including 14 advanced +L96: foundation models, 6 advanced open-source agent systems and 3 closed-source +L97: agent systems, demonstrate that HSCode prediction task remains a substantial +L98: challenge for current AI approaches. As shown in the Figure 1 (right), even the +L99: best-performing system (SmolAgent [Roucher et al., 2025] with GPT-5) achieves +L100: only 46.8% accuracy, substantially below the 95.0% accuracy attained by human +L101: experts. Further detailed analysis reveals that existing agent systems lack +L102: critical capabilities required for this complex hierarchical rule applications. +L103: Notably, test-time scaling approach—which has proven effective in other +L104: reasoning tasks [Guo et al., 2025, Liu et al., 2025]—fail to improve performance +L105: on HSCodeComp. These observations demonstrate the challenging nature of our +L106: proposed HSCodeComp, highlighting the need for more effective designs of agent +L107: systems. To facilitate future research, we will publicly release codes and the +L108: benchmark dataset of HSCodeComp. +L109: +L110: ## 2 Related Works +L111: +L112: ### 2.1 Previous Works in HSCode Prediction +L113: +L114: Previous works treat HSCode prediction as the e-commerce text classification +L115: task [Grainger, 2024], using pre-trained BERT models [Liao et al., 2024, Shubham +L116: et al., 2022] or Large Language Models (LLMs) prompting [Hussain and Ahmed, +L117: 2023]. However, these approaches fail to leverage domain-specific knowledge, +L118: especially the rules written by human experts [Hussain and Ahmed, 2023, Judy, +L119: 2024]. 
Besides, existing HSCode benchmarks face two critical limitations [Judy, +Error fetching URL `https://github.com/open-compass/GTA` +Error fetching URL `https://zhuanlan.zhihu.com/p/1939316761775301093` +[3] (https://blog.csdn.net/qq_41472205/article/details/151974603) +**viewing lines [0 - 167] of 187** + +L0: +L1: URL: https://blog.csdn.net/qq_41472205/article/details/151974603 +L2: # BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试 +L3: +L4: BrowseComp:AI浏览能力评估基准 +L5: +L6: 最新推荐文章于 2025-11-12 13:40:20 发布 +L7: +L8: 原创 于 2025-09-22 22:33:04 发布 · 1.3k 阅读 +L9: +L10: · 9 +L11: · 25 · +L12: CC 4.0 BY-SA版权 +L13: +L14: 版权声明:本文为博主原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。 +L15: +L16: ## BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试 +L17: +L18: 在人工智能从基础聊天机器人向推理器和智能体发展的进程中,具备浏览互联网能力的人工智能模型正变得越来越重要。今天,我们将介绍一个名为BrowseComp的创新基准 +L19: 测试,它专门设计用于评估AI代理在复杂网络浏览任务中的能力。 +L20: +L21: ### 什么是BrowseComp? +L22: +L23: BrowseComp(全称Browsing Competition)是一个包含1,266个挑战性问题的基准测试集,专门用于衡量AI代理在互联网上持续导航、寻找难 +L24: 以找到的纠缠信息的能力。该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。 +L25: +L26: #### 核心特点 +L27: +L28: 挑战性问题设计:BrowseComp的问题设计遵循严格的难度标准: +L29: +L30: - 人类创建者确保问题在10分钟内无法被人解决 +L31: - 现有模型(包括带浏览功能的ChatGPT和早期版本的OpenAI Deep Research)无法解决 +L32: - 通过5次简单Google搜索无法在结果首页找到答案 +L33: +L34: 简单易验证:尽管问题极具挑战性,但答案形式简单——都是短字符串,便于自动验证模型输出的正确性。 +L35: +L36: ### 为什么需要BrowseComp? +L37: +L38: #### 现有基准的局限性 +L39: +L40: 传统的信息检索基准(如TriviaQA、HotpotQA等)主要关注易于查找的信息,随着语言模型的进步,这些基准已经趋于饱和。而BrowseComp专注于那些需 +L41: 要浏览大量网站才能解决的"硬核"问题。 +L42: +L43: #### 模拟真实挑战 +L44: +L45: BrowseComp问题通常采用"逆向设计"方法:创建者从一个已知事实出发,构建一个搜索空间巨大但验证简单的问题。例如: +L46: +L47: “找出2018-2023年间在EMNLP会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题” +L48: +L49: 这类问题验证简单,但解决起来需要检查数千篇论文并调查每位作者的背景。 +L50: +L51: ### 数据集特点 +L52: +L53: #### 主题多样性 +L54: +L55: BrowseComp涵盖了广泛的主题领域(如图2所示),包括历史、科学、文化等。创建者被鼓励基于个人兴趣设计问题,这有助于提高数据质量和参与度。 +L56: +L57: #### 质量保证 +L58: +L59: 为确保答案的唯一性,创建者需要: +L60: +L61: - 对问题内容有足够了解,确信没有其他有效答案 +L62: - 如果不确定,则添加更多约束条件 +L63: - 接受其他创建者的验证反馈 +L64: +L65: ### 人类表现基准 +L66: +L67: 为了衡量BrowseComp的难度,研究人员让人类创建者尝试解决问题(不能解答自己创建的问题)。结果显示: +L68: +L69: - **70.8%**的问题在2小时搜索后人类选择放弃 +L70: - **29.2%**的问题被成功解决 +L71: - 在解决的问题中,**86.4%**的人类答案与参考答案一致 +L72: +L73: 这表明BrowseComp确实极具挑战性,即使是熟悉数据集的人类专家也难以在有限时间内解决大部分问题。 +L74: +L75: ### AI模型表现评估 +L76: +L77: #### 各模型对比 +L78: +L79: 研究人员评估了多种模型在BrowseComp上的表现: +L80: +L81: 模型 | 准确率(%) | 校准误差(%) +L82: ---|---|--- +L83: GPT-4o | 0.6 | 69 +L84: GPT-4o(带浏览) | 1.9 | 82 +L85: GPT-4.5 | 0.9 | 68 +L86: OpenAI o1 | 9.9 | 65 +L87: Deep Research | 51.5 | 91 +L88: +L89: #### 关键发现 +L90: +L91: - 基础模型表现不佳:GPT-4o和GPT-4.5准确率接近零,凸显了基准的难度 +L92: - 浏览功能带来有限提升:启用浏览功能的GPT-4o准确率略有提高,但仍很低 +L93: - 推理能力的重要性:OpenAI o1虽然没有浏览能力,但凭借更强的推理能力获得较高准确率 +L94: - 专业模型的优势:专门为持久网络浏览训练的Deep Research模型解决了约一半的问题 +L95: +L96: #### 计算资源与性能关系 +L97: +L98: 研究表明,BrowseComp性能随测试时计算资源的增加而平滑提升(如图1所示)。这与智能体模型的特性一致——更多计算资源允许模型浏览更多网站,从而提高找到正确 +L99: 答案的机会。 +L100: +L101: ### 进阶策略分析 +L102: +L103: #### 聚合策略的效果 +L104: +L105: 通过让模型多次尝试同一问题并采用投票策略,可以显著提升性能: +L106: +L107: - 多数投票:选择样本中最常见的答案 +L108: - 加权投票:根据模型置信度加权投票 +L109: - 最佳选择:选择置信度最高的答案 +L110: +L111: 这些方法将Deep Research的性能提升了15-25%,表明模型通常能够识别自己的正确答案。 +L112: +L113: #### 任务难度分布 +L114: +L115: 分析显示,BrowseComp中的任务难度分布广泛: +L116: +L117: - 16%的任务被Deep Research完美解决(100%通过率) +L118: - 14%的任务完全失败(0%通过率) +L119: - 其余任务处于中间难度水平 +L120: +L121: ### BrowseComp的意义与局限性 +L122: +L123: #### 作为评估工具的价值 +L124: +L125: BrowseComp可被视为浏览智能体的"编程竞赛"——虽然不全面,但对核心浏览能力提供了有用的衡量。正如在编程竞赛中表现出色的模型很可能在其他编码任务中表现良 +L126: 好,能够解决BrowseComp的模型在定位难以查找信息方面应该具有很强的能力。 +L127: +L128: #### 已知局限性 +L129: +L130: - 不涉及真实用户查询分布:如生成长回答或解决查询歧义等挑战 +L131: - 
答案唯一性假设:虽然尽力确保,但不能完全保证没有其他有效答案 +L132: - 单模态限制:当前仅关注文本信息检索,未来可扩展至图像、视频等模态 +L133: +L134: ### 结论与展望 +L135: +L136: BrowseComp为评估AI代理的网络浏览能力提供了一个具有挑战性且易于使用的基准。它强调的持久性、创造性和战略性浏览能力,正是未来AI智能体在实际应用中所需 +L137: 的核心技能。 +L138: +L139: 随着更多研究团队在BrowseComp上测试他们的模型,我们期待看到浏览智能体技术的快速进步,最终使AI能够更可靠地帮助我们获取那些深藏在互联网角落的宝贵信息。 +L140: +L141: 访问地址:BrowseComp已在GitHub上开源,地址为 https://github.com/openai/simple-evals +L142: +L143: 添加红包 +L144: +L145: 请填写红包祝福语或标题 +L146: +L147: 个 +L148: +L149: 红包个数最小为10个 +L150: +L151: 元 +L152: +L153: 红包金额最低5元 +L154: +L155: 当前余额3.43元 前往充值 > +L156: +L157: 需支付:10.00元 +L158: +L159: 成就一亿技术人! +L160: +L161: hope_wisdom +L162: 发出的红包 +L163: 实付元 +L164: +L165: 点击重新获取 +L166: +L167: 扫码支付 + + +打开的HSCodeComp页面详细介绍了这个基准测试,它是一个用于评估深度搜索智能体在分层规则应用方面能力的基准测试,特点包括:基于真实世界电商数据、任务复杂、需要多跳层次推理、使用嘈杂的产品描述,并且标注由专家完成。使用场景是评估智能体在规则应用方面的能力,例如关税代码分类。例题是预测产品的10位HS编码。打开的BrowseComp页面详细介绍了这个基准测试,它是一个用于评估浏览智能体(即能够在互联网上导航、搜索信息的智能体)的基准测试。其特点包括:包含1266个具有挑战性的问题,需要持续导航和搜索难以查找的信息;答案形式简单,便于自动验证;问题设计确保人类在10分钟内无法解决,且现有模型也无法解决;涵盖了广泛的主题领域。使用场景是评估智能体在复杂网络浏览任务中的能力。例题是:“找出2018-2023年间在EMNLP会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题”。另外两个打开的页面(open-compass/GTA和WideSearch)出现了错误,可能链接已失效或需要其他访问方式。为了进一步获取其他相关基准测试的信息,我们需要同时进行多项搜索。考虑到用户可能对搜索智能体基准测试有广泛需求,我们将同时搜索“WebVoyager benchmark”、“WebArena benchmark”、“GAIA benchmark”、“ToolBench benchmark”、“HotpotQA benchmark”、“FEVER benchmark”、“TriviaQA benchmark”、“Natural Questions benchmark”、“MS MARCO benchmark”、“BEIR benchmark”、“MIRACL benchmark”、“Zero-shot IR benchmark”、“WebGPT benchmark”、“WebShop benchmark”、“ALFWorld benchmark”、“Mind2Web benchmark”、“VisualWebArena benchmark”、“SearchBench benchmark”、“WebVLN benchmark”、“WebNav benchmark”。这些搜索将帮助我们全面了解现有基准测试的概况。 + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">WebVoyager benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">WebArena benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">GAIA benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">ToolBench benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">HotpotQA benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">FEVER benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">TriviaQA benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">Natural Questions benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">MS MARCO benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">BEIR benchmark 
+<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">MIRACL benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">Zero-shot IR benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">WebGPT benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">WebShop benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">ALFWorld benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">Mind2Web benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">VisualWebArena benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">SearchBench benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">WebVLN benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">WebNav benchmark +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[4] WebVoyager benchmark (Search_Results/WebVoyager benchmark) +**viewing lines [0 - 40] of 40** + +L0: +L1: URL: Search_Results/WebVoyager benchmark +L2: # Search Results +L3: +L4: * 【0†WebVoyager Benchmark; publish_date: none†www.trykura.com】 Performance +L5: measured across various websites in WebVoyager shows Kura consistently +L6: outperforming other agents, with particularly strong results on e-commerce ... +L7: * 【1†WebVoyager: Building an End-to-End Web Agent with ...; publish_date: +L8: none†github.com】 WebVoyager is an innovative Large Multimodal Model (LMM) +L9: powered web agent that can complete user instructions end-to-end by interacting +L10: with real-world ... +L11: * 【2†AI Browser Agent Leaderboard | Steel.dev; publish_date: +L12: none†leaderboard.steel.dev】 See how various AI browser agents stack up based on +L13: their accuracy in completing web-based tasks on the WebVoyager benchmark. +L14: * 【3†[2401.13919] WebVoyager: Building an End-to-End Web ...; publish_date: +L15: none†arxiv.org】 by H He · 2024 · Cited by 282 — We show that WebVoyager achieves +L16: a 59.1% task success rate on our benchmark, significantly surpassing the +L17: performance of both GPT-4 (All ... +L18: * 【4†Our Agent-E SOTA Results on the WebVoyager Benchmark; publish_date: +L19: none†www.emergence.ai】 Jul 11, 2024 — WebVoyager is a benchmark that tests an +L20: agent's capabilities for navigation on dynamic live websites. 
It is more +L21: representative than WebArena [4] ... +L22: * 【5†Browser Use = state of the art Web Agent; publish_date: none†browser- +L23: use.com】 Dec 15, 2024 — Browser Use has achieved state-of-the-art performance on +L24: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 +L25: diverse web tasks. +L26: * 【6†Magnitude achieves SOTA 94% on WebVoyager benchmark; publish_date: +L27: none†github.com】 Magnitude achieves state-of-the-art performance with 93.9% +L28: success rate on WebVoyager, beating all other browser agents. +L29: * 【7†WebVoyager: Autonomous Web Agent Benchmark; publish_date: +L30: none†www.emergentmind.com】 3 days ago — WebVoyager Benchmark is a comprehensive +L31: evaluation suite for autonomous web agents, featuring 643 tasks across 15 +L32: popular websites. +L33: * 【8†WebVoyager Benchmark Results; publish_date: none†www.browserable.ai】 +L34: Browserable has achieved 90.4% on the WebVoyager benchmark. This is best-in- +L35: class performance across all web agents. This was done across 567 web tasks +L36: which ... +L37: * 【9†89% achieved on WebVoyager using Anchor + Browser Use; publish_date: +L38: none†www.reddit.com】 Thanks to the amazing work from the browser-use open-source +L39: community and the built-in support from Anchor Browser, we've hit an 89% score +L40: on WebVoyager. +[5] WebArena benchmark (Search_Results/WebArena benchmark) +**viewing lines [0 - 42] of 42** + +L0: +L1: URL: Search_Results/WebArena benchmark +L2: # Search Results +L3: +L4: * 【0†WebArena: A Realistic Web Environment for Building ...; publish_date: +L5: none†webarena.dev】 Our benchmark is implemented in our fully interactable +L6: highly-realistic WebArena environment. It features diverse tasks human may +L7: encounter in their daily ... +L8: * 【1†[2307.13854] WebArena: A Realistic Web Environment for ...; publish_date: +L9: none†arxiv.org】 by S Zhou · 2023 · Cited by 637 — Building upon our +L10: environment, we release a set of benchmark tasks focusing on evaluating the +L11: functional correctness of task completions. +L12: * 【2†WebArena: A Realistic Web Environment for Building ...; publish_date: +L13: none†www.cmu.edu】 WebArena introduces a benchmark on interpreting high-level +L14: realistic natural language command to concrete web-based interactions. We +L15: provide annotated programs ... +L16: * 【3†GitHub - web-arena-x/webarena: Code repo for ...; publish_date: +L17: none†github.com】 [12/20/2024] Check out our new benchmark on even more +L18: consequential tasks, including terminal use and coding, TheAgentCompany. +L19: [12/21/2023] We release the ... +L20: * 【4†WebArena Benchmark and the State of Agentic AI; publish_date: +L21: none†medium.com】 In short, WebArena established a new standard for realism and +L22: complexity in web agent evaluation, forcing AI agents to operate in dynamic, +L23: high- ... +L24: * 【5†WebArena: A Realistic Web Environment for Building ...; publish_date: +L25: none†huggingface.co】 Jul 25, 2023 — WebArena, a realistic and reproducible +L26: environment, evaluates the performance of autonomous agents performing complex +L27: tasks on websites using ... +L28: * 【6†WebArena Benchmark: Evaluating Web Agents; publish_date: +L29: none†www.emergentmind.com】 Jun 30, 2025 — WebArena Benchmark is a self-contained +L30: suite that evaluates autonomous agents on realistic, multi-step web tasks using +L31: natural language ... 
+L32: * 【7†VisualWebArena is a benchmark for multimodal agents.; publish_date: +L33: none†github.com】 VisualWebArena is a realistic and diverse benchmark for +L34: evaluating multimodal autonomous language agents. It comprises of a set of +L35: diverse and complex web-based ... +L36: * 【8†WebDev Arena Leaderboard - LMArena; publish_date: none†web.lmarena.ai】 +L37: WebDev Arena is a real-time AI coding competition where models go head-to-head +L38: in web development challenges, developed by LMArena. +L39: * 【9†WebArena: A Realistic Web Environment for Building ...; publish_date: +L40: none†arxiv.org】 Apr 16, 2024 — We use this benchmark to evaluate several agents +L41: that can follow NL command and perform web-based tasks (§4). These agents are +L42: implemented in a ... +[6] GAIA benchmark (Search_Results/GAIA benchmark) +**viewing lines [0 - 41] of 41** + +L0: +L1: URL: Search_Results/GAIA benchmark +L2: # Search Results +L3: +L4: * 【0†GAIA Leaderboard - a Hugging Face Space by ...; publish_date: +L5: none†huggingface.co】 GAIA is a benchmark which aims at evaluating next- +L6: generation LLMs (LLMs with augmented capabilities due to added tooling, +L7: efficient prompting, access to search ... +L8: * 【1†[2311.12983] GAIA: a benchmark for General AI Assistants; publish_date: +L9: none†arxiv.org】 by G Mialon · 2023 · Cited by 367 — GAIA proposes real-world +L10: questions that require a set of fundamental abilities such as reasoning, multi- +L11: modality handling, web browsing, and generally tool-use ... +L12: * 【2†GAIA benchmark; publish_date: none†huggingface.co】 This is the +L13: organisation page for all things related to GAIA, a benchmark for General AI +L14: Assistants. You can find all the information and links on the GAIA ... +L15: * 【3†GAIA: A Benchmark for General AI Assistants; publish_date: +L16: none†ukgovernmentbeis.github.io】 This is an Inspect AI implementation of the +L17: GAIA (General AI Assistants) benchmark, consisting of 450 questions testing tool +L18: use on realistic assistant tasks. +L19: * 【4†GAIA: a benchmark for general AI assistants | Research; publish_date: +L20: none†ai.meta.com】 May 6, 2024 — GAIA proposes real-world questions that require +L21: a set of fundamental abilities such as reasoning, multi-modality handling, web +L22: browsing, and generally tool-use ... +L23: * 【5†HAL: GAIA Leaderboard; publish_date: none†hal.cs.princeton.edu】 GAIA is a +L24: benchmark for General AI Assistants that requires a set of fundamental +L25: abilities such as reasoning, multi-modality handling, web browsing, and tool- +L26: ... +L27: * 【6†GAIA: The LLM Agent Benchmark Everyone's Talking About; publish_date: +L28: none†towardsdatascience.com】 May 29, 2025 — GAIA stands for General AI +L29: Assistants benchmark [1]. This benchmark was introduced to specifically evaluate +L30: LLM agents on their ability to act as general- ... +L31: * 【7†GAIA: a benchmark for General AI Assistants; publish_date: +L32: none†openreview.net】 by G Mialon · Cited by 367 — GAIA proposes real-world +L33: questions that require a set of fundamental abilities such as reasoning, multi- +L34: modality handling, web browsing, and generally tool-use ... +L35: * 【8†Rethinking AI Evaluation: Introducing the GAIA Benchmark; publish_date: +L36: none†medium.com】 The authors introduce GAIA, a benchmark designed to assess the +L37: robustness of AI systems across a variety of practical tasks. 
+L38: * 【9†H2O.ai Tops the General AI Assistant (GAIA) Test; publish_date: +L39: none†h2o.ai】 Mar 17, 2025 — Our h2oGPTe Agent has once again claimed the #1 spot +L40: on the prestigious GAIA (General AI Assistants) benchmark with an impressive +L41: 75% accuracy rate. +[7] ToolBench benchmark (Search_Results/ToolBench benchmark) +**viewing lines [0 - 40] of 40** + +L0: +L1: URL: Search_Results/ToolBench benchmark +L2: # Search Results +L3: +L4: * 【0†ToolBench, an evaluation suite for LLM tool manipulation ...; +L5: publish_date: none†github.com】 The ToolBench is a benchmark consisting of +L6: diverse software tools for real-world tasks. We also provide easy-to-use +L7: infrastructure in this repository. +L8: * 【1†OpenBMB/ToolBench; publish_date: none†github.com】 [2023/7/27] New version +L9: ToolBench is released. ✨Here is an overview of the dataset construction, +L10: training, and evaluation. ✨✨Features:. +L11: * 【2†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: +L12: none†arxiv.org】 by Z Guo · 2024 · Cited by 100 — We introduce StableToolBench, a +L13: benchmark evolving from ToolBench, proposing a virtual API server and stable +L14: evaluation system. +L15: * 【3†StableToolBench - Zhicheng Guo; publish_date: none†zhichengg.github.io】 +L16: We introduce StableToolBench, a benchmark evolving from ToolBench, proposing a +L17: virtual API server and stable evaluation system. +L18: * 【4†ToolBench | EvalScope - Read the Docs; publish_date: +L19: none†evalscope.readthedocs.io】 We evaluate the effectiveness of the ToolBench +L20: benchmark: ToolBench (Qin et al., 2023b). The task involves integrating API +L21: calls to complete tasks. +L22: * 【5†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: +L23: none†aclanthology.org】 by Z Guo · 2024 · Cited by 100 — We introduce +L24: StableToolBench, a benchmark evolving from ToolBench, proposing a virtual API +L25: server and stable evaluation system. +L26: * 【6†ML-Tool-Bench: Tool-Augmented Planning for ML Tasks; publish_date: +L27: none†openreview.net】 Sep 18, 2025 — In this work, we introduce a comprehensive +L28: benchmark for evaluating tool-augmented ML agents using a curated set of 61 +L29: specialized tools and 15 ... +L30: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: +L31: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact +L32: with (simulated) human users and programmatic APIs while following domain- +L33: specific policies in a consistent ... +L34: * 【8†ToolEval Leaderboard; publish_date: none†openbmb.github.io】 ToolEval is +L35: an automatic evaluator build for tool learning which incorporates two evaluation +L36: metrics, Pass Rate and Win Rate(Preference). +L37: * 【9†What is the best benchmark dataset for multi-step tool-use?; +L38: publish_date: none†www.reddit.com】 I'm a newbie trying to evaluate the +L39: performance of different prompts strategies for multi-step tool-using, wondering +L40: what is the recommended benchmark dataset ... 
+[8] HotpotQA benchmark (Search_Results/HotpotQA benchmark) +**viewing lines [0 - 39] of 39** + +L0: +L1: URL: Search_Results/HotpotQA benchmark +L2: # Search Results +L3: +L4: * 【0†HotpotQA Homepage; publish_date: none†hotpotqa.github.io】 HotpotQA is a +L5: question answering dataset featuring natural, multi-hop questions, with strong +L6: supervision for supporting facts to enable more explainable ...See more +L7: * 【1†HotpotQA: A Dataset for Diverse, Explainable Multi-hop ...; publish_date: +L8: none†arxiv.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset with +L9: 113k Wikipedia-based question-answer pairs requiring multi-document reasoning, +L10: diverse questions, sentence-level ... +L11: * 【2†hotpotqa/hotpot_qa · Datasets at Hugging Face; publish_date: +L12: none†huggingface.co】 HotpotQA is a new dataset with 113k Wikipedia-based +L13: question-answer pairs with four key features: (1) the questions require finding +L14: and reasoning over multiple ...See more +L15: * 【3†Why You Should Stop Using HotpotQA for AI Agents ...; publish_date: +L16: none†qipeng.me】 Jul 1, 2025 — HotpotQA pioneered a class of AI tasks that +L17: requires the AI system to autonomously perform multiple steps of reasoning in an +L18: open-domain setting.See more +L19: * 【4†hotpotqa/hotpot; publish_date: none†github.com】 A dataset for diverse, +L20: explainable multi-hop question answering. This repository contains the baseline +L21: model code, as well as the entire pipeline of running ...See more +L22: * 【5†HotpotQA: Multi-Hop QA Benchmark; publish_date: +L23: none†www.emergentmind.com】 Sep 10, 2025 — HotpotQA is a large-scale multi-hop +L24: question answering benchmark featuring 112,779 Wikipedia-based Q&A pairs with +L25: detailed, sentence-level ...See more +L26: * 【6†HotpotQA Dataset | Papers With Code; publish_date: +L27: none†paperswithcode.com】 HotpotQA is a question answering dataset collected on +L28: the English Wikipedia, containing about 113K crowd-sourced questions.See more +L29: * 【7†HotpotQA: A Dataset for Diverse, Explainable Multi-hop ...; publish_date: +L30: none†aclanthology.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset +L31: with 113k Wikipedia-based question-answer pairs requiring multi-document +L32: reasoning, diverse questions, sentence-level facts, and factoid ... +L33: * 【8†Benchmark BM25S: HotpotQA; publish_date: none†www.kaggle.com】 Explore and +L34: run machine learning code with Kaggle Notebooks | Using data from No attached +L35: data sources. +L36: * 【9†mteb/hotpotqa · Datasets at Hugging Face; publish_date: +L37: none†huggingface.co】 HotpotQA is a question answering dataset featuring natural, +L38: multi-hop questions, with strong supervision for supporting facts to enable +L39: more explainable ...See more +[9] FEVER benchmark (Search_Results/FEVER benchmark) +**viewing lines [0 - 40] of 40** + +L0: +L1: URL: Search_Results/FEVER benchmark +L2: # Search Results +L3: +L4: * 【0†Fever.ai; publish_date: none†fever.ai】 We are pleased to announce that +L5: FEVER9 will be co-located with EACL 2026. In this year's workshop, we will +L6: introduce a new shared task focused on automated fact ... +L7: * 【1†a Large-scale Dataset for Fact Extraction and VERification; publish_date: +L8: none†aclanthology.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we +L9: introduce a new publicly available dataset for verification against textual +L10: sources, FEVER: Fact Extraction. 
+L11: * 【2†awslabs/fever: FEVER (Fact Extraction and VERification) ...; +L12: publish_date: none†github.com】 In this paper we introduce a new publicly +L13: available dataset for verification against textual sources, FEVER: Fact +L14: Extraction and VERification. +L15: * 【3†FEVER: Fact Extraction and VERification; publish_date: +L16: none†www.amazon.science】 The best accuracy we achieve on labeling a claim +L17: accompanied by the correct evidence is 31.87%, while if we ignore the evidence +L18: we achieve 50.91%. Thus we ... +L19: * 【4†FEVER Dataset; publish_date: none†fever.ai】 FEVER (Fact Extraction and +L20: VERification) consists of 185,445 claims generated by altering sentences +L21: extracted from Wikipedia and subsequently verified ... +L22: * 【5†mteb/fever · Datasets at Hugging Face; publish_date: none†huggingface.co】 +L23: FEVER. An MTEB dataset. Massive Text Embedding Benchmark. FEVER (Fact +L24: Extraction and VERification) consists of 185,445 claims generated by altering +L25: sentences ... +L26: * 【6†FEVEROUS: Fact Extraction and VERification Over ...; publish_date: +L27: none†datasets-benchmarks-proceedings.neurips.cc】 by R Aly · Cited by 359 — In +L28: this paper we introduce a novel dataset and benchmark, Fact Extraction and +L29: VERification Over. Unstructured and Structured information (FEVEROUS), which ... +L30: * 【7†a large-scale dataset for Fact Extraction and VERification; publish_date: +L31: none†arxiv.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we introduce +L32: a new publicly available dataset for verification against textual sources, +L33: FEVER: Fact Extraction and VERification. +L34: * 【8†FEVER Resources; publish_date: none†fever.ai】 2018 FEVER: a large-scale +L35: dataset for Fact Extraction and VERification .bib James Thorne, Andreas Vlachos, +L36: Christos Christodoulopoulos, Arpit Mittal +L37: * 【9†a Large-scale Dataset for Fact Extraction and VERification; publish_date: +L38: none†www.semanticscholar.org】 This paper introduces a new publicly available +L39: dataset for verification against textual sources, FEVER, which consists of +L40: 185,445 claims generated by ... +[10] TriviaQA benchmark (Search_Results/TriviaQA benchmark) +**viewing lines [0 - 35] of 35** + +L0: +L1: URL: Search_Results/TriviaQA benchmark +L2: # Search Results +L3: +L4: * 【0†TriviaQA; publish_date: none†nlp.cs.washington.edu】 TriviaQA is a reading +L5: comprehension dataset containing over 650K question-answer-evidence triples. +L6: TriviaQA includes 95K question-answer pairs authored ... +L7: * 【1†TriviaQA: A Large Scale Distantly Supervised Challenge ...; publish_date: +L8: none†aclanthology.org】 by M Joshi · 2017 · Cited by 3451 — We present TriviaQA, +L9: a challenging reading comprehension dataset containing over 650K question- +L10: answer-evidence triples. TriviaQA includes 95K question ... +L11: * 【2†mandarjoshi/trivia_qa · Datasets at Hugging Face; publish_date: +L12: none†huggingface.co】 TriviaqQA is a reading comprehension dataset containing +L13: over 650K question-answer-evidence triples. TriviaqQA includes 95K question- +L14: answer pairs authored by ... +L15: * 【3†[1705.03551] TriviaQA: A Large Scale Distantly Supervised ...; +L16: publish_date: none†arxiv.org】 by M Joshi · 2017 · Cited by 3451 — We present +L17: TriviaQA, a challenging reading comprehension dataset containing over 650K +L18: question-answer-evidence triples. 
+L19: * 【4†TriviaQA; publish_date: none†epoch.ai】 An open-domain question answering +L20: benchmark with challenging trivia questions paired with evidence documents. +L21: * 【5†TriviaQA Leaderboard; publish_date: none†llm-stats.com】 What is the +L22: TriviaQA benchmark? A large-scale reading comprehension dataset containing over +L23: 650K question-answer-evidence triples. TriviaQA includes 95K ... +L24: * 【6†Code for the TriviaQA reading comprehension dataset; publish_date: +L25: none†github.com】 A large scale distantly supervised challenge dataset for +L26: reading comprehension. In Association for Computational Linguistics (ACL) 2017, +L27: Vancouver, Canada. +L28: * 【7†TriviaQA - Model Benchmarks - The Regularizer; publish_date: +L29: none†www.theregularizer.com】 May 4, 2025 — Compare the performance of different +L30: AI models across standardized benchmarks. Higher scores generally indicate +L31: better performance, but context ... +L32: * 【8†TriviaQA: A Large Scale Distantly Supervised Challenge ...; publish_date: +L33: none†www.cs.utexas.edu】 by M Joshi · Cited by 3445 — We present TriviaQA, a +L34: challenging reading comprehension dataset contain- ing over 650K question- +L35: answer-evidence triples. TriviaQA includes 95K question-. +[11] Natural Questions benchmark (Search_Results/Natural Questions benchmark) +**viewing lines [0 - 39] of 39** + +L0: +L1: URL: Search_Results/Natural Questions benchmark +L2: # Search Results +L3: +L4: * 【0†Natural Questions: a Benchmark for Question Answering ...; publish_date: +L5: none†research.google】 by T Kwiatkowski · Cited by 4339 — We present the Natural +L6: Questions corpus, a question answering dataset. Questions consist of real +L7: anonymized, aggregated queries issued to the Google search ... +L8: * 【1†Natural Questions: A Benchmark for Question Answering ...; publish_date: +L9: none†aclanthology.org】 by T Kwiatkowski · Cited by 4308 — Abstract. We present +L10: the Natural Questions corpus, a question answering data set. Questions consist +L11: of real anonymized, aggregated queries issued. +L12: * 【2†Google's Natural Questions; publish_date: none†ai.google.com】 Natural +L13: Questions. A Benchmark for Question Answering Research. View examples · Download +L14: dataset. Open Domain Question Answering. A core goal in artificial ... +L15: * 【3†google-research-datasets/natural-questions; publish_date: +L16: none†github.com】 Natural Questions (NQ) contains real user questions issued to +L17: Google search, and answers found from Wikipedia by annotators. NQ is designed +L18: for the training and ... +L19: * 【4†Natural Questions: A Benchmark for Question Answering ...; publish_date: +L20: none†direct.mit.edu】 Aug 1, 2019 — We present the Natural Questions corpus, a +L21: question answering data set. Questions consist of real anonymized, aggregated +L22: queries issued to the Google search ... +L23: * 【5†ir_datasets : Natural Questions; publish_date: none†ir-datasets.com】 +L24: Google Natural Questions is a Q&A dataset containing long, short, and Yes/No +L25: answers from Wikipedia. ir_datasets frames this around an ad-hoc ranking setting +L26: ... +L27: * 【6†sentence-transformers/natural-questions · Datasets at ...; publish_date: +L28: none†huggingface.co】 This dataset is a collection of question-answer pairs from +L29: the Natural Questions dataset. See Natural Questions for additional information. 
+L30: * 【7†Google's Natural Questions; publish_date: none†ai.google.com】 Natural +L31: Questions contains 307K training examples, 8K examples for development, and a +L32: further 8K examples for testing. In the paper, we demonstrate a human ... +L33: * 【8†A Benchmark for Question Answering Research; publish_date: +L34: none†www.researchgate.net】 Jul 27, 2025 — We present the Natural Questions +L35: corpus, a question answering data set. Questions consist of real anonymized, +L36: aggregated queries issued to the Google search ... +L37: * 【9†natural-questions; publish_date: none†docs.unity.rc.umass.edu】 Sep 4, +L38: 2025 — “Natural questions: a benchmark for question answering research.” +L39: Transactions of the Association for Computational Linguistics 7 (2019): ... +[12] MS MARCO benchmark (Search_Results/MS MARCO benchmark) +**viewing lines [0 - 41] of 41** + +L0: +L1: URL: Search_Results/MS MARCO benchmark +L2: # Search Results +L3: +L4: * 【0†MS MARCO - Microsoft Open Source; publish_date: none†microsoft.github.io】 +L5: The MS MARCO datasets are intended for non-commercial research purposes only to +L6: promote advancement in the field of artificial intelligence and related areas, +L7: ... +L8: * 【1†microsoft/ms_marco · Datasets at Hugging Face; publish_date: +L9: none†huggingface.co】 Starting with a paper released at NIPS 2016, MS MARCO is a +L10: collection of datasets focused on deep learning in search. The first dataset was +L11: a question ... +L12: * 【2†Benchmarking Ranking Models in the Large-Data Regime; publish_date: +L13: none†arxiv.org】 by N Craswell · 2021 · Cited by 89 — This paper uses the MS +L14: MARCO and TREC Deep Learning Track as our case study, comparing it to the case +L15: of TREC ad hoc ranking in the 1990s. +L16: * 【3†Benchmarking Ranking Models in the Large-Data Regime; publish_date: +L17: none†www.microsoft.com】 This paper uses the MS MARCO and TREC Deep Learning +L18: Track as our case study, comparing it to the case of TREC ad hoc ranking in the +L19: 1990s. We show how the ... +L20: * 【4†Datasets for Document and Passage Ranking Leadboards; publish_date: +L21: none†microsoft.github.io】 The MS MARCO document and passage ranking leaderboards +L22: complements the TREC Deep Learning Track by providing on-going evaluation of +L23: submissions using pre- ... +L24: * 【5†MS MARCO: Benchmarking Ranking Models in the Large- ...; publish_date: +L25: none†dl.acm.org】 Jul 11, 2021 — This paper uses the MS MARCO and TREC Deep +L26: Learning Track as our case study, comparing it to the case of TREC ad hoc +L27: ranking in the 1990s. +L28: * 【6†ir_datasets : MSMARCO (passage); publish_date: none†ir-datasets.com】 A +L29: passage ranking benchmark with a collection of 8.8 million passages and question +L30: queries. Most relevance judgments are shallow. +L31: * 【7†MS MARCO; publish_date: none†sbert.net】 MS MARCO Passage Ranking is a +L32: large dataset to train models for information retrieval. It consists of about +L33: 500k real search queries from Bing search engine ... +L34: * 【8†MS MARCO: A Human Generated MAchine Reading ...; publish_date: +L35: none†arxiv.org】 by P Bajaj · 2016 · Cited by 1151 — We introduce a large scale +L36: MAchine Reading COmprehension dataset, which we name MS MARCO. The dataset +L37: comprises of 1,010,916 anonymized ... 
+L38: * 【9†MS MARCO Web Search: A Large-scale Information-rich ...; publish_date: +L39: none†www.microsoft.com】 May 13, 2024 — MS MARCO Web Search offers a retrieval +L40: benchmark with three web retrieval challenge tasks that demands innovations in +L41: both machine learning and ... +[13] BEIR benchmark (Search_Results/BEIR benchmark) +**viewing lines [0 - 37] of 37** + +L0: +L1: URL: Search_Results/BEIR benchmark +L2: # Search Results +L3: +L4: * 【0†详细介绍文本检索基准BEIR: A Heterogeneous Benchmark ...; publish_date: +L5: none†blog.csdn.net】 2023年1月1日 — +L6: BEIR旨在为所有不同的检索任务提供一个一站式的零样本评估基准。为了构建一个全面的评估基准,选择方法对于收集具有理想属性的任务和数据集至关重要。对于 ... +L7: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR is a heterogeneous +L8: benchmark containing diverse IR tasks. It also provides a common and easy +L9: framework for evaluation of your NLP-based retrieval models ... +L10: * 【2†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; +L11: publish_date: none†arxiv.org】 作者:N Thakur · 2021 · 被引用次数:1480 — We introduce +L12: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for +L13: information retrieval. +L14: * 【3†BeIR; publish_date: none†huggingface.co】 BEIR (Benchmarking IR) consists +L15: of a homogenous benchmark for diverse sentence or passage level IR tasks. It +L16: provides a common and easy framework for the cross ... +L17: * 【4†论文分享:BEIR A Heterogeneous Benchmark for Zero-shot ...; publish_date: +L18: none†zhuanlan.zhihu.com】 2022年10月3日 — 分享论文,夹带个人理解的分享,建议结合原论文看。 1 研究背景. +L19: 本论文主要关注的领域是query-document检索(下文简称qd检索),即根据query去文档库里 ... +L20: * 【5†Benchmarking IR Information Retrieval (BEIR); publish_date: +L21: none†zilliz.com】 BEIR is a benchmark designed for evaluating the versatility and +L22: robustness of information retrieval models. It features 18 diverse datasets +L23: from domains like ... +L24: * 【6†BEIR (Benchmarking IR) - OpenDataLab; publish_date: none†opendatalab.com】 +L25: 简介-Introduction. BEIR(Benchmarking +L26: IR)是包含不同信息检索(IR)任务的异构基准。通过BEIR,可以系统地研究多种神经检索方法的零样本泛化能力。 +L27: * 【7†What is the BEIR benchmark and how is it used?; publish_date: +L28: none†milvus.io】 The BEIR (Benchmarking Information Retrieval) benchmark is a +L29: standardized framework designed to evaluate the effectiveness of search and +L30: retrieval algorithms. +L31: * 【8†BEIR Benchmark数据集卡片; publish_date: none†www.atyun.com】 BEIR +L32: Benchmark数据集卡片. 数据集简介. BEIR是一个异构评测基准,由18个多样化的数据集构建而成,代表了9个信息检索任务:. 事实查证: FEVER , +L33: Climate-FEVER , SciFact ... +L34: * 【9†Evaluating search relevance part 1 - The BEIR benchmark; publish_date: +L35: none†www.elastic.co】 2024年7月16日 — Learn to evaluate your search system in the +L36: context of better understanding the BEIR benchmark, with tips & techniques to +L37: improve your ... +[14] MIRACL benchmark (Search_Results/MIRACL benchmark) +**viewing lines [0 - 41] of 41** + +L0: +L1: URL: Search_Results/MIRACL benchmark +L2: # Search Results +L3: +L4: * 【0†MIRACL | Multilingual Information Retrieval Across a ...; publish_date: +L5: none†project-miracl.github.io】 MIRACL (Multilingual Information Retrieval Across +L6: a Continuum of Languages) is an WSDM 2023 Cup challenge that focuses on search +L7: across 18 different ... +L8: * 【1†project-miracl/miracl: A large-scale multilingual dataset for ...; +L9: publish_date: none†github.com】 A large-scale multilingual dataset for +L10: Information Retrieval. Thorough human-annotations across 18 diverse languages. 
+L11: * 【2†A Large, multilingual, visual document retrieval benchmark; publish_date: +L12: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a +L13: challenging, representative, multilingual evaluation benchmark for visual +L14: retrieval pipelines and will help the community build robust ... +L15: * 【3†miracl/miracl · Datasets at Hugging Face; publish_date: +L16: none†huggingface.co】 MIRACL (Multilingual Information Retrieval Across a +L17: Continuum of Languages) is a multilingual retrieval dataset that focuses on +L18: search across 18 different ... +L19: * 【4†MIRACL: A Multilingual Retrieval Dataset Covering 18 ...; publish_date: +L20: none†direct.mit.edu】 by X Zhang · 2023 · Cited by 131 — MIRACL is a multilingual +L21: dataset for ad hoc retrieval across 18 languages that collectively encompass +L22: over three billion native speakers around the world. +L23: * 【5†(PDF) MIRACL-VISION: A Large, multilingual, visual ...; publish_date: +L24: none†www.researchgate.net】 May 23, 2025 — MIRACL-VISION covers 18 languages, and +L25: is an extension of the MIRACL dataset, a popular benchmark to evaluate text- +L26: based multilingual retrieval ... +L27: * 【6†A Large, multilingual, visual document retrieval benchmark; publish_date: +L28: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a +L29: challenging, representative, multilingual evaluation benchmark for visual +L30: retrieval pipelines and will help the community ... +L31: * 【7†ir_datasets : MIRACL; publish_date: none†ir-datasets.com】 +L32: "miracl/ar/test-a". The held-out test set (version a) for Arabic. +L33: queriesdocsCitationMetadata. 936 queries. Language: ar. Query type: +L34: GenericQuery: (namedtuple). +L35: * 【8†Evaluate on MIRACL — BGE documentation; publish_date: none†bge-model.com】 +L36: MIRACL (Multilingual Information Retrieval Across a Continuum of Languages) is +L37: an WSDM 2023 Cup challenge that focuses on search across 18 different languages. +L38: * 【9†MIRACL - Alpha's Tech Garden; publish_date: +L39: none†techgarden.alphasmanifesto.com】 MIRACL (Multilingual Information Retrieval +L40: Across a Continuum of Languages) is a multilingual dataset we have built for the +L41: WSDM 2023 Cup ... +[15] Zero-shot IR benchmark (Search_Results/Zero-shot IR benchmark) +**viewing lines [0 - 40] of 40** + +L0: +L1: URL: Search_Results/Zero-shot IR benchmark +L2: # Search Results +L3: +L4: * 【0†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; +L5: publish_date: none†arxiv.org】 by N Thakur · 2021 · Cited by 1480 — We introduce +L6: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for +L7: information retrieval.See more +L8: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR: A Heterogenous +L9: Benchmark for Zero-shot Evaluation of Information Retrieval Models (NeurIPS +L10: 2021, Datasets and Benchmarks Track); Resources for Brewing ...See more +L11: * 【2†Benchmarking IR Information Retrieval (BEIR); publish_date: +L12: none†zilliz.com】 BEIR is a tool to evaluate how well Information Retrieval +L13: systems perform across many tasks and types of information, and is a standard +L14: benchmark. +L15: * 【3†BEIR: A Heterogeneous Benchmark for Zero-shot ...; publish_date: +L16: none†datasets-benchmarks-proceedings.neurips.cc】 by N Thakur · Cited by 1480 — +L17: BEIR is a robust, heterogeneous benchmark for information retrieval, using 18 +L18: datasets and 9 tasks to evaluate model generalization. 
+L19: * 【4†BEIR; publish_date: none†eval.ai】 BEIR is a heterogeneous zero-shot +L20: retrieval benchmark containing 18 datasets from diverse text retrieval tasks and +L21: domains.See more +L22: * 【5†[2409.15763] IRSC: A Zero-shot Evaluation Benchmark for ...; +L23: publish_date: none†arxiv.org】 by H Lin · 2024 · Cited by 2 — This paper +L24: introduces the IRSC benchmark for evaluating the performance of embedding models +L25: in multilingual RAG tasks.See more +L26: * 【6†FactIR: A Real-World Zero-shot Open-Domain Retrieval ...; publish_date: +L27: none†dl.acm.org】 May 23, 2025 — In this paper, we present a real-world retrieval +L28: benchmark FactIR, derived from Factiverse production logs, enhanced with human +L29: annotations. We ...See more +L30: * 【7†UniIR: Training and Benchmarking Universal Multimodal ...; publish_date: +L31: none†tiger-ai-lab.github.io】 At test time, we evaluated the zero-shot +L32: performance of all fine-tuned models, as well as SoTA pre-trained retrievers on +L33: the three held-out datasets. UniIR ...See more +L34: * 【8†Zero-Shot BEIR Tasks; publish_date: none†www.emergentmind.com】 Aug 26, +L35: 2025 — Zero-Shot BEIR Tasks are evaluation methodologies that assess IR models' +L36: ability to generalize to unseen query domains without task-specific ...See more +L37: * 【9†BEIR-PL: Zero Shot Information Retrieval Benchmark for ...; publish_date: +L38: none†aclanthology.org】 by K Wojtasik · 2024 · Cited by 12 — BEIR-PL is a new +L39: benchmark with 13 datasets for Polish Information Retrieval, created to advance +L40: research in this area. +[16] WebGPT benchmark (Search_Results/WebGPT benchmark) +**viewing lines [0 - 38] of 38** + +L0: +L1: URL: Search_Results/WebGPT benchmark +L2: # Search Results +L3: +L4: * 【0†WebGPT: Improving the factual accuracy of language ...; publish_date: +L5: none†openai.com】 Dec 16, 2021 — Our models outperform GPT‑3 on TruthfulQA and +L6: exhibit more favourable scaling properties. However, our models lag behind human +L7: performance, ... +L8: * 【1†A Simple Yet Challenging Benchmark for Browsing Agents; publish_date: +L9: none†arxiv.org】 by J Wei · 2025 · Cited by 124 — Abstract. We present +L10: BrowseComp, a simple yet challenging benchmark for measuring the ability for +L11: agents to browse the web. +L12: * 【2†openai/webgpt_comparisons · Datasets at Hugging Face; publish_date: +L13: none†huggingface.co】 This is the dataset of all comparisons that were marked as +L14: suitable for reward modeling by the end of the WebGPT project. There are 19,578 +L15: comparisons in total. +L16: * 【3†Evaluation & Limitations of WebGPT, WebVoyager & Agent-E; publish_date: +L17: none†deepsense.ai】 Oct 14, 2024 — WebArena benchmark features 812 tasks +L18: evaluated using metrics such as Exact Match, Must Include, and Fuzzy Match, +L19: focusing on outcomes rather ... +L20: * 【4†OpenAI Announces Question-Answering AI WebGPT; publish_date: +L21: none†www.infoq.com】 Jan 25, 2022 — On the TriviaQA benchmark, WebGPT +L22: outperformed GPT-3, producing answers that were true 75% of the time, and "both +L23: true and informative" 54% of ... +L24: * 【5†WebGPT: Improving the factual accuracy of language models ...; +L25: publish_date: none†kargarisaac.medium.com】 The top-performing model generated +L26: answers that were preferred over 56% of the time compared to answers produced by +L27: human demonstrators, with ... 
+L28: * 【6†Browser-assisted question-answering with human feedback; publish_date: +L29: none†www.alphaxiv.org】 WebGPT represents a significant advancement in long-form +L30: question answering by combining the language generation capabilities of GPT-3 +L31: with real-time web ... +L32: * 【7†Benchmarking Open-Source Large Language Models, GPT-4 ...; publish_date: +L33: none†ai.nejm.org】 by S Wu · 2024 · Cited by 69 — We show that the current widely +L34: used open-source LLMs have poor zero-shot reasoning ability in nephrology +L35: compared with GPT-4 and Claude 2. +L36: * 【8†0hq/WebGPT: Run GPT model on ...; publish_date: none†github.com】 WebGPT +L37: is a vanilla JS and HTML implementation of a transformer model, intended as a +L38: proof-of-concept as well as educational resource. +[17] WebShop benchmark (Search_Results/WebShop benchmark) +**viewing lines [0 - 41] of 41** + +L0: +L1: URL: Search_Results/WebShop benchmark +L2: # Search Results +L3: +L4: * 【0†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: +L5: none†arxiv.org】 by S Yao · 2022 · Cited by 710 — To bridge this gap, we develop +L6: WebShop -- a simulated e-commerce website environment with 1.18 million real- +L7: world products and 12,087 crowd- ... +L8: * 【1†WebShop; publish_date: none†webshop-pnlp.github.io】 To bridge this gap, +L9: we develop WebShop – a simulated e-commerce website environment with 1.18 +L10: million real-world products and 12,087 crowd-sourced text ... +L11: * 【2†princeton-nlp/WebShop; publish_date: none†github.com】 WebShop is a +L12: simulated e-commerce website environment with 1.18 million real-world products +L13: and 12,087 crowd-sourced text instructions. In this environment, an ... +L14: * 【3†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: +L15: none†papers.nips.cc】 by S Yao · 2022 · Cited by 710 — We collect over 1,600 +L16: human trajectories to first validate the benchmark, then train and evaluate a +L17: diverse range of agents using reinforcement learning, ... +L18: * 【4†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: +L19: none†proceedings.neurips.cc】 by S Yao · 2022 · Cited by 709 — We have developed +L20: WebShop, a new web-based benchmark for sequential decision making and language +L21: grounding, modeled on interaction with an e-commerce website. +L22: * 【5†Webshop & Benchmark Analysis | Documentation Infinity; publish_date: +L23: none†docs.fact-finder.com】 Aug 15, 2025 — Evaluation of your shop based on +L24: different categories in comparison, to your competitors/industry. Recommended +L25: when doing a shop relaunch. +L26: * 【6†A Multi-Shop Benchmark for Evaluating Web Agents; publish_date: +L27: none†arxiv.org】 by R Peeters · 2025 · Cited by 2 — Compared to existing +L28: e-commerce benchmarks, such as WebShop or ShoppingBench, WebMall introduces +L29: comparison-shopping tasks across multiple shops ... +L30: * 【7†WebShop: towards scalable real-world web interaction with ...; +L31: publish_date: none†dl.acm.org】 by S Yao · 2022 · Cited by 710 — To bridge this +L32: gap, we develop WebShop - a simulated e-commerce website environment with 1.18 +L33: million real-world products and 12, 087 crowd- ... +L34: * 【8†[PDF] WebShop: Towards Scalable Real-World Web ...; publish_date: +L35: none†www.semanticscholar.org】 It is shown that agents trained on WebShop exhibit +L36: non-trivial sim-to-real transfer when evaluated on amazon.com and ebay.com, +L37: indicating the potential ... 
+L38: * 【9†X-WebAgentBench: A Multilingual Interactive Web ...; publish_date: +L39: none†aclanthology.org】 by P Wang · 2025 · Cited by 3 — (2023) based on the +L40: English WebShop benchmark (Yao et al., 2022), while the multilingual task scores +L41: are ob- tained through evaluation on our own benchmark. +[18] ALFWorld benchmark (Search_Results/ALFWorld benchmark) +**viewing lines [0 - 31] of 31** + +L0: +L1: URL: Search_Results/ALFWorld benchmark +L2: # Search Results +L3: +L4: * 【0†ALFWorld; publish_date: none†alfworld.github.io】 ALFWorld contains +L5: interactive TextWorld environments (Côté et. al) that parallel embodied worlds +L6: in the ALFRED dataset (Shridhar et. al). +L7: * 【1†ALFWorld: Aligning Text and Embodied Environments for ...; publish_date: +L8: none†arxiv.org】 by M Shridhar · 2020 · Cited by 674 — ALFWorld enables the +L9: creation of a new BUTLER agent whose abstract knowledge, learned in TextWorld, +L10: corresponds directly to concrete, visually grounded actions. +L11: * 【2†ALFWorld: Aligning Text and Embodied Environments ...; publish_date: +L12: none†github.com】 ALFWorld contains interactive TextWorld environments (Côté et. +L13: al) that parallel embodied worlds in the ALFRED dataset (Shridhar et. al). +L14: * 【3†alfworld - benchmark's activity; publish_date: none†huggingface.co】 MM- +L15: IQ: Benchmarking Human-Like Abstraction and Reasoning in Multimodal Models Paper +L16: • 2502.00698 • Published Feb 1 • 24 +L17: * 【4†Tackling AlfWorld with Action Attention and Common ...; publish_date: +L18: none†neurips.cc】 On the Alfworld benchmark for indoor instruction following, we +L19: achieve a significantly higher success rate (50% over the baseline) with our +L20: novel object ... +L21: * 【5†ALFWORLD: ALIGNING TEXT AND EMBODIED ...; publish_date: +L22: none†openreview.net】 by M Shridhar · Cited by 674 — The ALFRED dataset (Shridhar +L23: et al., 2020), set in the THOR simulator (Kolve et al., 2017), is a benchmark +L24: for learning to com- plete embodied household tasks ... +L25: * 【6†AlfWorld; publish_date: none†primo.ai】 Mar 23, 2024 — A simulator that +L26: enables agents to learn abstract, text based policies in TextWorld (Côté et al., +L27: 2018) and then execute goals from the ALFRED benchmark. +L28: * 【7†AlfWorld performance across 134 tasks showing cumulative...; +L29: publish_date: none†www.researchgate.net】 In the AlfWorld benchmark, we defined +L30: hallucination as the occurrence of two or more consecutive identical actions in +L31: which the environment responded with ... +[19] Mind2Web benchmark (Search_Results/Mind2Web benchmark) +**viewing lines [0 - 40] of 40** + +L0: +L1: URL: Search_Results/Mind2Web benchmark +L2: # Search Results +L3: +L4: * 【0†Mind2Web: Towards a Generalist Agent for the Web; publish_date: none†osu- +L5: nlp-group.github.io】 Mind2Web is a dataset for developing and evaluating +L6: generalist agents for the web that can follow language instructions to complete +L7: complex tasks on any ... +L8: * 【1†Online-Mind2Web Leaderboard; publish_date: none†huggingface.co】 Online- +L9: Mind2Web is a benchmark designed to evaluate the real-world performance of web +L10: agents on live websites, featuring 300 tasks across 136 popular sites ... +L11: * 【2†Mind2Web: Towards a Generalist Agent for the Web; publish_date: +L12: none†github.com】 Mind2Web is the first dataset for developing and evaluating +L13: generalist agents for the web that can follow language instructions to complete +L14: complex tasks on any ... 
+L15: * 【3†HAL: Online Mind2Web Leaderboard; publish_date: +L16: none†hal.cs.princeton.edu】 Online Mind2Web leaderboard for evaluating AI agents' +L17: ability to complete tasks on real, changing webpages. +L18: * 【4†[2506.21506] Mind2Web 2: Evaluating Agentic Search with ...; +L19: publish_date: none†arxiv.org】 by B Gou · 2025 · Cited by 11 — In this paper, we +L20: introduce Mind2Web 2, a benchmark of 130 realistic, high-quality, and long- +L21: horizon tasks that require real-time web browsing and extensive ... +L22: * 【5†Mind2Web 2: Evaluating Agentic Search with Agent-as-a-Judge; +L23: publish_date: none†osu-nlp-group.github.io】 We introduce Mind2Web 2, a benchmark +L24: of 130 realistic, high-quality, long-horizon tasks that require real-time web +L25: browsing and extensive information ... +L26: * 【6†Mind2Web: The Benchmark for AI Agent Evaluation and ...; publish_date: +L27: none†www.enhans.ai】 Sep 26, 2025 — Mind2Web is a globally recognized web-based +L28: AI Agent evaluation benchmark introduced by the NLP group at Ohio State +L29: University at NeurIPS 2023. +L30: * 【7†Evaluating AI Web Agents: Insights from the WebCanvas ...; publish_date: +L31: none†medium.com】 Thanks to the comprehensive WebCanvas Benchmark, which +L32: incorporates a robust Mind2Web-Live data set of 542 live web tasks and 2,439 ... +L33: * 【8†Mind2Web: Towards a Generalist Agent for the Web; publish_date: +L34: none†proceedings.neurips.cc】 by X Deng · 2023 · Cited by 760 — We introduce +L35: Mind2Web, the first dataset for developing and evaluating generalist agents for +L36: the web that can follow language instructions to complete complex ... +L37: * 【9†Mind2Web: Towards a Generalist Agent for the Web; publish_date: +L38: none†openreview.net】 by X Deng · Cited by 760 — We introduce Mind2Web, the first +L39: dataset for developing and evaluating generalist agents for the web that can +L40: follow language instructions to complete complex ... +[20] VisualWebArena benchmark (Search_Results/VisualWebArena benchmark) +**viewing lines [0 - 38] of 38** + +L0: +L1: URL: Search_Results/VisualWebArena benchmark +L2: # Search Results +L3: +L4: * 【0†VisualWebArena is a benchmark for multimodal agents.; publish_date: +L5: none†github.com】 VisualWebArena is a realistic and diverse benchmark for +L6: evaluating multimodal autonomous language agents. +L7: * 【1†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: +L8: none†arxiv.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we +L9: introduce VisualWebArena, a benchmark designed to assess the performance of +L10: multimodal web agents on realistic \textit{ ... +L11: * 【2†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: +L12: none†jykoh.com】 To bridge this gap, we introduce VisualWebArena, a benchmark +L13: designed to assess the performance of multimodal web agents on realistic +L14: visually grounded tasks. +L15: * 【3†VisualWebArena: Evaluating Multimodal Agents on ...; publish_date: +L16: none†arxiv.org】 VisualWebArena is a research benchmark to measure and evaluate +L17: the progress of multimodal agents. It is primarily meant to act as a self- +L18: contained sandbox ... +L19: * 【4†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: +L20: none†aclanthology.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we +L21: introduce VisualWebArena, a benchmark designed to assess the performance of +L22: multimodal web agents on *realistic visually grounded tasks*. 
+L23: * 【5†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: +L24: none†www.semanticscholar.org】 VisualWebArena: Evaluating Multimodal Agents on +L25: Realistic Visual Web Tasks ... MMInA, a multihop and multimodal benchmark to +L26: evaluate the embodied agents ... +L27: * 【6†CMU Researchers Introduce VisualWebArena: An AI ...; publish_date: +L28: none†www.marktechpost.com】 Feb 9, 2024 — VisualWebArena, a benchmark designed +L29: and developed to evaluate the performance of multimodal web agents on realistic +L30: and visually stimulating challenges. +L31: * 【7†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: +L32: none†www.themoonlight.io】 The paper "VisualWebArena: Evaluating Multimodal +L33: Agents on Realistic Visually Grounded Web Tasks" introduces a new benchmark, +L34: **VisualWebArena**, ... +L35: * 【8†WebArena: A Realistic Web Environment for Building ...; publish_date: +L36: none†webarena.dev】 Our benchmark is implemented in our fully interactable +L37: highly-realistic WebArena environment. It features diverse tasks human may +L38: encounter in their daily ... +[21] SearchBench benchmark (Search_Results/SearchBench benchmark) +**viewing lines [0 - 40] of 40** + +L0: +L1: URL: Search_Results/SearchBench benchmark +L2: # Search Results +L3: +L4: * 【0†Talc-AI/search-bench; publish_date: none†github.com】 A practical +L5: benchmark that focuses on every day helpfulness of LLM products, not just the +L6: underlying models. Searchbench is a benchmark that addresses these ... +L7: * 【1†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: +L8: none†arxiv.org】 These capabilities are essential for robust reasoning, making +L9: SearchBench a valuable benchmark for evaluating LLMs' reasoning capabilities as +L10: they continue to ... +L11: * 【2†NasimBrz/SearchBench · Datasets at Hugging Face; publish_date: +L12: none†huggingface.co】 Dataset Summary. SearchBench is a benchmark designed to +L13: evaluate Language Models' (LLMs) ability to solve state-based problems that +L14: require combinatorial search ... +L15: * 【3†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: +L16: none†openreview.net】 2025年10月22日 — To further investigate this, we introduce a +L17: new benchmark, SearchBench, which contains 11 unique search problems inspired by +L18: intuitive puzzles. +L19: * 【4†Navigating the Labyrinth: Evaluating and Enhancing LLMs' ...; +L20: publish_date: none†hub.baai.ac.cn】 2024年6月17日 — +L21: 论文提出了一个新的基准测试SearchBench,包含11种独特的搜索问题类型,并自动化生成任意数量的实例和分析解决方案的可行性、正确性和最优性。论文使用A* +L22: ... +L23: * 【5†Towards Unified Text-based Person Retrieval: A Large- ...; publish_date: +L24: none†blog.csdn.net】 2023年10月17日 — ... Search +L25: Benchmark(面向统一的基于文本的人物检索:一个大规模的多属性和语言搜索基准); 研究背景. 相关工作; BENCHMARK. 论文方法分析. 网络框架; +L26: 1、APTM ... +L27: * 【6†Desearch-ai/ai-search-benchmark; publish_date: none†github.com】 The +L28: SearchBench repository addresses common issues with traditional benchmarks by +L29: focusing on practical, everyday use cases rather than theoretical limits. It ... +L30: * 【7†o1 results for 3 benchmarks: PlanBench, SearchBench, ...; publish_date: +L31: none†www.reddit.com】 o1 results for 3 benchmarks: PlanBench, SearchBench, and +L32: Summary of a Haystack. AI. PlanBench: Paper "LLMs Still Can't Plan; Can LRMs? A +L33: ... 
+L34: * 【8†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: +L35: none†ui.adsabs.harvard.edu】 To further investigate this, we introduce a new +L36: benchmark, SearchBench, which contains 11 unique search problems inspired by +L37: intuitive puzzles. Each SearchBench ... +L38: * 【9†Introducing SearchBench; publish_date: none†www.tag1consulting.com】 +L39: Toward this goal, over the weekend I launched a new project called SearchBench, +L40: a Drupal module for benchmarking Drupal's search performance. As the module ... +[22] WebVLN benchmark (Search_Results/WebVLN benchmark) +**viewing lines [0 - 42] of 42** + +L0: +L1: URL: Search_Results/WebVLN benchmark +L2: # Search Results +L3: +L4: * 【0†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L5: none†ojs.aaai.org】 by Q Chen · 2024 · Cited by 35 — the WebVLN-v1 dataset, where +L6: the performance is far from saturation, highlighting the utility of our +L7: WebVLN-v1 as a benchmark to assess progress in this field. +L8: * 【1†[2312.15820] WebVLN: Vision-and-Language Navigation on Websites; +L9: publish_date: none†ar5iv.labs.arxiv.org】 Experimental results show that WebVLN- +L10: Net outperforms current VLN and web-related navigation methods. ... Code is +L11: available at: https://github.com/WebVLN/WebVLN. +L12: * 【2†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L13: none†github.com】 Experimental results show that WebVLN-Net outperforms current +L14: VLN and web-related navigation methods. We believe that the introduction of the +L15: new WebVLN task ... +L16: * 【3†Vision-and-Language Navigation in the Real-World; publish_date: +L17: none†digital.library.adelaide.edu.au】 By leveraging our proposed WebVLN-v1 +L18: dataset, experimental results showcase the superior performance of WebVLN-Net +L19: compared to existing VLN and web-related ... +L20: * 【4†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L21: none†www.researchgate.net】 Experimental results show that WebVLN-Net outperforms +L22: current VLN and web-related navigation methods. We believe that the +L23: introduction of the newWebVLN task and ... +L24: * 【5†[PDF] WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L25: none†www.semanticscholar.org】 A new task named Vision-and-Language Navigation on +L26: Websites (WebVLN), where question-based instructions are used to train an +L27: agent, emulating how users ... +L28: * 【6†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L29: none†arxiv.org】 by Q Chen · 2023 · Cited by 35 — Experimental results show that +L30: WebVLN-Net outperforms current VLN and web-related navigation methods. We +L31: believe that the introduction of the ... +L32: * 【7†Human-Aware Vision-and-Language Navigation; publish_date: +L33: none†proceedings.neurips.cc】 by H Li · 2024 · Cited by 19 — Vision-and-Language +L34: Navigation (VLN) [2, 7, 9, 40] has emerged as a key benchmark for evaluating. +L35: Sim2Real transfer [23], showing impressive performance in ... +L36: * 【8†LiveBench; publish_date: none†livebench.ai】 Introducing LiveBench: a +L37: benchmark for LLMs designed with test set contamination and objective evaluation +L38: in mind. +L39: * 【9†MG-VLN: Benchmarking Multi-Goal and Long-Horizon ...; publish_date: +L40: none†ieeexplore.ieee.org】 by J Zhang · 2024 — This task aims to provide a +L41: simulation benchmark to guide the design of lifelong and long-horizon navigation +L42: robots. 
+[23] WebNav benchmark (Search_Results/WebNav benchmark) +**viewing lines [0 - 36] of 36** + +L0: +L1: URL: Search_Results/WebNav benchmark +L2: # Search Results +L3: +L4: * 【0†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: +L5: none†github.com】 WebNav is a benchmark task for evaluating an agent with +L6: abilities to understand natural language and plan on partially observed +L7: environments. +L8: * 【1†[1602.02261] End-to-End Goal-Driven Web Navigation; publish_date: +L9: none†arxiv.org】 by R Nogueira · 2016 · Cited by 39 — We propose a goal-driven +L10: web navigation as a benchmark task for evaluating an agent with abilities to +L11: understand natural language and plan on partially ... +L12: * 【2†nyu-dl/dl4ir-webnav; publish_date: none†github.com】 WebNav is a benchmark +L13: task for evaluating an agent with abilities to understand natural language and +L14: plan on partially observed environments. +L15: * 【3†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: +L16: none†www.researchgate.net】 We propose a goal-driven web navigation as a +L17: benchmark task for evaluating an agent with abilities to understand natural +L18: language and plan on partially ... +L19: * 【4†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: +L20: none†arxiv.org】 In this paper, we introduce WebNav, an innovative multi-modal +L21: agent designed to automate web tasks based on natural language. +L22: * 【5†WebCanvas: Benchmarking Web Agents in Online ...; publish_date: +L23: none†openreview.net】 by Y Pan · Cited by 78 — TL;DR: We introduce WebCanvas, an +L24: online evaluation framework for web agents designed to address the dynamic +L25: nature of web interactions. +L26: * 【6†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: +L27: none†huggingface.co】 Mar 17, 2025 — Preliminary evaluations show that WebNav +L28: outperforms traditional screen readers in response time and task completion +L29: accuracy for the visually ... +L30: * 【7†Web Bench - A new way to compare AI Browser Agents; publish_date: +L31: none†www.skyvern.com】 May 29, 2025 — TL;DR: Web Bench is a new dataset to +L32: evaluate web browsing agents that consists of 5,750 tasks on 452 different +L33: websites, with 2,454 tasks ... +L34: * 【8†Web Framework Benchmarks; publish_date: none†www.techempower.com】 In the +L35: following tests, we have measured the performance of several web application +L36: platforms, full-stack frameworks, and micro-frameworks. 
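Note for readers of this fixture: the assistant turn below batches `open` tool calls in the DSML invoke format, where the numeric `cursor` and `id` arguments are rendered with string="false". The following is a minimal, illustrative Rust sketch (not part of this patch) showing how the JSON arguments of one such call relate to that attribute; mapping string-typed values to "true" is an assumption made here purely for illustration.

use serde_json::Value;

/// Illustrative helper: choose the `string` attribute for a DSML parameter.
/// The turn below renders numeric values with string="false"; treating
/// string-typed values as "true" is an assumption for this sketch.
fn string_attr(value: &Value) -> &'static str {
    if value.is_string() { "true" } else { "false" }
}

fn main() {
    // JSON arguments of the first `open` call in the assistant turn below,
    // in the single-string form an OpenAI-style tool call would carry.
    let arguments = r#"{"cursor": 4, "id": 3}"#;
    let parsed: Value = serde_json::from_str(arguments).expect("valid JSON arguments");

    for (name, value) in parsed.as_object().expect("arguments must be an object") {
        // Both values are numbers, so both report string="false", matching
        // the rendered <|DSML|parameter ...> lines in the turn below.
        println!("{name}: {value} -> string=\"{}\"", string_attr(value));
    }
}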
+ + +我们收到了大量关于各种基准测试的搜索结果。现在,我们需要整理一份针对搜索智能体的基准测试列表。这些基准测试可以分为几类:网络浏览智能体基准测试(例如BrowseComp、WebVoyager、WebArena、VisualWebArena、Mind2Web、WebVLN、WebNav)、工具使用智能体基准测试(例如ToolBench、GTA)、问答/检索基准测试(例如HotpotQA、FEVER、TriviaQA、Natural Questions、MS MARCO、BEIR、MIRACL),以及特定领域的搜索基准测试(例如HSCodeComp、WebShop、ALFWorld)。此外,还有一些通用智能体基准测试,例如GAIA、SuperCLUE-Agent等。用户要求“详细介绍各自的特点、使用场景、例题”。因此,我需要为每个基准测试提供详细的描述,包括其特点、使用场景,并举例说明。为了全面获取这些信息,我将同时打开以下基准测试的相关页面:WebVoyager(索引3)、WebArena(索引1)、GAIA(索引1)、ToolBench(索引0)、HotpotQA(索引0)、FEVER(索引1)、TriviaQA(索引0)、Natural Questions(索引0)、MS MARCO(索引0)、BEIR(索引1)、MIRACL(索引0)、WebShop(索引0)、ALFWorld(索引0)、Mind2Web(索引0)、VisualWebArena(索引0)、SearchBench(索引1)、WebVLN(索引1)和WebNav(索引0)。 + +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">4 +<|DSML|parameter name="id" string="false">3 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">5 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">6 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">7 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">8 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">9 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">10 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">11 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">12 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">13 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">14 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">17 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">18 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">19 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">20 +<|DSML|parameter name="id" string="false">0 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">21 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">22 +<|DSML|parameter name="id" string="false">1 + +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">23 +<|DSML|parameter name="id" string="false">0 + +<|end▁of▁sentence|> + + +[24] (https://ar5iv.org/abs/2401.13919) +**viewing lines [0 - 129] of 825** + +L0: +L1: URL: https://ar5iv.org/abs/2401.13919 +L2: # WebVoyager : Building an End-to-End Web Agent with +L3: Large Multimodal Models +L4: +L5: Hongliang He1,3 , Wenlin Yao2, Kaixin Ma2, Wenhao Yu2, Yong Dai2, +L6: +L7: Hongming Zhang2, +L8: Zhenzhong Lan3, +L9: Dong Yu2 +L10: 1Zhejiang University, +L11: 2Tencent AI Lab, +L12: 3Westlake University +L13: hehongliang@westlake.edu.cn, wenlinyao@global.tencent.com +L14: Work done during the internship at Tencent AI Lab. 
+L15: +L16: ###### Abstract +L17: +L18: The advancement of large language models (LLMs) leads to a new era marked by the +L19: development of autonomous applications in the real world, which drives +L20: innovation in the creation of advanced web-based agents. Existing web agents +L21: typically only handle one input modality and are evaluated only in simplified +L22: web simulators or static web snapshots, greatly limiting their applicability in +L23: real-world scenarios. To bridge this gap, we introduce WebVoyager, an innovative +L24: Large Multimodal Model (LMM) powered web agent that can complete user +L25: instructions end-to-end by interacting with real-world websites. Moreover, we +L26: propose a new evaluation protocol for web agents to address the challenges of +L27: automatic evaluation of open-ended web agent tasks, leveraging the robust +L28: multimodal comprehension capabilities of GPT-4V. We create a new benchmark by +L29: gathering real-world tasks from 15 widely used websites to evaluate our agents. +L30: We show that WebVoyager achieves a 55.7% task success rate, significantly +L31: surpassing the performance of both GPT-4 (All Tools) and the WebVoyager (text- +L32: only) setups, underscoring the exceptional capability of WebVoyager in practical +L33: applications. We found that our proposed automatic evaluation achieves 85.3% +L34: agreement with human judgment, paving the way for further development of web +L35: agents in a real-world setting.111Our code and data will be released at +L36: https://github.com/MinorJerry/WebVoyager +L37: +L38: ## 1 Introduction +L39: +L40: The recent advancement of large language models (LLMs), such as ChatGPT and +L41: GPT-4 (OpenAI, 2023), have sparked significant interest in developing LLM-based +L42: autonomous agents (AutoGPT, 2022) for complex task execution (Qin et al., 2023; +L43: Schick et al., 2023). Recent studies have explored the construction of text- +L44: based web browsing environments and how to instruct large language model agents +L45: to perform web navigation (Nakano et al., 2021; Gur et al., 2023; Zhou et al., +L46: 2023; Lu et al., 2023). The primary challenge in these works lies in managing +L47: complex and verbose HTML texts, and solutions include simplifying and +L48: structuring HTML (Nakano et al., 2021; Zhou et al., 2023; Gur et al., 2023; Deng +L49: et al., 2023). +L50: +L51: However, existing approaches overlook a critical functionality of browsing: +L52: rendering HTML into visual webpages. Particularly, vision capability is crucial +L53: for utilizing tools like web browsers, as rendered web pages are inherently +L54: designed with user experience (UX), emphasizing intuitive information and +L55: structured presentation. This design principle of rendering makes visual +L56: analysis more effective than mere HTML representation. At present, large +L57: multimodal models (LMMs), particularly GPT-4V(ision) (OpenAI, 2023) and Gemini +L58: (Team et al., 2023), demonstrate a remarkable ability to integrate intricate +L59: visual cues with textual information. Existing studies such as Pix2Struct (Lee +L60: et al., 2023) and WebArena (Zhou et al., 2023), have initiated explorations into +L61: using screenshots as inputs for decision-making in web navigation, yet these +L62: are preliminary and do not represent a deep exploration. 
Therefore, building +L63: multimodal web agents to leverage the environment rendered by browsers through +L64: screenshots, thus mimicking human web browsing behavior, is now a viable +L65: approach to enhance web navigation efficiency. +L66: +L67: We introduce WebVoyager, a multimodal web agent designed to handle web tasks +L68: online in an end-to-end manner, which denotes managing the process from start to +L69: finish autonomously without intermediate human intervention. We construct an +L70: online environment using Selenium for WebVoyager, feeding it with screenshots +L71: and textual content in interactive web elements. Inspired by Set-of-Mark +L72: Prompting (Yang et al., 2023a), we mark interactive web elements on screenshots +L73: (see Figure 2) to facilitate decision-making for WebVoyager. As a pioneer in +L74: combining vision and text information during web navigation, we advocate that +L75: autonomous end-to-end task completion, multimodal capabilities and online +L76: navigation constitute the essential trajectory toward the genuine intelligence +L77: of web agents. +L78: +L79: Another challenge arises when it comes to evaluating an end-to-end web agent +L80: with online navigation. Existing benchmarks, such as Mind2Web (Deng et al., +L81: 2023), primarily focus on stepwise and offline evaluation, where agents follow +L82: predefined “golden” trajectory for action selection. This approach, however, may +L83: not fully account for the variety of viable strategies to accomplish a task, as +L84: it only reflects one possible plan. This limitation could lead to a biased +L85: evaluation and difficulties in fairly comparing different methods. To more +L86: accurately gauge the capabilities of web agents in end-to-end task completion, +L87: we save screenshots throughout the online navigation process, and then use +L88: GPT-4V to evaluate these trajectories and the final results automatically. Human +L89: evaluations are also conducted to verify the results and confirm the +L90: reliability of GPT-4V as the evaluator. +L91: +L92: We conduct evaluations on a collected dataset, which is semi-automatically +L93: generated using a self-instruct (Wang et al., 2022) method, comprising 300 web +L94: tasks from 15 commonly accessed websites. Additionally, we extract 90 web- +L95: related tasks of level 1 and level 2 from the GAIA (Mialon et al., 2023) to +L96: enrich our evaluation. We compare our WebVoyager with 1) GPT-4 (All +L97: Tools)222GPT-4 (All Tools) is an integrated tool-based agent released by OpenAI +L98: in Oct. 2023. See https://chat.openai.com/, and 2) WebVoyager in a text-only +L99: setting, employing the accessibility tree proposed in WebArena (Zhou et al., +L100: 2023) to describe web pages. The results show that WebVoyager achieves a Task +L101: Success Rate of 55.7%, significantly outperforming GPT-4 (All Tools) with a rate +L102: of 32.7% and the text-only setting with a rate of 39.0%, demonstrating the +L103: effectiveness of our method. Furthermore, we report the consistency between +L104: human-human and human-GPT4V to ensure credibility. Our main contributions are as +L105: follows: +L106: +L107: - • +L108: We employ a multimodal web agent that integrates textual and visual information +L109: to address web tasks end-to-end and introduce a generalist planning approach for +L110: navigation. 
+L111: +L112: - • +L113: We build an online web browsing environment, offering a variety of tasks +L114: centered on widely used websites and introducing a method for expanding these +L115: tasks. +L116: +L117: - • +L118: We conduct manual evaluations of navigation trajectories and propose an +L119: automated evaluation protocol using GPT-4V. We present a comprehensive analysis +L120: of the evaluation results and show that GPT-4V can serve as a reliable evaluator +L121: for online agents. +L122: +L123: ## 2 Related Work +L124: +L125: ### 2.1 Web Agents +L126: +L127: Autonomous web navigation (Shi et al., 2017; Yang et al., 2023b) requires an +L128: agent to follow instructions, construct plans, comprehend complex web +L129: structures, and decompose tasks into step-by-step decisions (Weng, 2023). To +[25] (https://ar5iv.org/abs/2307.13854) +**viewing lines [0 - 124] of 1092** + +L0: +L1: URL: https://ar5iv.org/abs/2307.13854 +L2: # WebArena: A Realistic Web Environment for Building Autonomous Agents +L3: +L4: Shuyan Zhou Frank F. Xu11footnotemark: 1 Hao Zhu Xuhui Zhou22footnotemark: 2 +L5: +L6: Robert Lo22footnotemark: 2 Abishek Sridhar22footnotemark: 2 Xianyi Cheng Tianyue +L7: Ou +L8: Yonatan Bisk Daniel Fried Uri Alon Graham Neubig +L9: Carnegie Mellon University +L10: {shuyanzh, fangzhex, gneubig}@cs.cmu.edu +L11: Lead contributors.Equal contribution. +L12: +L13: ###### Abstract +L14: +L15: With advances in generative AI, there is now potential for autonomous agents to +L16: manage daily tasks via natural language commands. However, current agents are +L17: primarily created and tested in simplified synthetic environments, leading to a +L18: disconnect with real-world scenarios. In this paper, we build an environment for +L19: language-guided agents that is highly realistic and reproducible. Specifically, +L20: we focus on agents that perform tasks on the web, and create an environment +L21: with fully functional websites from four common domains: e-commerce, social +L22: forum discussions, collaborative software development, and content management. +L23: Our environment is enriched with tools (e.g., a map) and external knowledge +L24: bases (e.g., user manuals) to encourage human-like task-solving. Building upon +L25: our environment, we release a set of benchmark tasks focusing on evaluating the +L26: functional correctness of task completions. The tasks in our benchmark are +L27: diverse, long-horizon, and designed to emulate tasks that humans routinely +L28: perform on the internet. We experiment with several baseline agents, integrating +L29: recent techniques such as reasoning before acting. The results demonstrate that +L30: solving complex tasks is challenging: our best GPT-4-based agent only achieves +L31: an end-to-end task success rate of 14.41%, significantly lower than the human +L32: performance of 78.24%. These results highlight the need for further development +L33: of robust agents, that current state-of-the-art large language models are far +L34: from perfect performance in these real-life tasks, and that WebArena can be used +L35: to measure such progress. +L36: +L37: Our code, data, environment reproduction resources, and video demonstrations are +L38: publicly available at https://webarena.dev/. +L39: +L40: ## 1 Introduction +L41: +L42: Autonomous agents that perform everyday tasks via human natural language +L43: commands could significantly augment human capabilities, improve efficiency, and +L44: increase accessibility. 
Nonetheless, to fully leverage the power of autonomous +L45: agents, it is crucial to understand their behavior within an environment that is +L46: both authentic and reproducible. This will allow measurement of the ability of +L47: agents on tasks that human users care about in a fair and consistent manner. +L48: +L49: Current environments for evaluate agents tend to over-simplify real-world +L50: situations. As a result, the functionality of many environments is a limited +L51: version of their real-world counterparts, leading to a lack of task diversity +L52: (Shi et al., 2017; Anderson et al., 2018; Gordon et al., 2018; Misra et al., +L53: 2016; Shridhar et al., 2020; 2021; Yao et al., 2022a). In addition, these +L54: simplifications often lower the complexity of tasks as compared to their +L55: execution in the real world (Puig et al., 2018; Shridhar et al., 2020; Yao et +L56: al., 2022a). Finally, some environments are presented as a static resource (Shi +L57: et al., 2017; Deng et al., 2023) where agents are confined to accessing only +L58: those states that were previously cached during data collection, thus limiting +L59: the breadth and diversity of exploration. Dor evaluation, many environments +L60: focus on comparing the textual surface form of the predicted action sequences +L61: with reference action sequences, disregarding the functional correctness of the +L62: executions and possible alternative solutions (Puig et al., 2018; Jernite et +L63: al., 2019; Xu et al., 2021; Li et al., 2020; Deng et al., 2023). These +L64: limitations often result in a discrepancy between simulated environments and the +L65: real world, and can potentially impact the generalizability of AI agents to +L66: successfully understand, adapt, and operate within complex real-world +L67: situations. +L68: +L69: We introduce WebArena, a realistic and reproducible web environment designed to +L70: facilitate the development of autonomous agents capable of executing tasks (§2). +L71: An overview of WebArena is in Figure 1. Our environment comprises four fully +L72: operational, self-hosted web applications, each representing a distinct domain +L73: prevalent on the internet: online shopping, discussion forums, collaborative +L74: development, and business content management. Furthermore, WebArena incorporates +L75: several utility tools, such as map, calculator, and scratchpad, to best support +L76: possible human-like task executions. Lastly, WebArena is complemented by an +L77: extensive collection of documentation and knowledge bases that vary from general +L78: resources like English Wikipedia to more domain-specific references, such as +L79: manuals for using the integrated development tool (Fan et al., 2022). The +L80: content populating these websites is extracted from their real-world +L81: counterparts, preserving the authenticity of the content served on each +L82: platform. We deliver the hosting services using Docker containers with gym-APIs +L83: (Brockman et al., 2016), ensuring both the usability and the reproducibility of +L84: WebArena. +L85: +L86: Along with WebArena, we release a ready-to-use benchmark with 812 long-horizon +L87: web-based tasks (§3). Each task is described as a high-level natural language +L88: intent, emulating the abstract language usage patterns typically employed by +L89: humans (Bisk et al., 2019). Two example intents are shown in the upper left of +L90: Figure 1. 
We focus on evaluating the functional correctness of these tasks, +L91: i.e., does the result of the execution actually achieve the desired goal (§3.2). +L92: For instance, to evaluate the example in Figure 2, our evaluation method +L93: verifies the concrete contents in the designated repository. This evaluation is +L94: not only more reliable (Zhong et al., 2017; Chen et al., 2021; Wang et al., +L95: 2022) than comparing the textual surface-form action sequences (Puig et al., +L96: 2018; Deng et al., 2023) but also accommodate a range of potential valid paths +L97: to achieve the same goal, which is a ubiquitous phenomenon in sufficiently +L98: complex tasks. +L99: +L100: We use this benchmark to evaluate several agents that can follow NL command and +L101: perform web-based tasks (§4). These agents are implemented in a few-shot in- +L102: context learning fashion with powerful large language models (LLMs) such as +L103: GPT-4 and PALM-2. Experiment results show that the best GPT-4 agent performance +L104: is somewhat limited, with an end-to-end task success rate of only 14.41%, while +L105: the human performance is 78.24%. We hypothesize that the limited performance of +L106: current LLMs stems from a lack of crucial capabilities such as active +L107: exploration and failure recovery to successfully perform complex tasks (§5.2). +L108: These outcomes underscore the necessity for further development towards robust +L109: and effective agents (LeCun, 2022) in WebArena. +L110: +L111: ## 2 WebArena: Websites as an Environment for Autonomous Agents +L112: +L113: Our goal is to create a realistic and reproducible web environment. We achieve +L114: reproducibility by making the environment standalone, without relying on live +L115: websites. This circumvents technical challenges such as bots being subject to +L116: CAPTCHAs, unpredictable content modifications, and configuration changes, which +L117: obstruct a fair comparison across different systems over time. We achieve +L118: realism by using open-source libraries that underlie many in-use sites from +L119: several popular categories and importing data to our environment from their +L120: real-world counterparts. +L121: +L122: ### 2.1 Controlling Agents through High-level Natural Language +L123: +L124: The WebArena environment is denoted asℰ\mathcal{E} with state space +[26] (https://ar5iv.org/abs/2311.12983) +**viewing lines [0 - 118] of 1207** + +L0: +L1: URL: https://ar5iv.org/abs/2311.12983 +L2: 1]FAIR, Meta 2]HuggingFace 3]AutoGPT 4]GenAI, Meta +L3: +L4: # GAIA: A Benchmark for General AI Assistants +L5: +L6: Grégoire Mialon Clémentine Fourrier Craig Swift Thomas Wolf Yann LeCun Thomas +L7: Scialom [ [ [ [ {gmialon,tscialom}@meta.com clementine@huggingface.co +L8: +L9: ###### Abstract +L10: +L11: We introduce GAIA, a benchmark for General AI Assistants that, if solved, would +L12: represent a milestone in AI research. GAIA proposes real-world questions that +L13: require a set of fundamental abilities such as reasoning, multi-modality +L14: handling, web browsing, and generally tool-use proficiency. GAIA questions are +L15: conceptually simple for humans yet challenging for most advanced AIs: we show +L16: that human respondents obtain 92% vs. 15% for GPT-4 equipped with plugins. This +L17: notable performance disparity contrasts with the recent trend of LLMs +L18: outperforming humans on tasks requiring professional skills in e.g. law or +L19: chemistry. 
GAIA’s philosophy departs from the current trend in AI benchmarks +L20: suggesting to target tasks that are ever more difficult for humans. We posit +L21: that the advent of Artificial General Intelligence (AGI) hinges on a system’s +L22: capability to exhibit similar robustness as the average human does on such +L23: questions. Using GAIA’s methodology, we devise 466 questions and their answer. +L24: We release our questions while retaining answers to 300 of them to power a +L25: leader-board hereby accessible. +L26: +L27: \correspondence +L28: +L29: ## 1 Introduction +L30: +L31: Large Language Models (LLMs) arguably open the way to general purpose systems. +L32: Indeed, the latest among them (OpenAI, 2023; Anthropic, 2023; Anil et al., 2023; +L33: Touvron et al., 2023) are fluent, knowledgeable, aligned to some extent with +L34: human preferences (Ouyang et al., 2022), and can be augmented (Mialon et al., +L35: 2023) with tools such as web browsers or code interpreters in a zero or few-shot +L36: setting (Brown et al., 2020). However, evaluating these systems is an open +L37: problem: given their emerging new capabilities, LLMs are regularly breaking AI +L38: benchmarks, at an ever-increasing rate (Kiela et al., 2023). +L39: +L40: In search for more challenging benchmarks, current trend suggests to seek tasks +L41: that are ever more difficult for humans, and challenge LLMs with more intricate +L42: educational assessments, for example in STEM and Law, or target more complex +L43: realisations, such as writing a coherent book. But, tasks that are difficult for +L44: humans are not necessarily difficult for recent systems: the challenging MMLU +L45: or GSM8k benchmarks for example (Hendrycks et al., 2021; Cobbe et al., 2021) are +L46: already close to be solved,111GPT4 does 86.4% on MMLU. Human non-specialist +L47: accuracy on the benchmark is only 34.5% Expert-level human performance is +L48: estimated at 89.8%. due to rapid LLM improvement possibly combined with data +L49: contamination.222See for example the case of Hellaswag. Furthermore, open-ended +L50: generation generally requires human or model-based evaluation (Zheng et al., +L51: 2023). Human evaluation will become less and less feasible when increasing the +L52: task complexity, e.g. in terms of output length or required skills: how to +L53: evaluate a book generated by an AI, or solutions to maths problems that few +L54: people in the world can solve? Model-based evaluations on the other hand are by +L55: construction dependent of stronger models hence cannot evaluate new state-of- +L56: the-art models, without mentioning potential subtle biases such as preferring +L57: the first choice presented (Zheng et al., 2023). Overall, evaluating new AI +L58: systems requires to rethink benchmarks (Chollet, 2019). +L59: +L60: Alternatively to tasks that are harder for humans, AI systems could be asked to +L61: solve conceptually simple tasks yet that require accurate execution of complex +L62: sequences of actions, with large combinatorial spaces. The output could only be +L63: obtained upon successful completion of the task and be easy to validate, +L64: analogous to the Proof of Work algorithm (Jakobsson and Juels, 1999; Dwork and +L65: Naor, 1993), where a computer is asked to solve a complex problem whose solution +L66: is easy to verify. Tasks for AI assistants, given their need for access to a +L67: diverse and uncertain world, meet this criterion while being inherently rooted +L68: in practical use cases. 
+L69: +L70: We move in that direction by proposing GAIA, a benchmark for General AI +L71: Assistants featuring 466 carefully crafted questions and their answer, along +L72: with the associated design methodology. Our questions are easy to create, +L73: challenging for AI systems—for LLMs, most require complex generations—, yet +L74: admit a unique, factual answer, allowing a simple and robust automatic +L75: evaluation. +L76: +L77: GAIA attempts to avoid current pitfalls of LLMs evaluation by targeting: +L78: +L79: Real-world and challenging questions. For example, a LLM will typically need to +L80: browse the open and changing web, handle multi-modality, or reason over multiple +L81: steps to answer our questions. Conversely, many LLM benchmarks are quite +L82: specific and/or restricted to closed and synthetic environments. +L83: +L84: Easy interpretability through conceptually simple tasks—non experts annotators +L85: exhibit a near perfect score—, associated reasoning trace, and few but highly +L86: curated questions. This is in contrast with aggregated benchmarks that can lack +L87: efficiency and reliability (Perlitz et al., 2023). +L88: +L89: Non-gameability. Answering the questions requires successful completion of some +L90: number of steps, which cannot easily be brute forced due to their diversity. The +L91: possibility to check the reasoning trace, the accuracy required in the answers, +L92: their absence in plain text from the internet prevent a possible data +L93: contamination. In contrast, multiple choice answers (e.g., MMLU) make +L94: contamination assessment more difficult since a wrong reasoning trace can more +L95: easily get to the correct choice. +L96: +L97: Simplicity of use. Crucially, the answers to our questions are factoid, concise +L98: and unambiguous. These properties allow simple, fast and factual evaluation. Our +L99: questions are meant to be answered in zero shot, limiting the influence of the +L100: evaluation setup. By opposition, many LLM benchmarks require evaluations that +L101: are sensitive to the experimental setup such as the number and nature of prompts +L102: (Liang et al., 2022b) (Section 8.2), or the benchmark +L103: implementation.333https://huggingface.co/blog/evaluating-mmlu-leaderboard +L104: +L105: In spite of being successful at tasks that are difficult for humans, the most +L106: capable LLMs do poorly on GAIA. Even equipped with tools, GPT4 does not exceed a +L107: 30% success rate for the easiest of our tasks, and 0% for the hardest. In the +L108: meantime, the average success rate for human respondents is 92%. Consequently, a +L109: system capable of solving GAIA can be assessed in the context of t-AGI,444As +L110: defined in https://www.alignmentforum.org/posts/BoA3agdkAzL6HQtQP/clarifying- +L111: and-predicting-agi, a t-AGI beats, on most tasks, most human experts who are +L112: given time t to perform the task noting that humans typically take between 6 +L113: minutes for the simplest questions to 17 minutes for the most complex ones. From +L114: a related perspective, such system would arguably be a competent General AI +L115: within the framework recently proposed in Morris et al. (2023), which also +L116: appear to be the next milestone in AI research since ChatGPT (OpenAI, 2023) is +L117: one level below. 
This paper covers the composition of GAIA, its design choices, +L118: and explain how to craft questions and the associated challenges so that the +Error fetching URL `https://github.com/sambanova/toolbench` +Error fetching URL `https://hotpotqa.github.io/` +[27] (https://aclanthology.org/N18-1074.pdf) +**viewing lines [0 - 134] of 809** + +L0: +L1: URL: https://aclanthology.org/N18-1074.pdf +L2: ===== Page 1 ===== +L3: +L4: # FEVER: a large-scale dataset for Fact Extraction and VERification +L5: +L6: James Thorne\(^1\), Andreas Vlachos\(^1\), Christos Christodoulopoulos\(^2\), +L7: and Arpit Mittal\(^2\) +L8: +L9: \(^1\)Department of Computer Science, University of Sheffield +L10: \(^2\)Amazon Research Cambridge +L11: {j.thorne, a.vlachos}@sheffield.ac.uk +L12: {chrchrs, mitarpit}@amazon.co.uk +L13: +L14: ## Abstract +L15: +L16: In this paper we introduce a new publicly available dataset for verification +L17: against textual sources, FEVER: Fact Extraction and VERification. It consists of +L18: 185,445 claims generated by altering sentences extracted from Wikipedia and +L19: subsequently verified without knowledge of the sentence they were derived from. +L20: The claims are classified as Supported, Refuted or NotEnoughInfo by annotators +L21: achieving 0.6841 in Fleiss \(\kappa\). For the first two classes, the annotators +L22: also recorded the sentence(s) forming the necessary evidence for their +L23: judgment. To characterize the challenge of the dataset presented, we develop a +L24: pipeline approach and compare it to suitably designed oracles. The best accuracy +L25: we achieve on labeling a claim accompanied by the correct evidence is 31.87%, +L26: while if we ignore the evidence we achieve 50.91%. Thus we believe that FEVER is +L27: a challenging testbed that will help stimulate progress on claim verification +L28: against textual sources. +L29: +L30: ## 1 Introduction +L31: +L32: The ever-increasing amounts of textual information available combined with the +L33: ease in sharing it through the web has increased the demand for verification, +L34: also referred to as fact checking. While it has received a lot of attention in +L35: the context of journalism, verification is important for other domains, e.g. +L36: information in scientific publications, product reviews, etc. +L37: +L38: In this paper we focus on verification of textual claims against textual +L39: sources. When compared to textual entailment (TE)/natural language inference +L40: (Dagan et al., 2009; Bowman et al., 2015), the key difference is that in these +L41: tasks the passage to verify each claim is given, and in recent years it +L42: typically consists a single sentence, while in verification systems it is +L43: retrieved from a large set of documents in order to form the evidence. Another +L44: related task is question answering (QA), for which approaches have recently been +L45: extended to handle large-scale resources such as Wikipedia (Chen et al., 2017). +L46: However, questions typically provide the information needed to identify the +L47: answer, while information missing from a claim can often be crucial in +L48: retrieving refuting evidence. For example, a claim stating "Fiji's largest +L49: island is Kauai." can be refuted by retrieving "Kauai is the oldest Hawaiian +L50: Island." as evidence. +L51: +L52: Progress on the aforementioned tasks has benefited from the availability of +L53: large-scale datasets (Bowman et al., 2015; Rajpurkar et al., 2016). 
However, +L54: despite the rising interest in verification and fact checking among researchers, +L55: the datasets currently used for this task are limited to a few hundred claims. +L56: Indicatively, the recently conducted Fake News Challenge (Pomerleau and Rao, +L57: 2017) with 50 participating teams used a dataset consisting of 300 claims +L58: verified against 2,595 associated news articles which is orders of magnitude +L59: smaller than those used for TE and QA. +L60: +L61: In this paper we present a new dataset for claim verification, FEVER: Fact +L62: Extraction and VERification. It consists of 185,445 claims manually verified +L63: against the introductory sections of Wikipedia pages and classified as +L64: Supported, Refuted or NotEnoughInfo. For the first two classes, systems and +L65: annotators need to also return the combination of sentences forming the +L66: necessary evidence supporting or refuting the claim (see Figure 1). The claims +L67: were generated by human annotators extracting claims from Wikipedia and mutating +L68: them in a variety of ways, some of which were meaning-altering. The +L69: verification of each +L70: +L71: 809 +L72: +L73: Proceedings of NAACL-HLT 2018, pages 809–819 +L74: +L75: New Orleans, Louisiana, June 1 - 6, 2018. ©2018 Association for Computational +L76: Linguistics +L77: +L78: ===== Page 2 ===== +L79: +L80: claim was conducted in a separate annotation process by annotators who were +L81: aware of the page but not the sentence from which original claim was extracted +L82: and thus in 31.75% of the claims more than one sentence was considered +L83: appropriate evidence. Claims require composition of evidence from multiple +L84: sentences in 16.82% of cases. Furthermore, in 12.15% of the claims, this +L85: evidence was taken from multiple pages. +L86: +L87: To ensure annotation consistency, we developed suitable guidelines and user +L88: interfaces, resulting in inter-annotator agreement of 0.6841 in Fleiss (Fleiss, +L89: 1971) in claim verification classification, and 95.42% precision and 72.36% +L90: recall in evidence retrieval. +L91: +L92: To characterize the challenges posed by FEVER we develop a pipeline approach +L93: which, given a claim, first identifies relevant documents, then selects +L94: sentences forming the evidence from the documents and finally classifies the +L95: claim w.r.t. evidence. The best performing version achieves 31.87% accuracy in +L96: verification when requiring correct evidence to be retrieved for claims +L97: Supported or Refuted, and 50.91% if the correctness of the evidence is ignored, +L98: both indicating the difficulty but also the feasibility of the task. We also +L99: conducted oracle experiments in which components of the pipeline were replaced +L100: by the gold standard annotations, and observed that the most challenging part of +L101: the task is selecting the sentences containing the evidence. In addition to +L102: publishing the data via our website1, we also publish the annotation interfaces2 +L103: and the baseline system3 to stimulate further research on verification. 
+L104: +L105: Footnote 1: http://fever.ai +L106: +L107: Footnote 2: https://github.com/awslabs/fever +L108: +L109: Footnote 3: https://github.com/sheffieldnlp/fever-baselines +L110: +L111: ## 2 Related Works +L112: +L113: Vlachos and Riedel (2014) constructed a dataset for claim verification +L114: consisting of 106 claims, selecting data from fact-checking websites such as +L115: PolitiFact, taking advantage of the labelled claims available there. However, in +L116: order to develop claim verification components we typically require the +L117: justification for each verdict, including the sources used. While this +L118: information is usually available in justifications provided by the journalists, +L119: they are not in a machine-readable form. Thus, also considering the small number +L120: of claims, the task defined by the dataset proposed remains too challenging for +L121: the ML/NLP methods currently available. Wang (2017) extended this approach by +L122: including all 12.8K claims available by Politifact via its API, however the +L123: justification and the evidence contained in it was ignored in the experiments as +L124: it was not machine-readable. Instead, the claims were classified considering +L125: only the text and the metadata related to the person making the claim. While +L126: this rendered the task amenable to current NLP/ML methods, it does not allow for +L127: verification against any sources and no evidence needs to be returned to +L128: justify the verdicts. +L129: +L130: The Fake News challenge (Pomerleau and Rao, 2017) modelled verification as +L131: stance classification: given a claim and an article, predict whether the article +L132: supports, refutes, observes (neutrally states the claim) or is irrelevant to +L133: the claim. It consists of 50K labelled claim-article pairs, combining 300 claims +L134: with 2,582 articles. The claims and the articles were curated and labeled by +[28] (https://nlp.cs.washington.edu/triviaqa/) +**viewing lines [0 - 48] of 48** + +L0: +L1: URL: https://nlp.cs.washington.edu/triviaqa/ +L2: ## TriviaQA: A Large Scale Dataset for Reading Comprehension and Question +L3: Answering +L4: +L5: TriviaQA is a reading comprehension dataset containing over 650K question- +L6: answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by +L7: trivia enthusiasts and independently gathered evidence documents, six per +L8: question on average, that provide high quality distant supervision for answering +L9: the questions. The details can be found in our ACL 17 paper TriviaQA: A Large +L10: Scale Distantly Supervised Challenge Dataset for Reading Comprehension +L11: +L12: Mandar Joshi, Eunsol Choi, Daniel Weld, Luke Zettlemoyer. TriviaQA: A Large +L13: Scale Distantly Supervised Challenge Dataset for Reading Comprehension +L14: +L15: In Association for Computational Linguistics (ACL) 2017, Vancouver, Canada. +L16: [bib] +L17: +L18: ## News +L19: +L20: ##### Jul 2017 +L21: +L22: The TriviaQA leaderboard is now live on Codalab. Submit your predictions for +L23: evaluation on the test set! +L24: +L25: ## Data +L26: +L27: If you are interested in the reading comprehension task motivated in the paper, +L28: click on the link below to download the data. +L29: +L30: If you are interested in open domain QA, click on the link below to download the +L31: data. It contains the unfiltered dataset with 110K question-answer pairs. The +L32: Wikipedia and top 10 search documents can be obtained from the RC version. 
The +L33: main difference between the RC version above and the unfiltered dataset is that +L34: not all documents (in the unfiltered set) for a given question contain the +L35: answer string(s). This makes the unfiltered dataset more appropriate for IR- +L36: style QA. +L37: +L38: The University of Washington does not own the copyright of the questions and +L39: documents included in TriviaQA. +L40: +L41: ## Code +L42: +L43: Check out our Github repository. +L44: +L45: ## Contact +L46: +L47: For any questions about the code or data, please contact Mandar Joshi -- {first +L48: name of the first author}90[at]cs[dot]washington[dot]edu +Error fetching URL `https://research.google/pubs/natural-questions-a-benchmark-for-question-answering-research/` +Error fetching URL `https://microsoft.github.io/msmarco/` +Error fetching URL `https://github.com/beir-cellar/beir` +Error fetching URL `https://project-miracl.github.io/` +[29] (https://ar5iv.org/abs/2207.01206) +**viewing lines [0 - 122] of 1392** + +L0: +L1: URL: https://ar5iv.org/abs/2207.01206 +L2: # WebShop: Towards Scalable Real-World Web Interaction with Grounded Language +L3: Agents +L4: +L5: Shunyu Yao111Equal contribution. Project site with code, data, and demos: +L6: https://webshop-pnlp.github.io. Howard Chen111Equal contribution. Project site +L7: with code, data, and demos: https://webshop-pnlp.github.io. John Yang Karthik +L8: Narasimhan +L9: +L10: Department of Computer Science, Princeton University +L11: {shunyuy, howardchen, jy1682, karthikn}@princeton.edu +L12: +L13: ###### Abstract +L14: +L15: Existing benchmarks for grounding language in interactive environments either +L16: lack real-world linguistic elements, or prove difficult to scale up due to +L17: substantial human involvement in the collection of data or feedback signals. To +L18: bridge this gap, we develop WebShop – a simulated e-commerce website environment +L19: with million real-world products and 1.181.18 crowd-sourced text instructions. +L20: Given a text instruction specifying a product requirement, an agent needs to +L21: navigate multiple types of webpages and issue diverse actions to find, +L22: customize, and purchase an item. WebShop provides several challenges for +L23: language grounding including understanding compositional instructions, query +L24: (re-)formulation, comprehending and acting on noisy text in webpages, and +L25: performing strategic exploration. We collect over 12,08712,087 human +L26: demonstrations for the task, and train and evaluate a diverse range of agents +L27: using reinforcement learning, imitation learning, and pre-trained image and +L28: language models. Our best model achieves a task success rate of 1,6001,600, +L29: which outperforms rule-based heuristics (29%29\%) but is far lower than human +L30: expert performance (9.6%9.6\%). We also analyze agent and human trajectories and +L31: ablate various model components to provide insights for developing future +L32: agents with stronger language understanding and decision making abilities. 
+L33: Finally, we show that agents trained on WebShop exhibit non-trivial sim-to-real +L34: transfer when evaluated on amazon.com and ebay.com , indicating the potential +L35: value of WebShop in developing practical web-based agents that can operate in +L36: the wild.59%59\% +L37: +L38: ## 1 Introduction +L39: +L40: Recent advances in natural language processing (NLP) and reinforcement learning +L41: (RL) have brought about several exciting developments in agents that can perform +L42: sequential decision making while making use of linguistic context [30, 50, 58]. +L43: On the other hand, large-scale language models like GPT-3 [6] and BERT [11] are +L44: excelling at traditional NLP benchmarks such as text classification, +L45: information extraction and question answering. While the former set of tasks are +L46: limited in their set of linguistic concepts and prove difficult to scale up, +L47: the latter tasks usually contain static, non-interactive datasets that lack +L48: adequate grounding to extra-linguistic concepts [4]. In order to make further +L49: progress in building grounded language models, we believe there is a need for +L50: scalable interactive environments that contain: (1) language elements that +L51: reflect rich, real-world usage and are collectible at scale, and (2) task +L52: feedback that is well-defined and automatically computable to facilitate +L53: interactive learning, without the constant need for expensive feedback from +L54: humans. +L55: +L56: The world wide web (WWW) is a massive open-domain interactive environment that +L57: inherently satisfies the first aforementioned requirement through its +L58: interconnected set of pages with natural text, images and interactive elements. +L59: By being simultaneously scalable, semantic, interactive, dynamic and realistic, +L60: the web is uniquely different from existing environments for autonomous agents +L61: like games or 3D navigation. Moreover, the web also provides a practical +L62: environment to deploy trained agents, with great potential for alleviating human +L63: efforts in tedious tasks (e.g. buying products, booking appointments). While +L64: there has been prior work on building web-based tasks, they either lack depth in +L65: the transition and action spaces, or prove difficult to scale up. Some +L66: benchmarks only contain either a single classification task [39, 46, 31] or +L67: interactions containing only a handful of different pages in each episode [43]. +L68: Others propose tasks with longer horizons but are either limited to following +L69: hyperlinks for web navigation [36] or require human-in-the-loop feedback due to +L70: the lack of an automated reward function [33]. +L71: +L72: In this paper, we introduce WebShop (Figure 1) – a large-scale interactive web- +L73: based environment for language understanding and decision making – and train +L74: autonomous agents to complete tasks on this benchmark. With the goals of being +L75: scalable and containing realistic language and visual elements, WebShop emulates +L76: the task of online shopping on an e-commerce website, where the agent’s goal is +L77: to understand a human-provided text instruction and purchase a product to match +L78: the specifications. To do so, the agent needs to query the website’s search +L79: engine, choose items to explore from search results, open and read their +L80: description and details, and select the necessary options (e.g. 32 oz., red +L81: color) before clicking the ‘Buy’ button. 
In order to pick the optimal product +L82: that matches user requirements, the agent may need to view and compare various +L83: products (including backtracking between pages), and potentially perform +L84: multiple searches. WebShop contains over one million products scraped from +L85: amazon.com, over thousand crowdsourced instructions, and a diverse semantic +L86: action space of searching text queries and choosing text buttons. It is packaged +L87: into a convenient OpenAI Gym [5] environment and can be rendered in two modes +L88: (HTML or simple) with parallel observation spaces that are easy for human and +L89: model respectively. Rewards are automatically computed using a combination of +L90: programmatic matching functions that consider the attributes, type, options and +L91: price of the chosen product, alleviating the need for human evaluation and +L92: providing a path to scaling up interactive learning.1212 +L93: +L94: We develop several agents to perform this task, using both reinforcement +L95: learning (RL) and imitation learning (IL). We also leverage the latest pre- +L96: trained language models [26, 11] for representing and generating text. Our +L97: modular architecture includes a factorized processing of state observations and +L98: action choices using ResNets (visual) and Transformers (text), followed by an +L99: attention fusion layer that helps the agent contextually score each action. Our +L100: best agent achieves an average score of (out of 62.462.4) and successfully +L101: completes the task 100100 of the time, significantly higher than a heuristic +L102: baseline that achieves 28.7%28.7\% and 45.645.6, respectively. While this +L103: demonstrates the potential for IL and RL, the agents are still much lower than +L104: human experts, who can achieve 9.6%9.6\% and 82.182.1 on this task.*** In our +L105: analysis (§5.3), we observe that the task requires patience and consistency, +L106: which is lacking in some crowdsource workers, leading to lower scores. Even with +L107: this caveat, the gap between human performance and the model remains +L108: significant. We perform several analyses and ablation studies to identify the +L109: cause of this gap and find several avenues for agent improvement in the future +L110: including more robust search generation, explicit memory modules, and better +L111: handling of noisy web text. Finally, we also demonstrate an instance of sim-to- +L112: real transfer by deploying agents trained with WebShop to operate on amazon.com +L113: and ebay.com, and find that they can achieve similar performances despite search +L114: engine and product differences, and consistently outperform the rule baseline +L115: of using the first result returned by the commercial search engines when +L116: directly searching the instruction texts. This demonstrates the practical +L117: potential of our work towards developing agents that can operate autonomously on +L118: the world wide web (WWW).59.6%59.6\% +L119: +L120: ## 2 Related Work +L121: +L122: Reinforcement learning on the web. 
Nogueira and Cho [36] introduced WikiNav as a +Error fetching URL `http://alfworld.github.io/` +Error fetching URL `https://osu-nlp-group.github.io/Mind2Web/` +Error fetching URL `https://github.com/web-arena-x/visualwebarena` +[30] (https://ar5iv.org/pdf/2406.12172) +**viewing lines [0 - 127] of 1478** + +L0: +L1: URL: https://ar5iv.org/pdf/2406.12172 +L2: # Navigating the Labyrinth: Evaluating and Enhancing LLMs’ Ability to Reason +L3: About Search Problems +L4: +L5: Nasim Borazjanizadeh +L6: +L7: Berkeley AI Research, UC Berkeley +L8: \AndRoei Herzig +L9: Berkeley AI Research, UC Berkeley +L10: \AndTrevor Darrell +L11: Berkeley AI Research, UC Berkeley +L12: \AndRogerio Feris +L13: MIT-IBM Watson AI Lab +L14: \AndLeonid Karlinsky +L15: MIT-IBM Watson AI Lab +L16: +L17: ###### Abstract +L18: +L19: Recently, Large Language Models (LLMs) attained impressive performance in math +L20: and reasoning benchmarks. However, they still often struggle with logic problems +L21: and puzzles that are relatively easy for humans. To further investigate this, +L22: we introduce a new benchmark, SearchBench, containing 11 unique search problems, +L23: each equipped with automated pipelines to generate an arbitrary number of +L24: instances and analyze the feasibility, correctness, and optimality of LLM- +L25: generated solutions. We show that even the most advanced LLMs fail to solve +L26: these problems end-to-end in text, e.g., GPT4 solves only 1.4%. SearchBench +L27: problems require considering multiple pathways to the solution as well as +L28: backtracking, posing a significant challenge to auto-regressive models. +L29: Instructing LLMs to generate code that solves the problem helps, but only +L30: slightly, e.g., GPT4’s performance rises to 11.7%. In this work, we show that +L31: in-context learning with A* algorithm implementations enhances performance. The +L32: full potential of this promoting approach emerges when combined with our +L33: proposed Multi-Stage-Multi-Try method, which breaks down the algorithm +L34: implementation into two stages and verifies the first stage against unit tests, +L35: raising GPT-4’s performance above 57%. +L36: +L37: \doparttoc\faketableofcontents +L38: +L39: ### 1 Introduction +L40: +L41: The advent of Large Language Models (LLMs) has revolutionized the field of +L42: natural language processing, with models like Gemini[18], GPT-4[26] +L43: demonstrating unprecedented performance on reasoning tasks such as GSM8k[8]. +L44: However, these models still exhibit surprising failures on some intuitive +L45: tasks[2, 30, 22] and struggle with multi-step compositional reasoning, +L46: combinatorial problems, and planning [9, 40, 44]. Inspired by these observations +L47: and to further investigate LLMs’ reasoning abilities, we offer a new benchmark +L48: of search problems, SearchBench. The problems in SearchBench are combinatorial, +L49: defined as tasks that involve finding an optimal object from a finite set of +L50: objects, where the set of feasible solutions is either discrete or can be +L51: reduced to a discrete set [43]. These problems are predominantly NP-hard and +L52: necessitate systematic exploration of action paths and backtracking to +L53: intermediate feasible states; thus, SearchBench implicitly investigates the +L54: LLM’s capacity for non-linear reasoning. 
+L55: +L56: SearchBench has five distinct problem categories: (i) pathfinding, (ii) puzzles, +L57: (iii) subset sum, (iv) sorting, and (v) under-determined systems; further +L58: divided into 11 unique problem types. Each problem type is inspired by known +L59: puzzles and combinatorial problems but augmented with modified rules and +L60: constraints to ensure substantial differences from similar problems LLMs +L61: encountered during their training. And the solution to each problem is a +L62: sequence of actions leading from the initial state to the goal state, while +L63: optimizing a cost. We generate100 instances of varying difficulty per problem +L64: type using an automatic pipeline, resulting in 1107 problem instances total. +L65: Each problem type in SearchBench is equipped with an automatic pipeline that +L66: evaluates LLM-generated solutions on three dimensions: feasibility, correctness, +L67: and optimality. Feasibility checks whether the actions taken follow the +L68: problem’s rules; correctness verifies if a feasible solution reaches the goal +L69: state; and optimality checks if the least cost solution was found.∼\sim +L70: +L71: SearchBench is challenging to LLMs due to several factors. Firstly, natural +L72: language is less suited for describing or updating accurate representations of +L73: complex intermediate states. Secondly, our experiments show LLMs struggle with +L74: exploring a combinatorial exponentially exploding state-space. Despite the fact +L75: that some methods were developed for long-context reasoning [4, 13, 50], +L76: SearchBench problems cannot be easily summarized [4], reasoned about [13], or +L77: processed in parallel due to their size [50, 45]. Our findings show that even +L78: the strongest LLMs [26] almost completely fail to solve SearchBench problems in +L79: text-only mode. +L80: +L81: To provide further insights, we show that LLMs’ performance on SearchBench +L82: improves by prompting the models to solve the problems using the A* search +L83: algorithm [11]. A* is a heuristic-based graph traversal algorithm known for its +L84: time efficiency and provable optimality guarantees, making it the most suitable +L85: search algorithm for solving the problems in our benchmark. This method +L86: leverages A*’s correctness and optimality, while offloading some of the non- +L87: linear computations involved in searching the state-space to code execution. +L88: Additionally, to improve the quality of generated A* codes, motivated that +L89: ensembling helps generation quality[41, 47, 21], we introduce the Multi-Stage- +L90: Multi-Try (MSMT) inference strategy. In the "Multi-Try" aspect of MSMT, before +L91: evaluating the solution returned by the code, we first verify whether the code +L92: generated by the model satisfies a set of unit tests: (i) it is executable; (ii) +L93: it returns a list as output; and (iii) data type of list elements is correct. +L94: If the code fails any of the tests, MSMT re-runs the LLM until a valid code is +L95: generated or allowed number of attempts is exhausted. The "Multi-Stage" aspect +L96: of MSMT generates the code in two steps: (i) ‘A* Implementation’ - the +L97: implementation of an instance-agnostic A* algorithm for the problem type; and +L98: (ii) Initialization - the instantiation of initial conditions and state +L99: variables of the problem instance. 
In MSMT ’Initialization’ is generated +L100: conditioned on the ‘A* Implementation’ (which is generated separately first and +L101: provided in ‘Intitialization’ prompt). We demonstrate that our MSMT A* method +L102: (Fig. 2) significantly enhances the LLMs’ ability to solve search problems, +L103: outperforming all other prompting strategies we used to evaluate models on +L104: SearchBench, including 0-shot text, 4-shot Chain-of-Thought (CoT)[42] text, +L105: 0-shot code generation, and 4-shot A* prompting with the naive greedy decoding +L106: strategy. +L107: +L108: To summarize, our main contributions are as follows: (i) We contribute the +L109: SearchBench benchmark designed to assess the capability of LLMs in solving +L110: state-based problems requiring combinatorial search; (ii) We introduce the A* +L111: Multi-Stage-Multi-Try (MSMT) framework that leverages the flexibility and +L112: natural language comprehension capabilities of LLMs, reliability and structured +L113: nature of the A* algorithm, along with unitests to verify LLMs’ code generation +L114: in order to address search problems. (iii) We show a strong advantage of MSMT A* +L115: prompting for solving search problems robustly with LLMs, suggesting +L116: interesting future work directions and yet leaving significant room for future +L117: improvement on SearchBench. +L118: +L119: ### 2 SearchBench Benchmark +L120: +L121: SearchBench includes five categories of problems: puzzles, subset sum, sorting, +L122: pathfinding, and under-determined systems, further subdivided into 11 problem +L123: types and instantiated as 1107 natural language problem instances. Each problem +L124: type requires a different algorithmic approach to solve. Our problems are +L125: inspired by well-known (mostly NP-hard) puzzles and search problems, though +L126: their rules were modified to ensure a solved instance of a problem similar to +L127: any SearchBench problem was not observed during LLMs’ massive internet-scale +Error fetching URL `https://ar5iv.labs.ar5iv.org/html/2312.15820` +Error fetching URL `https://github.com/jxwuyi/WebNav` + + +在同时打开多个基准测试相关页面后,我们获得了大量信息。WebVoyager 是一个多模态网页智能体基准测试,专注于在真实网站上进行端到端任务执行,其特点包括多模态输入(屏幕截图和文本)、在15个常用网站上的300个任务,以及使用GPT-4V进行自动评估。使用场景是评估网页智能体在真实世界中的导航和任务完成能力。例题可能是在亚马逊上找到特定产品并将其加入购物车。 + +WebArena 是一个真实且可复现的网页环境,用于评估自主智能体。其特点包括四个功能完整的网站(电子商务、论坛、协作开发、内容管理)、812项长视野任务,以及专注于功能正确性的评估。使用场景是评估智能体在复杂网页任务中的表现。例题可能是在电子商务网站上购买符合特定标准的商品。 + +GAIA 是一个针对通用AI助手的基准测试,要求具备推理、多模态处理、网页浏览和工具使用等能力。其特点包括466个问题,对人类来说概念简单但对AI具有挑战性,答案简洁明确,支持自动评估。使用场景是评估AI助手在真实世界问题中的表现。例题可能是“找出2022年法国总统选举中获胜者的出生城市”。 + +ToolBench 是一个用于评估LLM工具操作能力的基准测试。其特点包括多样化的软件工具、真实世界任务,以及易于使用的基础设施。使用场景是评估智能体使用工具完成任务的能力。例题可能是使用计算器API计算表达式,或使用搜索API查找信息。 + +HotpotQA 是一个多跳问答基准测试,其特点包括基于Wikipedia的113k个问题-答案对,需要多文档推理,并提供句子级别的支持事实。使用场景是评估模型在回答需要多跳推理的问题时的能力。例题可能是“《了不起的盖茨比》的作者还写过哪些小说?”。 + +FEVER 是一个事实提取和验证基准测试,其特点包括185,445个基于Wikipedia的声明,需要分类为支持、反驳或信息不足,并需要提供证据句子。使用场景是评估系统验证声称事实的能力。例题可能是“斐济最大的岛屿是考艾岛”,系统需要检索证据来支持或反驳这一说法。 + +TriviaQA 是一个阅读理解基准测试,其特点包括超过65万个问题-答案-证据三元组,问题由琐事爱好者编写,并附带证据文档。使用场景是评估模型在开放域问答中的能力。例题可能是“哪位演员在《星球大战:原力觉醒》中扮演凯洛·伦?”。 + +Natural Questions 是一个问答基准测试,其特点包括来自谷歌搜索的真实问题,答案来自Wikipedia,并包含长答案和短答案。使用场景是评估开放域问答系统。例题可能是“谁写了《傲慢与偏见》?”,答案可能是“简·奥斯汀”。 + +MS MARCO 是一个用于深度学习搜索的基准测试,其特点包括来自Bing的真实搜索查询、人工生成的答案,以及用于段落排序和文档排序的任务。使用场景是训练和评估检索模型。例题可能是“如何冲泡咖啡?”,系统需要检索相关段落来回答。 + +BEIR 是一个用于零样本评估的异构信息检索基准测试,其特点包括18个数据集、9项任务,涵盖事实核查、问答、对话检索等。使用场景是评估检索模型在零样本设置下的泛化能力。例题可能是给定一个查询,在文档集合中检索相关文档。 + +MIRACL 
是一个多语言信息检索基准测试,其特点涵盖18种语言,专注于ad-hoc检索,并包含人工标注的相关性判断。使用场景是评估多语言检索模型。例题可能是用中文查询“全球变暖的原因”,并检索相关文档。 + +WebShop 是一个模拟电子商务网站环境,用于评估语言智能体。其特点包括118万个真实产品、12,087条众包文本指令,以及需要导航、搜索和购买的任务。使用场景是评估智能体在电子商务任务中的表现。例题可能是“找到一款价格低于50美元的无线蓝牙耳机”。 + +ALFWorld 是一个将文本和具身环境对齐的基准测试,用于在文本世界中训练智能体,然后将其转移到具身环境中。其特点包括基于文本的环境,与ALFRED数据集并行,并专注于指令跟随。使用场景是评估智能体在家庭任务中的规划能力。例题可能是“在厨房里找到一个杯子,把它拿到客厅”。 + +Mind2Web 是一个用于评估网页通用智能体的基准测试,其特点包括来自真实网站的任务、需要多步交互,并专注于语言指令跟随。使用场景是评估智能体在复杂网页任务中的表现。例题可能是“在GitHub上创建一个新的仓库,命名为‘test-repo’”。 + +VisualWebArena 是一个用于多模态智能体的基准测试,其特点包括真实视觉网页任务、需要理解视觉和文本元素,并评估功能正确性。使用场景是评估多模态智能体在视觉接地网页任务中的表现。例题可能是“在纽约时报网站上找到今天头条新闻的图片”。 + +SearchBench 是一个用于评估LLM解决搜索问题能力的基准测试,其特点包括11种搜索问题类型、自动生成实例,并评估可行性、正确性和最优性。使用场景是评估LLM在组合搜索问题中的推理能力。例题可能是“在一个迷宫中找到从起点到终点的最短路径”。 + +WebVLN 是一个网站上的视觉语言导航基准测试,其特点包括基于问题的指令、需要导航网站,并评估导航成功率。使用场景是评估智能体在网站中基于视觉和语言指令进行导航的能力。例题可能是“找到‘联系我们’页面并提取电子邮件地址”。 + +WebNav 是一个用于目标驱动网页导航的基准测试,其特点包括部分观察环境、需要规划和理解自然语言指令。使用场景是评估智能体在网页导航中的能力。例题可能是“导航到维基百科上关于‘人工智能’的页面”。 + +现在,我需要整理一份针对搜索智能体的基准测试列表。考虑到用户可能更关注中文资料,我会优先考虑那些有中文描述的基准测试,例如BrowseComp、HSCodeComp、GAIA、WebVoyager、WebArena、Mind2Web、WebShop、ALFWorld、HotpotQA、FEVER、TriviaQA、Natural Questions、MS MARCO、BEIR、MIRACL、SearchBench、WebVLN、WebNav。我会为每个基准测试提供特点、使用场景和例题。同时,我会引用相关来源,例如HSCodeComp论文、BrowseComp博客文章、WebVoyager论文、WebArena论文、GAIA论文、WebShop论文、SearchBench论文等。我的回答将用中文呈现,并确保内容全面、结构清晰。现在开始撰写回答。针对搜索智能体(Search Agent)的评估,已涌现出多个侧重点不同的基准测试。以下将分类介绍这些基准,涵盖其核心特点、典型使用场景及例题。 + +--- + +## 🔍 1. 网页浏览/导航类基准 +这类基准要求智能体在真实或模拟的网页环境中执行多步操作,以完成复杂任务。 + +| 基准 | 核心特点 | 使用场景 | 例题 | +|------|----------|----------|------| +| **BrowseComp**(OpenAI)【3†L16-L35】 | - 1,266 个挑战性问题,需持续导航、查找难以直接搜索的信息
- 问题设计保证人类在 10 分钟内无法解决,且现有模型(包括带浏览的 ChatGPT)也难以完成
- 答案为短字符串,便于自动验证 | 评估智能体在复杂网络浏览任务中的持久性、创造性搜索能力 | “找出 2018‑2023 年间在 EMNLP 会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题”【3†L45-L49】 | +| **WebVoyager**【24†L18-L33】 | - 多模态(视觉+文本)端到端网页智能体基准
- 包含 300 个真实任务,覆盖 15 个常用网站(如 Amazon、Wikipedia)
- 使用 GPT‑4V 进行自动评估,与人工评估一致性达 85.3% | 评估智能体在真实网站上的端到端任务完成能力(多模态输入) | “在 Amazon 上找到‘无线蓝牙耳机’并按价格从低到高排序,然后选择最便宜的一款加入购物车” | +| **WebArena**【25†L15-L35】 | - 完全可交互的、高度真实的网页环境(4 个领域:电商、论坛、协作开发、内容管理)
- 812 项长视野任务,评估功能正确性而非表面动作匹配
- 最佳 GPT‑4 智能体成功率仅 14.41%,远低于人类的 78.24% | 评估自主智能体在真实网页环境中的复杂任务执行能力 | “在电商网站中购买一双‘黑色运动鞋’,价格低于 $80,并将收货地址修改为纽约”【25†L86-L92】 | +| **Mind2Web**【19†L4-L14】 | - 首个面向通用网页智能体的数据集,任务来自真实网站
- 支持在线评估(Online‑Mind2Web),包含 300 个实时任务、136 个流行网站
- 强调多步指令跟随与复杂交互 | 评估智能体在任意网站上的通用任务完成能力 | “在 GitHub 上创建一个名为 ‘test‑repo’ 的公开仓库,并添加一个 README.md 文件” | +| **VisualWebArena**【20†L4-L10】 | - 多模态网页智能体基准,任务依赖视觉信息(截图)
- 包含丰富的视觉接地任务,如识别图片、按钮位置等
- 评估智能体在真实视觉网页环境中的表现 | 评估多模态智能体在视觉丰富的网页任务中的能力 | “在纽约时报网站找到今天头条新闻的配图,并描述图中主要物体” | +| **WebVLN**【22†L4-L11】 | - 网站上的视觉‑语言导航任务,智能体需根据问题指令在网站中导航
- 数据集 WebVLN‑v1 包含多种网站导航场景
- 评估导航成功率与任务完成度 | 评估智能体在网站中基于视觉与语言指令的导航能力 | “找到‘联系我们’页面,并提取其中的邮箱地址” | +| **WebNav**【23†L4-L11】 | - 目标驱动的网页导航基准,环境为部分观测
- 智能体需理解自然语言指令并在网页中规划动作序列
- 早期经典任务,常用于测试基础导航能力 | 评估智能体在部分观测网页环境中的规划与指令跟随能力 | “导航到 Wikipedia 上关于‘人工智能’的页面,并摘录第一段内容” | + +--- + +## 🛠️ 2. 工具使用/通用助手类基准 +这类基准测试智能体调用外部工具(如搜索、计算、API)来解决实际问题的能力。 + +| 基准 | 核心特点 | 使用场景 | 例题 | +|------|----------|----------|------| +| **GAIA**【26†L10-L25】 | - 466 个真实世界问题,需要推理、多模态处理、网页浏览、工具使用等综合能力
- 对人类简单(92% 准确率),但对最强 GPT‑4 仅 15%
- 答案唯一、事实性,支持自动评估 | 评估通用 AI 助手在需要多步工具使用的真实任务中的表现 | “2022 年法国总统选举的获胜者出生在哪个城市?”【26†L10-L25】 | +| **ToolBench**(OpenBMB)【7†L4-L10】 | - 包含多样软件工具的真实任务基准
- 提供易用的基础设施(虚拟 API 服务器等)
- 评估 LLM 调用工具完成复杂任务的能力 | 评估智能体在工具调用与组合上的熟练度 | “使用计算器 API 计算 (3.14 * 7.2)^2,然后使用搜索 API 查找‘圆周率’的最新精确值” | +| **HSCodeComp**【2†L16-L35】 | - 首个针对深度搜索智能体在分层规则应用上的专家级电商基准
- 任务:根据嘈杂的商品描述预测 10 位 HS 编码(关税规则)
- 632 个商品条目,标注由多位电商专家完成,最佳智能体准确率仅 46.8%(人类 95%) | 评估智能体在复杂规则(如关税分类)下的深层推理与搜索能力 | “给定商品描述‘男士纯棉针织T恤,领口为V领’,预测其 10 位 HS 编码”【2†L24-L32】 | + +--- + +## ❓ 3. 问答/检索类基准 +这类基准主要评估智能体在开放域或特定领域的信息检索与问答能力。 + +| 基准 | 核心特点 | 使用场景 | 例题 | +|------|----------|----------|------| +| **HotpotQA**【8†L4-L10】 | - 113k 个基于 Wikipedia 的多跳问答对
- 需要跨多个文档推理,并提供句子级支持事实
- 涵盖多样的问题类型(比较、列举、因果等) | 评估模型在需要多跳推理的开放域问答中的能力 | “《了不起的盖茨比》的作者还写过哪些小说?” | +| **FEVER**【27†L16-L28】 | - 185,445 个基于 Wikipedia 的声明,需分类为 Supported/Refuted/NotEnoughInfo
- 要求提供证据句子(可多句、多页面)
- 挑战性高(最佳系统仅 31.87% 准确率) | 评估系统在事实核查与证据检索上的能力 | “斐济最大的岛屿是考艾岛。”【27†L48-L50】 | +| **TriviaQA**【28†L5-L10】 | - 超过 650k 个问答‑证据三元组,问题由琐事爱好者编写
- 每个问题平均提供 6 篇证据文档,适合远程监督阅读
- 包含阅读理解和开放域 QA 两种任务设置 | 评估模型在开放域琐事问答中的检索与阅读理解能力 | “哪位演员在《星球大战:原力觉醒》中扮演凯洛·伦?” | +| **Natural Questions**【11†L4-L8】 | - 来自 Google 搜索的真实用户问题,答案来自 Wikipedia
- 包含长答案(段落)和短答案(实体/日期等)
- 训练集 307k,开发/测试集各 8k | 评估开放域问答系统对真实用户查询的响应能力 | “谁写了《傲慢与偏见》?”(答案:“简·奥斯汀”) | +| **MS MARCO**【12†L4-L10】 | - 基于 Bing 真实搜索查询的检索/问答基准
- 包含段落排序、文档排序、问答等任务
- 数据规模大(约 500k 查询,880 万段落) | 训练与评估检索模型在真实搜索场景下的表现 | “如何冲泡咖啡?”(系统需检索相关段落回答) | +| **BEIR**【13†L4-L10】 | - 异构零样本检索基准,包含 18 个数据集、9 种任务(事实核查、QA、对话检索等)
- 评估模型在未见任务上的泛化能力
- 已成为检索模型的标准评估套件 | 评估检索模型在零样本设置下的跨任务泛化能力 | 给定查询“全球变暖的主要原因”,在文档集合中检索相关文档 | +| **MIRACL**【14†L4-L10】 | - 多语言检索基准,覆盖 18 种语言(包括中文、阿拉伯语等)
- 专注于 ad‑hoc 检索,所有查询与文档均经过人工标注
- 提供多语言检索挑战(WSDM 2023 Cup) | 评估多语言检索模型在不同语言下的检索效果 | 中文查询:“全球变暖的原因”,检索相关中文文档 | + +--- + +## 🧩 4. 组合搜索/推理类基准 +这类基准专门测试智能体在组合搜索问题上的逻辑推理与规划能力。 + +| 基准 | 核心特点 | 使用场景 | 例题 | +|------|----------|----------|------| +| **SearchBench**【30†L22-L36】 | - 11 种独特的搜索问题类型(路径查找、谜题、子集和、排序、欠定系统等)
- 自动生成任意数量实例,并评估解决方案的可行性、正确性、最优性
- GPT‑4 在纯文本模式下仅解决 1.4%,通过 A* 代码生成提升至 11.7% | 评估 LLM 在组合搜索问题上的推理与规划能力 | “在一个 5x5 迷宫中,从起点 (0,0) 到终点 (4,4) 的最短路径是什么?(障碍物位置已知)”【30†L56-L64】 | + +--- + +## 🛒 5. 特定领域/仿真环境基准 +这类基准针对特定领域(如电商、家庭环境)设计,测试智能体在仿真环境中的交互能力。 + +| 基准 | 核心特点 | 使用场景 | 例题 | +|------|----------|----------|------| +| **WebShop**【29†L15-L30】 | - 模拟电商网站环境,包含 118 万真实商品、12,087 条众包指令
- 智能体需根据文本指令搜索、筛选、购买商品
- 奖励自动计算,最佳模型成功率 29%(人类 59%) | 评估语言智能体在电商任务中的导航、搜索与决策能力 | “找到一款价格低于 $50 的无线蓝牙耳机,评分至少 4 星,并加入购物车”【29†L20-L25】 | +| **ALFWorld**【18†L4-L10】 | - 将文本环境(TextWorld)与具身环境(ALFRED)对齐的基准
- 智能体先在文本中学习抽象策略,然后在视觉环境中执行
- 包含 134 个家庭任务(如“在厨房找到杯子并拿到客厅”) | 评估智能体在文本与具身环境之间的迁移学习与规划能力 | “在厨房找到‘杯子’,把它拿到客厅放在桌子上” | + +--- + +## 📊 总结 +以上基准覆盖了搜索智能体的不同能力维度:从**网页浏览、工具使用、问答检索**,到**组合搜索推理**以及**特定领域仿真**。选择合适的基准取决于您要评估的具体能力(如多模态交互、工具调用、多跳推理等)。这些基准大多提供公开的数据集与评估代码,可直接用于实验。 + +> 注:本文基于 2025 年 11 月前的公开资料整理,部分基准可能已有更新版本,建议访问对应官网获取最新信息。<|end▁of▁sentence|> \ No newline at end of file diff --git a/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt b/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt new file mode 100644 index 0000000000..9a014a7947 --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt @@ -0,0 +1,1069 @@ +<|begin▁of▁sentence|><|User|> + +## Tools + +You have access to a set of tools you can use to answer the user's question. +You can invoke functions by writing a "<|DSML|function_calls>" block like the following as part of your reply to the user: +<|DSML|function_calls> +<|DSML|invoke name="$FUNCTION_NAME"> +<|DSML|parameter name="$PARAMETER_NAME" string="true|false">$PARAMETER_VALUE +... + +<|DSML|invoke name="$FUNCTION_NAME2"> +... + + + +String and scalar parameters should be specified as is without any escaping or quotes, while lists and objects should use JSON format. The "string" attribute should be set to "true" for string type parameters and "false" for other types (numbers, booleans, arrays, objects). + +If the thinking_mode is enabled, then after function results you should strongly consider outputting a thinking block. Here is an example: + +<|DSML|function_calls> +... + + + +... + + +...thinking about results + +Here are the functions available in JSONSchema format: + +{"name": "search", "description": "Searches for information related to query and displays topn results.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The search query string"}, "topn": {"type": "integer", "description": "Number of top results to display", "default": 10}, "source": {"type": "string", "description": "Source to search within", "enum": ["web", "news"], "default": "web"}}, "required": ["query"], "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} +{"name": "open", "description": "Opens the link id from the page indicated by cursor starting at line number loc, showing num_lines lines. Valid link ids are displayed with the formatting: 【{id}†.*】. If cursor is not provided, the most recent page is implied. If id is a string, it is treated as a fully qualified URL associated with source. If loc is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available. 
Use this function without id to scroll to a new location of an opened page.", "parameters": {"type": "object", "properties": {"id": {"description": "Link ID (number) or fully qualified URL (string)", "anyOf": [{"type": "integer"}, {"type": "string"}], "default": -1}, "cursor": {"type": "integer", "description": "Page cursor indicator", "default": -1}, "loc": {"type": "integer", "description": "Starting line number", "default": -1}, "num_lines": {"type": "integer", "description": "Number of lines to show", "default": -1}, "view_source": {"type": "boolean", "description": "Whether to view source", "default": false}, "source": {"type": "string", "description": "Source associated with the URL", "enum": ["web", "news"], "default": "web"}}, "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} +{"name": "find", "description": "Finds exact matches of pattern in the current page, or the page given by cursor.", "parameters": {"type": "object", "properties": {"pattern": {"type": "string", "description": "The exact pattern to search for"}, "cursor": {"type": "integer", "description": "Page cursor to search in", "default": -1}}, "required": ["pattern"], "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} +{"name": "python", "description": "Use this tool to execute Python code. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files). When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The python code to execute in the jupyter environment."}}, "required": ["code"], "additionalProperties": false, "$schema": "http://json-schema.org/draft-07/schema#"}} + + + +# The user's message is: In a year before 2023, between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered. That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%. Who was the emeritus board member that year? Name only, no titles.<|Assistant|>The user asks: "In a year before 2023, between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered. That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%. 
Who was the emeritus board member that year? Name only, no titles." + +We need to parse the puzzle. + +We have clues about a press release about vaccine doses for a country, dated between Dec 1-15 inclusive, reporting as of 5th of that month total doses received 16,201,670 and administered 7,583,134. That is likely a COVID-19 vaccine update for some country. Which country? Might be a smaller country. Could be something like "Uganda", "Zimbabwe", "Kenya", "Philippines", etc. Let's search. + +Search for "16,201,670 vaccine doses" "7,583,134 administered". + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">"16,201,670" vaccine doses +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[0] "16,201,670" vaccine doses (Search_Results/"16,201,670" vaccine doses) +**viewing lines [0 - 38] of 38** + +L0: +L1: URL: Search_Results/"16,201,670" vaccine doses +L2: # Search Results +L3: +L4: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: +L5: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had +L6: received a total of 16,201,670 vaccines, with 7,583,134 administered. While +L7: vaccine acceptance is ... +L8: * 【1†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec +L9: 1, 2021 — Total Doses Received 16,201,670. Table 10 gives the total vaccines +L10: received since the start of Covid -19 vaccination exercise in the country. +L11: * 【2†Output Result Page; publish_date: none†open.unicef.org】 ... 16,201,670 +L12: doses of multiple vaccines nationwide and full vaccination of 15.5 per cent with +L13: two doses of COVID-19 vaccine as of 31 December 2021. +L14: * 【3†rebased GDP; publish_date: none†documents1.worldbank.org】 Dec 7, 2021 — +L15: As of December 5, 2021,. Kenya had received a total of 16,201,670 vaccines, +L16: with. 7,583,134 administered. Vaccine acceptance is reportedly high. +L17: * 【4†Integrated Annual Report; publish_date: none†www.co-opbank.co.ke】 May 27, +L18: 2022 — ... doses of Covid-19 vaccines and administered close to 17 million ... +L19: 16,201,670, huku 7,583,134 zikiwa tayari zimedungwa watu. Bado kuna ... +L20: * 【5†World Bank lifts Kenya's growth prospect to 5% in 2021; publish_date: +L21: none†www.africa-press.net】 ... 16,201,670 vaccines, with 7,583,134 administered. +L22: While vaccine acceptance is reportedly high there is still a long way to go +L23: towards the government's ... +L24: * 【6†2020 Annual Report United States; publish_date: none†www.hi-us.org】 +L25: including medicine, vaccines, Covid-19 prevention kits, and kits of ... +L26: 16,201,670. 9,630,584. Prepaid expenses. 56,707. 22,601. Total current assets. +L27: * 【7†HOSPITAL CENTRAL DE LA DEFENSA; publish_date: +L28: none†contrataciondelestado.es】 16201670 X. URPA-URPA. 0. HCD-000393/00. KONTRON. +L29: 7000 ... LASER SHOT LBP-1120. CNEUR-C. NEUROFISIOLOGÍA. 0 ... VICTOREEN CT HEAD +L30: DOSE PHANTON. 43650-014. +L31: * 【8†body metal concentrations: Topics by ...; publish_date: +L32: none†www.science.gov】 PMID:16201670. Design Characteristics and Tobacco Metal +L33: Concentrations in Filtered Cigars. PubMed. Caruso, Rosalie V; O'Connor, Richard +L34: J; Travers, Mark J ... +L35: * 【9†Public Offices, Personal Demands: Capability in ...; publish_date: +L36: none†dokumen.pub】 ... (16201670).9 The Institutiones Politicae consists of two +L37: books. 
Book one deals with the origin, nature and goal of the commonwealth on +L38: the one hand, and the ... + + +Result shows Kenya. So the press release is about Kenya's vaccine doses as of Dec 5, 2021. The press release likely by World Bank or something. The search result shows "World Bank lifts Kenya's growth prospect..." maybe a press release dated Dec 14, 2021. But the clue says "between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered." That matches Kenya on Dec 5, 2021. So the year is 2021. + +Now the next clue: "That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services." + +This sounds like an environmental or conservation organization. "Nearly 2.2 million members and supporters contributed over $49 million." That might be something like "The Nature Conservancy", "World Wildlife Fund", "Sierra Club", "Environmental Defense Fund", "Natural Resources Defense Council", etc. Let's search. + +Search for "nearly 2.2 million members and supporters contributed over $49 million" "12% of total revenue coming from contributed services". + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">"nearly 2.2 million members" "contributed over $49 million" +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[1] "nearly 2.2 million members" "contributed over $49 million" (Search_Results/"nearly 2.2 million members" "contributed over $49 million") +**viewing lines [0 - 41] of 41** + +L0: +L1: URL: Search_Results/"nearly 2.2 million members" "contributed over $49 million" +L2: # Search Results +L3: +L4: * 【0†The Global Good Fund, Microsoft and Goodwill join forces ...; +L5: publish_date: none†globalgoodfund.org】 Sep 28, 2022 — ... more than 2.2 million +L6: people. They additionally contribute close to $49 billion in local, state and +L7: federal tax revenues. When COVID hit ... +L8: * 【1†Almost 22 billion American tax dollars spent to wipe out a ...; +L9: publish_date: none†www.facebook.com】 US military funding for Israel's war crimes +L10: in Lebanon and Gaza has now cost US taxpayers over $22 billion. When millions +L11: struggle to afford the ... +L12: * 【2†Corporate America has largely abandoned its post-January ...; +L13: publish_date: none†www.citizensforethics.org】 Jul 29, 2025 — Since the January 6 +L14: insurrection, over 2,000 corporate and industry group PACs have given over $174 +L15: million to members of the Sedition ... +L16: * 【3†Audit shows millions in questionable taxpayer spending at ...; +L17: publish_date: none†www.aol.com】 18 hours ago — ... nearly doubled from 1.3 +L18: million to about 2.2 million. That is more than one in four Washington state +L19: residents receiving Medicaid, and the ... +L20: * 【4†Incarceration and Poverty in the United States - AAF; publish_date: +L21: none†www.americanactionforum.org】 Jun 30, 2020 — The United States currently +L22: incarcerates 2.2 million people, nearly half of whom are non-violent drug +L23: offenders, accused people held pre ... 
+L24: * 【5†How the NRA Spends Revenue (2023); publish_date: none†paddockpost.com】 +L25: Apr 25, 2025 — In 2023, the NRA reported total revenue of $176 million (compared +L26: to $211 million in 2022, $227 million in 2021, $282 million in 2020, $292 ... +L27: * 【6†NCA Newsroom; publish_date: none†www.ncausa.org】 ... 2.2 million U.S. +L28: jobs—operating in every U.S. state and territory and contributing nearly $350 +L29: billion to the U.S. economy every year. For media inquiries ... +L30: * 【7†The Early Childhood Educator Workforce; publish_date: +L31: none†cscce.berkeley.edu】 About 2.2 million adults are paid to care for and +L32: educate more than 9.7 million children between birth and age five in center- +L33: based and home-based settings. +L34: * 【8†Bringing Out the Worst in Congress: CRA By the Numbers ...; publish_date: +L35: none†progressivereform.org】 Jul 14, 2025 — In fact, the top ten recipients (out +L36: of 25) account for almost $7 million, showing how these contributions are +L37: targeted and strategic. Real ... +L38: * 【9†Club 100: Over 100 Americans gave more than $100000 to ...; publish_date: +L39: none†publicintegrity.org】 Jan 7, 2022 — Through Internet investments during the +L40: 1990s, he attracted 2.2 million online accounts valued at $174 million. Schwab +L41: has been a vocal ... + + +Not that. Try search for "2.2 million members and supporters contributed over $49 million". + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">"2.2 million members" "contributed over $49" +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[2] "2.2 million members" "contributed over $49" (Search_Results/"2.2 million members" "contributed over $49") +**viewing lines [0 - 2] of 2** + +L0: +L1: URL: Search_Results/"2.2 million members" "contributed over $49" +L2: # Search Results + + +Maybe it's an annual report of a non-profit. Search for "annual report 2.2 million members contributed $49 million". + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">"2.2 million members" "annual report" $49 million +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[3] "2.2 million members" "annual report" $49 million (Search_Results/"2.2 million members" "annual report" $49 million) +**viewing lines [0 - 36] of 36** + +L0: +L1: URL: Search_Results/"2.2 million members" "annual report" $49 million +L2: # Search Results +L3: +L4: * 【0†20-F; publish_date: none†www.sec.gov】 ANNUAL REPORT PURSUANT TO SECTION +L5: ... Our membership grew from 2.1 million members as at December 31, 2023 to 2.2 +L6: million members as at December 31, 2024. +L7: * 【1†Oportun Reports Fourth Quarter and Full Year 2023 ...; publish_date: +L8: none†investor.oportun.com】 Mar 12, 2024 — Oportun (Nasdaq: OPRT) is a mission- +L9: driven fintech that puts its 2.2 million members' financial goals within reach. +L10: ... annual report on ... +L11: * 【2†2 0 21; publish_date: none†www.annualreports.com】 ANNUAL REPORT. 2. 0. +L12: 21. 2. 0. 21. Page 2. 2. DEFENDERS OF WILDLIFE. 2. 0. 21. 2. 0. 21 ... In 2021, +L13: Defenders of Wildlife's nearly 2.2 million members and. +L14: * 【3†Annual report and accounts 2020; publish_date: none†www.3i.com】 +L15: Disclaimer. The Annual report and accounts have been prepared solely to provide +L16: information to shareholders. ... 2.2 million members. 
In December 2019, we sold +L17: ... +L18: * 【4†united states securities and exchange commission; publish_date: +L19: none†s21.q4cdn.com】 Dec 14, 2018 — ☒ ANNUAL REPORT ... approximately 5.7 million +L20: Members, including approximately 2.2 million Members from the acquisition of +L21: Best Doctors. +L22: * 【5†securities and exchange commission; publish_date: none†www.sec.gov】 ... +L23: Annual Report on Form 10-K filed with the SEC on March 10, 2020. The ... 2.2 +L24: million members at June 30, 2020. The following table highlights the ... +L25: * 【6†2022-23 Annual Comprehensive Financial Report - CalPERS; publish_date: +L26: none†www.calpers.ca.gov】 Nov 17, 2023 — ... annual report is consistent with +L27: these accounting principles ... 2.2 million members and health benefit services +L28: to over 1.5 million ... +L29: * 【7†Medical Centers Report; publish_date: +L30: none†regents.universityofcalifornia.edu】 Oct 8, 2019 — which will allow its 2.2 +L31: million members to receive advanced and ... annual report of the University of +L32: California. For the Medical ... +L33: * 【8†Working TogeTher to make HealtH care better; publish_date: +L34: none†www.unitedhealthgroup.com】 Feb 8, 2012 — ANNUAL REPORT PURSUANT TO SECTION +L35: 13 OR 15(d) OF THE ... approximately 2.2 million members enrolled in its +L36: Medicare. Advantage ... + + +Result 2 shows "Defenders of Wildlife". That might be the organization. Let's open. + +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">3 +<|DSML|parameter name="id" string="false">2 + +<|end▁of▁sentence|> + + +[4] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf) +**viewing lines [0 - 53] of 261** + +L0: +L1: URL: https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders- +L2: wildlife_2021.pdf +L3: ===== Page 1 ===== 2021 ANNUAL REPORT ===== Page 2 ===== # DEFENDERS OF WILDLIFE +L4: made important progress for imperiled species and vital landscapes across the +L5: United States in 2021. \--- **LAWYERS** immediately **challenged** the premature +L6: and reckless decision to strip **gray wolves** of federal **Endangered Species +L7: Act (ESA)** protections. For many decades, Defenders has led the effort to +L8: protect and restore the gray wolf, and we will continue to fight the +L9: unscientific and hostile anti-wolf policies that impede conservation progress +L10: and will carry on our unrelenting battle to restore federal protections for this +L11: iconic keystone species. \--- **LOBBYISTS** worked around the clock to keep +L12: wildlife and climate priorities in the **Infrastructure Investment and Jobs +L13: Act**. We also continue fighting to keep important wildlife and habitat funding +L14: in relevant **appropriations bills**. \--- 2 DEFENDERS OF WILDLIFE ===== Page 3 +L15: ===== POLICY EXPERTS pushed forward on the urgent need for a National +L16: Biodiversity Strategy (NBS), an all-of-government approach to address the +L17: unprecedented loss of wildlife and habitat we are experiencing. We have coupled +L18: this with our new campaign to expand the National Wildlife Refuge System to +L19: preserve our nation’s only lands set aside for wildlife. By defending, funding +L20: and expanding our national wildlife refuges, we will directly address +L21: biodiversity loss and climate change while promoting increased equitable access +L22: to nature. FIELD TEAMS were on the ground helping to recover imperiled species. 
+L23: From panthers and sea turtles in Florida to wolves, bison and black-footed +L24: ferrets in Montana, Defenders’ conservation experts were in the field saving +L25: wildlife all over the country. CONSERVATION INNOVATION EXPERTS provided +L26: comprehensive analyses to guide policy and inform conservation strategies to +L27: reach the goal of protecting 30% of our terrestrial and marine systems by 2030 +L28: (“30x30”). Defenders’ Center for Conservation Innovation (CCI) produced a report +L29: which details actions we need to take to achieve 30x30 while protecting +L30: biodiversity and addressing the climate crisis. DEFENDERS.ORG ===== Page 4 ===== +L31: WE HAVE ACCOMPLISHED MUCH THIS YEAR WORKING WITH AN ADMINISTRATION THAT VALUES +L32: SCIENCE AND CONSERVATION. Our many successes include the return of protections +L33: to the Tongass National Forest in Alaska and the suspension of oil and gas +L34: leasing permits for the Arctic National Wildlife Refuge. Defenders also played a +L35: lead role in successfully defending the Migratory Bird Protection Act from +L36: attack and securing critical protections for migratory birds. Throughout 2021 +L37: Defenders made important progress for the wildlife and wild places we all love, +L38: yet our nation’s wildlife still face unprecedented challenges. More and more +L39: species, including manatees, Mexican gray wolves, polar bears and North Atlantic +L40: right whales, face the very real threat of extinction because of climate +L41: change, habitat loss, pollution and inadequate protections. In our work we +L42: continue to demonstrate success by following the science, building partnerships, +L43: leading with innovative solutions and focusing on collaborative and practical +L44: approaches in defense of nature. Together we can make a lasting difference for +L45: wildlife and habitats. Your support is essential to our work and we couldn’t do +L46: it without you. Thank you for being an important part of our efforts to protect +L47: imperiled wildlife across our nation. Jamie Rappaport Clark President and CEO +L48: John Dayton Board Chair OUR NEW TELEVISION SERIES, WILDLIFE NATION WITH JEFF +L49: CORWIN, premiered in October and airs every weekend on ABC stations nationwide. +L50: This groundbreaking series takes Jeff across the country to meet Defenders’ +L51: experts along with our science, tribal and community partners to educate and +L52: inspire viewers to join with Defenders to protect wildlife. While highlighting +L53: some of our important conservation work and raising awareness of important + + +Search within PDF for "2.2 million members". + +<|DSML|function_calls> +<|DSML|invoke name="find"> +<|DSML|parameter name="cursor" string="false">4 +<|DSML|parameter name="pattern" string="true">2.2 + +<|end▁of▁sentence|> + + +[5] Find results for text: `2.2` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf/find?pattern=2.2) +**viewing lines [0 - 4] of 4** + +L0: # 【0†match at L187】 +L1: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters +L2: contributed more than $49 million for wildlife and wildlife habitat. This figure +L3: includes all those who generously donated their time and expertise. The +L4: steadfast support of our donors allows Defenders to sustain our program and + + +Open around L180. 
+ +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">4 +<|DSML|parameter name="loc" string="false">170 + +<|end▁of▁sentence|> + + +[6] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf) +**viewing lines [170 - 215] of 261** + +L170: into the wild. **10 DEFENDERS OF WILDLIFE** ===== Page 11 ===== AFTER ANOTHER +L171: SUCCESSFUL DEFENDERS LAWSUIT ON BEHALF OF THE CRITICALLY ENDANGERED RED WOLF, +L172: FWS reversed its decision to limit the recovery area and committed to a robust +L173: release strategy. After years of inaction, FWS released eight wolves to the wild +L174: in North Carolina and plan to release nine more wolves in the spring of 2022. +L175: This is an incredible win for this critically endangered species whose +L176: population has dwindled down to single digits in the wild because of +L177: mismanagement, vehicle strikes and poaching. DEFENDERS CONTINUED TO LEAD EFFORTS +L178: TO PROTECT THE FLORIDA MANATEE, a beloved species that suffered the deadliest +L179: year on record in 2021, tragically surpassing 1,000 deaths because of water +L180: pollution and lack of warm water habitat. Defenders led advocacy and education +L181: aimed at restoring the natural flow of the dammed Ocklawaha River, which would +L182: provide critical warm-water habitat that manatees need to survive. Defenders’ +L183: legal team continued to fight for manatees in the courts, holding government +L184: agencies accountable for protecting critical habitat and addressing the +L185: devastating water pollution that is killing the seagrass and causing manatees to +L186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In +L187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters +L188: contributed more than $49 million for wildlife and wildlife habitat. This figure +L189: includes all those who generously donated their time and expertise. The +L190: steadfast support of our donors allows Defenders to sustain our program and +L191: public education efforts in the field, the courts and on Capitol Hill. 2021 +L192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split +L193: interests $7,692 Income from investments, annuity reserve funds and trusts +L194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total +L195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency +L196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total +L197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 +L198: Net Assets, End of the Year $45,144 Dollars are in thousands. 12 DEFENDERS OF +L199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, +L200: trusts and split interests 15% Royalties and other income 7% Contributed +L201: services 12% Program and support services 97% Fundraising 3% ===== Page 13 ===== +L202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl +L203: Sachs* Vice Chair Connecticut Whitfield Bailey* Secretary Tennessee Mark Caylor* +L204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. +L205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela +L206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia +L207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. +L208: Montana and California James M. 
Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* +L209: California Terry L. Root, Ph.D Florida BOARD EMERITUS Terry C. Pelster, Esq. New +L210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS +L211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, +L212: Ph.D Arizona State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D +L213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State +L214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D +L215: University of California-Davis David S. Wilcox, Ph.D Princeton University Rae + + +Scrolling further for "Contributed services 12%". Already seen. So the annual report is Defenders of Wildlife 2021 annual report. + +Next clue: "The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%." + +Thus, the following year (2022) annual report of Defenders of Wildlife includes mention of rescinding plan to open most of 22 million acres of NPR-A to oil and gas drilling. Also contributed services decreased to 10% (from 12% in 2021). So we need to find the 2022 annual report for Defenders of Wildlife. + +Search for "Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska". + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[7] Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska (Search_Results/Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska) +**viewing lines [0 - 37] of 37** + +L0: +L1: URL: Search_Results/Defenders of Wildlife 2022 annual report National Petroleum +L2: Reserve Alaska +L3: # Search Results +L4: +L5: * 【0†CELEBRATING YEARS; publish_date: none†www.annualreports.com】 With less +L6: than 340 right whales left in the wild, Defenders is fighting tirelessly to end +L7: deadly lobster gear entanglements and vessel strikes that are driving ... +L8: * 【1†Financials; publish_date: none†defenders.org】 We invite you to explore +L9: the reports below to learn more about our activities and accomplishments, and +L10: how we put your money to work for wildlife. +L11: * 【2†Alaska Program Looks Back on 2022; publish_date: none†defenders.org】 Feb +L12: 9, 2023 — Thanks to a lawsuit joined by Defenders, seven million acres were +L13: returned to protection within the National Petroleum Reserve-Alaska (NPR-A), ... +L14: * 【3†Defenders-of-Wildlife-2022-Financial-Statement. ...; publish_date: +L15: none†defenders.org】 We have audited the accompanying consolidated financial +L16: statements of Defenders of Wildlife and Affiliated Defenders of Wildlife Action +L17: Fund (collectively, ... +L18: * 【4†2022 Annual Report; publish_date: none†alaskaconservation.org】 Jun 13, +L19: 2023 — In 2022, we focused on three landscapes: the Arctic. National Wildlife +L20: Refuge, Bristol Bay, and the Tongass National Forest. In March 2022,. 
+L21: * 【5†Assessment of ecological and cultural values within the ...; +L22: publish_date: none†www.blm.gov】 This document was written to provide technical +L23: information regarding the ecological importance of the National Petroleum +L24: Reserve – Alaska (NPR-A). Several ... +L25: * 【6†Accomplishments Report; publish_date: none†defenders.org】 National +L26: Petroleum Reserve-Alaska. Identified for exceptional wildlife and cultural +L27: values, including critical habitat for polar bears and other species ... +L28: * 【7†2022 annual report; publish_date: none†dory- +L29: plantain-s2zc.squarespace.com】 These projects are made possible through +L30: collaborations with Defenders of. Wildlife, Cook Inletkeeper, Trustees for +L31: Alaska,. Environmental Investigation Agency, ... +L32: * 【8†23IMPACT REPORT; publish_date: none†www.annualreports.com】 Defenders of +L33: Wildlife made incredible progress protecting wildlife and wild places in 2023, +L34: helping shape a brighter future for imperiled species and vital ... +L35: * 【9†From Leasing to Land Protections; publish_date: none†defenders-cci.org】 +L36: Abstract. When you hear the name, National Petroleum Reserve - Alaska (NPR-A, or +L37: Reserve), you might think of a barren land filled with oil wells and ... + + +Open result 0. + +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">7 +<|DSML|parameter name="id" string="false">0 + +<|end▁of▁sentence|> + + +[8] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf) +**viewing lines [0 - 53] of 289** + +L0: +L1: URL: https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders- +L2: wildlife_2022.pdf +L3: ===== Page 1 ===== 2022 Impact Report C E L E B R A T I N G Y E A R S ===== Page +L4: 2 ===== 2022 Defenders of Wildlife made important progress for imperiled +L5: species and vital landscapes across the United States in 2022. GRAY WOLF | JIM +L6: GUMMERAL MAY STOCK PRIOR Lawyers successfully challenged the previous +L7: administration’s decision to delist the gray wolf and restored critical federal +L8: protections under the Endangered Species Act. This latest triumph in court is +L9: part of our ongoing battle to protect and restore gray wolves throughout their +L10: historical range and shield them from persecution by extremist legislators in +L11: Idaho, Montana and Wyoming. TWO MORE FATALIZED GRAY SWALLETS TO SEA TO SHARE +L12: ALLIANCE Lobbyists worked around the clock to expand funding for wildlife +L13: conservation in the FY2022 federal spending bill, which included $31 million (a +L14: 44% increase) for the Bureau of Land Management’s Threatened and Endangered +L15: Species Program, $2.5 million (an 81% increase) for the U.S. Department of +L16: Agriculture Wildlife Services’ Nonlethal Initiative to prevent human-wildlife +L17: conflicts and $21 million (a 320% increase) for North Atlantic right whale +L18: conservation. 2 DEFENDERS OF WILDLIFE ===== Page 3 ===== **Policy Experts** +L19: played a crucial role in securing international trade protections for 100 +L20: species of sharks and rays, all 158 species of glass frogs and 73 species of +L21: reptiles, including 21 species of desert horned lizards, at the Convention on +L22: International Trade in Endangered Species (CITES) in Panama. \--- **Field +L23: Teams** worked tirelessly to protect and restore imperiled species across the +L24: country. 
From Florida manatees and red wolves in the Southeast to belugas and +L25: grizzly bears in Alaska, Defenders’ conservation experts were on the ground +L26: saving species that need our help to survive and thrive. \--- **Conservation +L27: Innovation Experts** published more than 10 peer-reviewed studies on topics that +L28: include the Cook Inlet beluga whale, golden-cheeked warbler, global parrot +L29: biodiversity, the Endangered Species Act, the effects of mountaintop removal +L30: mining on endangered species, the ecological importance of panthers and the +L31: implementation of “30x30” – the globally recognized goal to which President +L32: Biden committed the U.S. to conserve 30% of our imperiled lands and waters by +L33: 2030. \--- **DEFENDERS.ORG** ===== Page 4 ===== THANK YOU Defenders celebrated +L34: our 75th anniversary in 2022—an exciting milestone that we attribute to our +L35: unwavering dedication to our wildlife conservation mission. From helping to pass +L36: the Endangered Species Act and other bedrock conservation laws to leading the +L37: advocacy on the reintroduction of wolves to Yellowstone National Park and other +L38: parts of the West, Defenders is proud to inspire, lead and ultimately celebrate +L39: some of the most consequential conservation wins for wildlife and wild places. +L40: It is wonderfully fitting that we also mark 2022 as a year when we successfully +L41: defended, yet again, the original focal species and symbol of our organization, +L42: the gray wolf. While the future for gray wolves and many other species remains +L43: uncertain, we will never stop fighting to protect the imperiled wildlife that +L44: need us and to restore vibrant and balanced ecosystems. Our mission is more +L45: important than ever due to the escalating biodiversity and climate crises. +L46: Defenders is determined to build on our accomplishments and seize new +L47: opportunities to address these challenges through our multifaceted approach to +L48: conservation. We focus on developing innovative and science-based solutions, +L49: coordinating on-the-ground species and habitat restoration, promoting human- +L50: wildlife coexistence, protecting and strengthening our bedrock environmental +L51: laws, investing in impactful collaborations and partnerships, building a more +L52: diverse and powerful conservation movement and challenging attacks on wildlife +L53: through advocacy, policy and litigation. Defenders’ continued success is made + + +Search within PDF for "National Petroleum Reserve". + +<|DSML|function_calls> +<|DSML|invoke name="find"> +<|DSML|parameter name="cursor" string="false">8 +<|DSML|parameter name="pattern" string="true">Petroleum + +<|end▁of▁sentence|> + + +[9] Find results for text: `petroleum` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf/find?pattern=petroleum) +**viewing lines [0 - 4] of 4** + +L0: # 【0†match at L201】 +L1: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas +L2: drilling and removed protections for important wildlife habitat. Protections +L3: have now been restored for nearly half of the reserve’s pristine lands, which +L4: are vital habitat for shorebirds, denning polar bears and tens of thousands of + + +Open around L190. 
+ +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">8 +<|DSML|parameter name="loc" string="false">180 + +<|end▁of▁sentence|> + + +[10] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf) +**viewing lines [180 - 227] of 289** + +L180: the sixth successful transfer of bison to the Assiniboine and Sioux Tribes of +L181: Fort Peck since 2019. \--- **SWIFT FIX KITS | © ROSIMA PAELARINTSKIMMA MADDIAL +L182: 200 AND CONSERVATION BIOLOGY INSTITUTE** \--- **Celebrating our third year** of +L183: a collaborative program with the Aaniih and Nakoda Tribes and others to restore +L184: swift foxes to the Fort Belknap Indian Reservation in Montana, Defenders helped +L185: with the release of 28 more swift foxes. With over 100 foxes reintroduced +L186: through this program, monitoring efforts show that they are reproducing in the +L187: wild—a critical measure of success for a self-sustaining population. \--- +L188: **Defenders continued to lead the way** for conserving and recovering the +L189: endangered black-footed ferret, supporting the black-footed ferret survey for +L190: the Fort Belknap Indian community. Thirty-six ferrets were vaccinated against +L191: sylvatic plague and two dozen kits were released in the wild. \--- **10 +L192: DEFENDERS OF WILDLIFE** ===== Page 11 ===== Defenders helped to bring hope for +L193: recovery for the endangered military macaw, adding 11 fledglings to a growing +L194: wild population in Puerta Vallarta, Mexico, that is under pressure from habitat +L195: loss and poachers for the illegal pet trade. Accord- ing to our recent report, +L196: the 2008 parrot trade ban that Defenders fought to achieve is working. +L197: Preventing more than 30,000 parrots from being illegally trapped each year, the +L198: trade ban has resulted in a 47% decrease in the illegal trade of parrots and an +L199: 88% decrease in U.S. seizures of Mexican parrots. As a result of a Defenders +L200: lawsuit, BLM rescinded the previous administration’s plan that opened most of +L201: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas +L202: drilling and removed protections for important wildlife habitat. Protections +L203: have now been restored for nearly half of the reserve’s pristine lands, which +L204: are vital habitat for shorebirds, denning polar bears and tens of thousands of +L205: caribou in the Teshekpuk caribou herd. Our new storymap illustrates why the +L206: reserve is so important to wildlife. Defenders marked the 20th anniversary of +L207: our Sea Otter Awareness Week. In celebration of this milestone, we sponsored +L208: state legislation, signed by Gov. Gavin Newsom, that formally recognizes the +L209: event’s anniversary and acknowledges the vital role that sea otters play in +L210: California’s nearshore ecosystems. This annual celebration provides a unique +L211: opportunity to raise awareness of the important role sea otters play in +L212: maintaining ecosystem health, our need to coexist with sea otters and the +L213: efforts of Defenders and others in advancing sea otter conservation. +L214: DEFENDERS.ORG ===== Page 12 ===== FINANCIAL REPORT In 2022, Defenders of +L215: Wildlife’s nearly 2.2 million members and supporters contributed more than $41 +L216: million for wildlife and wildlife habitat. This figure includes all those who +L217: generously donated their time and expertise. 
The steadfast support of our donors +L218: allows Defenders to sustain our program and public education efforts in the +L219: field, the courts and on Capitol Hill. 2022 SOURCES OF FUNDS Grants and +L220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from +L221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other +L222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS +L223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising +L224: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets +L225: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433 +L226: Dollars are in thousands. 12 DEFENDERS OF WILDLIFE Grants and contributions 59% +L227: Contributed services 10% Royalties and other income 2% Bequests, trusts and + + +Scrolling a bit more for "Contributed services 10%". Already seen. So indeed the 2022 annual report shows contributed services 10%, down from 12% in 2021. + +Thus the organization is Defenders of Wildlife. + +The question: "Who was the emeritus board member that year? Name only, no titles." + +We need to identify the emeritus board member listed in the 2022 annual report. From the 2021 annual report, we saw "BOARD EMERITUS Terry C. Pelster, Esq." However, that is for 2021. The question asks "Who was the emeritus board member that year?" That year refers to the following year (2022). So we need to check the 2022 annual report for the emeritus board member. + +Open the 2022 PDF near the board list. Search for "Emeritus". + +<|DSML|function_calls> +<|DSML|invoke name="find"> +<|DSML|parameter name="cursor" string="false">8 +<|DSML|parameter name="pattern" string="true">Emeritus + +<|end▁of▁sentence|> + + +[11] Find results for text: `emeritus` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf/find?pattern=emeritus) +**viewing lines [0 - 10] of 10** + +L0: # 【0†match at L237】 +L1: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New +L2: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela +L3: Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, Ph.D Arizona +L4: State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D Universidad +L5: +L6: # 【1†match at L243】 +L7: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David +L8: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic +L9: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of +L10: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice + + +Open around L230. 
+ +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">8 +<|DSML|parameter name="loc" string="false">220 + +<|end▁of▁sentence|> + + +[12] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf) +**viewing lines [220 - 267] of 289** + +L220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from +L221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other +L222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS +L223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising +L224: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets +L225: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433 +L226: Dollars are in thousands. 12 DEFENDERS OF WILDLIFE Grants and contributions 59% +L227: Contributed services 10% Royalties and other income 2% Bequests, trusts and +L228: split interests 29% TOTAL REVENUE Income from investments -15% Program and +L229: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== +L230: DEFENDERS.ORG 13 ===== Page 14 ===== BOARD OF DIRECTORS John Dayton, Esq.* Chair +L231: Texas Sheryl Sachs* Vice Chair Connecticut Dinah Bear, Esq.* Secretary Arizona +L232: Mark Caylor* Treasurer District of Columbia Susan Alexander Connecticut Sylvia +L233: Baca* California Whitefield Bailey Tennessee Donald Barry, Esq. Oregon Kim +L234: O’Keefe Beck, Ph.D California Gabriela Chavarria, Ph.D Washington Jamie +L235: Rappaport Clark* President & CEO Virginia John Donovan, Esq.* Montana and +L236: California Caroline D. Gabel Maryland Terry L. Root, Ph.D New Mexico Thomas D. +L237: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New +L238: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela +L239: Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, Ph.D Arizona +L240: State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D Universidad +L241: Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State University Terry +L242: L. Root, Ph.D Stanford University Mark Schwartz, Ph.D University of California- +L243: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David +L244: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic +L245: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of +L246: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice +L247: President, Conservation Law Matt Braughler Vice President, Philanthropy Rachel +L248: Brittin Vice President, Communications Shawn Cantrell Vice President, Field +L249: Conservation Programs Robert L. Dewey Vice President, Government Relations & +L250: External Affairs Monica Goldberg Vice President, Landscape Conservation Sharon +L251: Morris-Smith Vice President of Human Resources Elizabeth Nichols Vice President, +L252: Finance/CFO Anthony Pegues Vice President, Diversity, Equity, Inclusion, +L253: Justice Lindsay Rosa, Ph.D Vice President, Conservation Research and Innovation +L254: Brandace Swimley Vice President, Integrated Marketing 14 DEFENDERS OF WILDLIFE +L255: ===== Page 15 ===== There are many ways to support Defenders of Wildlife and +L256: protect our imperiled wildlife. 
To learn more or donate, contact us at +L257: 1-800-385-9712, visit Defenders.org/donate or choose one of the following +L258: options. ➤ Transfer appreciated assets directly to Defenders and maximize +L259: potential tax benefits. Defenders.org/ donate-stocks-help-save-wildlife ➤ Become +L260: a Leadership Council member with a single donation of $1,000 or more and enjoy +L261: more personal involvement with Defenders. Defenders.org/leadershipcouncil ➤ If +L262: you are 70.5 or older, roll over a portion of your IRA to benefit wildlife. +L263: Remember, these gifts must come directly from your IRA account. +L264: Defenders.org/planned-giving/gifts-your-retirement-plan ➤ Get started on a +L265: contribution from your donor-advised fund Defenders.org/donor-advised-funds ➤ +L266: Check with your personnel office or that of your spouse/partner about your +L267: company’s Corporate Matching Gifts program or visit us at + + +Thus the emeritus board member is Terry C. Pelster. However, the question might ask for "Who was the emeritus board member that year?" Possibly referring to the year of the annual report that mentioned the rescinded plan and contributed services decrease. That is 2022. So the answer is Terry C. Pelster. + +But let's verify that the 2022 annual report indeed includes the mention of contributed services decreasing to 10%. We saw that. So answer is Terry C. Pelster. + +But maybe the question is tricky: "Who was the emeritus board member that year?" Could be the emeritus board member listed in the 2021 annual report? However, the question says "That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%." + +Thus the "following year" annual report is 2022. The question likely asks: "Who was the emeritus board member that year?" Meaning the emeritus board member listed in that annual report (2022). So answer: Terry C. Pelster. + +But let's confirm that the 2021 annual report also shows emeritus board member Terry C. Pelster. Indeed we saw that. But perhaps the 2022 report might have a different emeritus board member. We saw it's the same. So answer is Terry C. Pelster. + +However, the question might be about a different organization. Let's verify the details. + +First clue: Press release about Kenya vaccine doses on Dec 5, 2021. That is likely a World Bank press release. Search for "World Bank Kenya vaccine doses December 2021 press release". 
+ +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[13] As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines (Search_Results/As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines) +**viewing lines [0 - 41] of 41** + +L0: +L1: URL: Search_Results/As of December 5, 2021, Kenya had received a total of +L2: 16,201,670 vaccines +L3: # Search Results +L4: +L5: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: +L6: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had +L7: received a total of 16,201,670 vaccines, with 7,583,134 administered. While +L8: vaccine acceptance is ... +L9: * 【1†Unmet need for COVID-19 vaccination coverage in Kenya - PMC; +L10: publish_date: none†pmc.ncbi.nlm.nih.gov】 by SK Muchiri · 2022 · Cited by 42 — As +L11: of December 2021, six counties had a vaccination coverage of less than 5%. +L12: These counties include Garissa, Mandera, Marsabit, Tana River, Turkana, and ... +L13: * 【2†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec +L14: 1, 2021 — • Total Covid-19 Vaccines Received to date- 16,201,670 ... Table 10: +L15: Vaccine Logistics Received in the Country as at 5th, December 2021. +L16: * 【3†COVID-19 vaccination refusal trends in Kenya over 2021 - PMC; +L17: publish_date: none†pmc.ncbi.nlm.nih.gov】 by RT Rego · 2023 · Cited by 21 — We +L18: assessed vaccine refusal over time in Kenya, and characterized factors +L19: associated with changes in vaccination refusal. +L20: * 【4†Ciheb-Kenya on the Front Lines of the COVID-19 Vaccine ...; publish_date: +L21: none†www.ciheb.org】 Of the 721,509 persons vaccinated, 409,711 are aged 58 +L22: years and above, 143,050 are healthcare workers, 109,593 are teachers, and +L23: 59,155 are security officers. +L24: * 【5†proportion of fully vaccinated adults 23.3pc.; publish_date: +L25: none†www.facebook.com】 COVID-19: KENYA administers 150632 new vaccinations; +L26: total jabs given now 13782963; proportion of fully vaccinated adults 23.3pc. +L27: * 【6†National COVID-19 Vaccines Deployment and ...; publish_date: +L28: none†www.parliament.go.ke】 Kenya has a well-established immunization program and +L29: a robust disease surveillance and response unit (DSRU) which serves as an early +L30: warning system, to. +L31: * 【7†Kenya: Vaccine Requirements Violate Rights; publish_date: +L32: none†www.hrw.org】 Dec 13, 2021 — Kenya, with an estimated adult population of +L33: 27.2 million and a total population of 55 million, has received approximately 23 +L34: million doses as ... +L35: * 【8†Government gives 20 million a month to get vaccinated; publish_date: +L36: none†www.bbc.com】 Nov 22, 2021 — Although less than 10% of Kenyans are +L37: vaccinated, the government wants to avoid a surge over Christmas. +L38: * 【9†ministry of health kenya covid-19 immunization status ...; publish_date: +L39: none†www.health.go.ke】 May 15, 2022 — By the end of 2021 Kenya had administered +L40: 10.1 million doses of 5 types of Covid-19 vaccines. During this period 4.2 +L41: million people were ... + + +Open result 0. 
+ +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">13 +<|DSML|parameter name="id" string="false">0 + +<|end▁of▁sentence|> + + +[14] www.worldbank.org (https://www.worldbank.org/en/news/press-release/2021/12/14/kenya-s-economy-is-showing-resilience-as-output-rises-above-pre-pandemic-levels-driven-by-a-rebound-in-the-services-sect) +**viewing lines [0 - 52] of 61** + +L0: +L1: URL: https://www.worldbank.org/en/news/press-release/2021/12/14/kenya-s-economy- +L2: is-showing-resilience-as-output-rises-above-pre-pandemic-levels-driven-by-a- +L3: rebound-in-the-services-sect +L4: NAIROBI, December 14, 2021 — Kenya’s economy has demonstrated resilience to the +L5: COVID-19 shock, with output in the first half of the year rising above pre- +L6: pandemic levels. In 2021 as a whole, gross domestic product (GDP) is expected to +L7: grow by 5%, one of the faster recoveries among Sub-Saharan African countries. +L8: Overall economic performance is expected to be robust at 4.9% per year in +L9: 2022-23, similar to the pre-pandemic pace (5% average annual growth from 2010 to +L10: 2019). According to the 24th edition of the Kenya Economic Update, “From +L11: Recovery to Better Jobs,” growth has been supported by rebounds in industry and, +L12: especially, services. Agricultural output, however, fell by 0.5% year on year +L13: in the first half of 2021 following a particularly strong performance in 2020, +L14: partly due to below-average rains. Demand-side recovery has been supported by a +L15: revival in private consumption, against a backdrop of improving employment +L16: conditions and household incomes. “Kenya’s economy has shown considerable +L17: resilience to the enormous shock of the pandemic, and this year is expected to +L18: post one of the stronger growth rebounds in the region thanks to diversified +L19: sources of growth and sound economic policies and management,” said Keith +L20: Hansen, World Bank Country Director for Kenya. “However, poverty has increased, +L21: and the buffers and coping mechanisms of households, firms, and the public +L22: finances have been depleted.” Economic activity in Kenya has continued to adapt +L23: to the pandemic and associated restrictions. A mix of containment measures, such +L24: as a nightly curfew, were in effect through most of 2021, while more +L25: economically disruptive measures such as lockdowns and travel restrictions were +L26: phased, limiting the impact on economic activities. The vaccine rollout, which +L27: had a slow start due to supply constraints, has picked up as new shipments of +L28: vaccines have arrived, particularly since September. This has supported economic +L29: recovery and growth through the third quarter of 2021. As of December 5, 2021, +L30: Kenya had received a total of 16,201,670 vaccines, with 7,583,134 administered. +L31: While vaccine acceptance is reportedly high there is still a long way to go +L32: towards the government’s target of fully inoculating the adult population of +L33: about 30 million by the end of 2022.As of December 6, 2021, about 10% of adults +L34: (2.9 million people) had been fully vaccinated while another 16% (4.9 million +L35: people) had received their first dose. The report also projects robust growth in +L36: the medium-term. This outlook takes into account that some sub-sectors, such as +L37: education, have bounced back strongly, but others such as international +L38: tourism, have only partially and face a much more protracted recovery. 
The +L39: anticipated further recovery of hotels and restaurants, trade, transport, and +L40: other services, depends on substantial vaccination progress to help prevent new +L41: waves of infections and associated containment measures. “Recent economic +L42: performance has been strong and the outlook is positive, but in Kenya, as +L43: everywhere, the future course of the pandemic remains a major source of +L44: uncertainty, said Alex Sienaert, Senior Economist for Kenya. “To support further +L45: recovery, reduce debt distress risks, and rebuild space for social and +L46: development spending, it is critical that Kenya continues to implement medium- +L47: term fiscal consolidation plans.” In addition to pandemic-related risks to the +L48: outlook, a second key domestic risk factor stems from the drought conditions +L49: which are affecting parts of the country and already causing severe hardship. +L50: Should the drought intensify or spread, this would weigh on the near-term +L51: economic outlook. Weaker global growth, higher-than-anticipated energy prices, +L52: and tighter external financing conditions are the primary external risks. As + + +Thus first clue confirmed. + +Second clue: Defenders of Wildlife 2021 annual report. Confirmed. + +Third clue: Defenders of Wildlife 2022 annual report. Confirmed. + +Thus the answer is Terry C. Pelster. + +But let's verify that the 2022 annual report indeed shows "Contributed services 10%". We saw that. Also confirm that 2021 report shows 12%. We saw that. + +Thus the question: "Who was the emeritus board member that year?" Refers to 2022. Answer: Terry C. Pelster. + +However, the question might be asking for the emeritus board member in the 2021 report? Let's read the question carefully: + +"In a year before 2023, between December 1 and 15 (inclusive), a press release reported that as of the 5th of that month, a country had received a total of 16,201,670 vaccine doses, with 7,583,134 administered. That same year, a company's annual report revealed that its nearly 2.2 million members and supporters contributed over $49 million, with 12% of total revenue coming from contributed services. The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan to open most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling and that contributed services had decreased by 2% to 10%. Who was the emeritus board member that year? Name only, no titles." + +The phrase "that year" could refer to "the following year" i.e., the year of the annual report that mentioned the rescinded plan. Or could refer to "that same year" (the year of the press release and the first annual report). However, the phrase "that year" appears at the end: "Who was the emeritus board member that year?" It likely refers to the year of the annual report that is being discussed in the preceding sentence. The preceding sentence talks about the following year's annual report. So indeed "that year" is the following year (2022). + +Thus answer: Terry C. Pelster. + +But to be thorough, we should check the 2022 annual report for any other emeritus board members. The search only showed "BOARD EMERITUS Terry C. Pelster, Esq." Maybe there are multiple emeritus board members. But likely only one. + +Check the 2021 annual report for emeritus board member. It also shows Terry C. Pelster. So maybe the question is asking for the emeritus board member in the 2021 report. But why would they ask that? 
The question is about connecting the clues to identify the organization and then find the emeritus board member. The clues point to Defenders of Wildlife. The question likely asks for the emeritus board member as of the 2022 report. + +Nevertheless, we should confirm that the 2022 report is indeed the one that mentions the rescinded plan. We saw that. Also confirm that contributed services decreased from 12% to 10%. We saw that. + +Thus answer is Terry C. Pelster. + +But let's search for "Defenders of Wildlife emeritus board member 2022 Terry Pelster". + +<|DSML|function_calls> +<|DSML|invoke name="search"> +<|DSML|parameter name="query" string="true">Terry C. Pelster Defenders of Wildlife emeritus board member +<|DSML|parameter name="topn" string="false">10 +<|DSML|parameter name="source" string="true">web + +<|end▁of▁sentence|> + + +[15] Terry C. Pelster Defenders of Wildlife emeritus board member (Search_Results/Terry C. Pelster Defenders of Wildlife emeritus board member) +**viewing lines [0 - 31] of 31** + +L0: +L1: URL: Search_Results/Terry C. Pelster Defenders of Wildlife emeritus board member +L2: # Search Results +L3: +L4: * 【0†2024 impact report; publish_date: none†defenders.org】 Terry Root, Ph.D. +L5: New Mexico. Thomas D. Sisk, Ph.D. British Columbia, Canada. BOARD EMERITUS. +L6: Terry C. Pelster, Esq. New York. SCIENCE ADVISORS. Leah Gerber, Ph ... +L7: * 【1†Defenders of Wildlife; publish_date: none†www.wrongkindofgreen.org】 +L8: Directors ; Victor M. Sher*, Chair – California ; Terry C. Pelster*, Vice Chair +L9: – New York ; Richard Kopcho*, Treasurer – California ; Adelaide P. Gomer*, +L10: Secretary ... +L11: * 【2†Exhibit 12; publish_date: none†www.uschamber.com】 DECLARATION OF TERRY +L12: PELSTER. I, Terry C. Pelster, declare as follows: 1. I am a current member of +L13: Defenders of Wildlife (“Defenders”) and have been a member. +L14: * 【3†2020 ANNUAL REPORT; publish_date: none†defenders.org】 Terry L. Root. +L15: Florida. BOARD. EMERITUS. Terry C. Pelster. New York. Alan Steinberg. Florida. +L16: SCIENCE. ADVISORS. Gabriela Chavarria,. Ph.D. Denver Museum of. +L17: * 【4†2019 Annual Report; publish_date: none†defenders.org】 BOARD EMERITUS. +L18: Terry C. Pelster. New York. Alan Steinberg. Florida. SCIENCE ADVISORS. Leah +L19: Gerber, Ph.D. Arizona State University. Kathayoon Khalil, Ph.D. +L20: * 【5†annual report; publish_date: none†defenders.org】 * Executive Committee +L21: member. BoARD EMERITuS. Terry C. Pelster. New York. Alan Steinberg. Florida. +L22: PRESIDENT. EMERITuS. Rodger Schlickeisen. Virginia. SCIENCE ... +L23: * 【6†BEING THERE FOR BELUGAS; publish_date: none†wendeenicole.com】 Mar 2, 2023 +L24: — Defenders of Wildlife is a national, nonprofit member- ship ... Terry C. +L25: Pelster, Esq. New York. *Executive Committee member outfront. +L26: * 【7†Annual Reports; publish_date: none†www.annualreports.com】 WAYS TO GIVE +L27: There are many ways to support Defenders of Wildlife and protect our imperiled +L28: wildlife. ... Terry C. Pelster. New York. Alan Steinberg. Florida. +L29: * 【8†2014 Annual Report; publish_date: none†defenders.org】 BOARD OF DIRECTORS +L30: defenders of wildlife is a national, nonprofit ... BOARD EMERITUS. Terry C. +L31: Pelster. New York. Alan Steinberg. Florida. Page 3. PO. + + +Thus Terry C. Pelster is indeed the emeritus board member. + +Now the question might want just "Terry C. Pelster". Or maybe "Terry Pelster". Or "Terry C. Pelster, Esq." but instructions say "Name only, no titles." So likely "Terry C. Pelster". 
However, maybe they want "Terry Pelster". Better to include full name as listed: "Terry C. Pelster". + +Let's read the question again: "Who was the emeritus board member that year? Name only, no titles." So we should answer just the name, no "Esq." or "Board Emeritus". So "Terry C. Pelster". Or maybe "Terry Pelster". But the document shows "Terry C. Pelster, Esq." So the name is "Terry C. Pelster". + +Better to verify that the 2022 annual report indeed lists "BOARD EMERITUS Terry C. Pelster, Esq." We saw that line. + +Thus answer: Terry C. Pelster. + +But let's double-check that the question might refer to the 2021 annual report. The phrase "that year" could be ambiguous. Let's parse the timeline: + +- Year 1: 2021. Press release in Dec 2021 about Kenya vaccine doses. Same year (2021) annual report of Defenders of Wildlife shows 12% contributed services. + +- Year 2: 2022. Annual report mentions rescinded plan and contributed services decreased to 10%. + +The question says "The following year, the annual report stated that the organization had played a role in rescinding a previous administration’s plan ... and that contributed services had decreased by 2% to 10%." This is year 2. + +Then asks "Who was the emeritus board member that year?" That likely refers to year 2. + +Thus answer: Terry C. Pelster. + +However, perhaps the emeritus board member changed between 2021 and 2022. We need to check 2022 annual report for any other emeritus board member. We saw only Terry C. Pelster. Also check 2021 annual report: same. So likely unchanged. + +Thus answer is Terry C. Pelster. + +Nevertheless, we should confirm that the 2022 annual report is indeed the one that includes the phrase "BLM rescinded the previous administration’s plan that opened most of the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas drilling". We saw that in line 200-202. + +Thus answer is Terry C. Pelster. + +Now we need to provide the answer in the final message. Only the name. + +Thus final answer: Terry C. Pelster. + +But let's be thorough and search for "Defenders of Wildlife 2022 annual report contributed services 10%". + +<|DSML|function_calls> +<|DSML|invoke name="find"> +<|DSML|parameter name="cursor" string="false">8 +<|DSML|parameter name="pattern" string="true">Contributed services + +<|end▁of▁sentence|> + + +[16] Find results for text: `contributed services` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf/find?pattern=contributed services) +**viewing lines [0 - 10] of 10** + +L0: # 【0†match at L222】 +L1: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS +L2: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising +L3: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets +L4: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433 +L5: +L6: # 【1†match at L227】 +L7: Contributed services 10% Royalties and other income 2% Bequests, trusts and +L8: split interests 29% TOTAL REVENUE Income from investments -15% Program and +L9: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== +L10: DEFENDERS.ORG 13 ===== Page 14 ===== BOARD OF DIRECTORS John Dayton, Esq.* Chair + + +Thus confirmed. + +Now we should verify that the 2021 annual report shows contributed services 12%. 
+ +<|DSML|function_calls> +<|DSML|invoke name="find"> +<|DSML|parameter name="cursor" string="false">4 +<|DSML|parameter name="pattern" string="true">Contributed services + +<|end▁of▁sentence|> + + +[17] Find results for text: `contributed services` in `www.annualreports.com` (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf/find?pattern=contributed services) +**viewing lines [0 - 4] of 4** + +L0: # 【0†match at L194】 +L1: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total +L2: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency +L3: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total +L4: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 + + +Open around L190-200 to see percentage. + +<|DSML|function_calls> +<|DSML|invoke name="open"> +<|DSML|parameter name="cursor" string="false">4 +<|DSML|parameter name="loc" string="false">185 + +<|end▁of▁sentence|> + + +[18] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf) +**viewing lines [185 - 232] of 261** + +L185: devastating water pollution that is killing the seagrass and causing manatees to +L186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In +L187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters +L188: contributed more than $49 million for wildlife and wildlife habitat. This figure +L189: includes all those who generously donated their time and expertise. The +L190: steadfast support of our donors allows Defenders to sustain our program and +L191: public education efforts in the field, the courts and on Capitol Hill. 2021 +L192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split +L193: interests $7,692 Income from investments, annuity reserve funds and trusts +L194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total +L195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency +L196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total +L197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 +L198: Net Assets, End of the Year $45,144 Dollars are in thousands. 12 DEFENDERS OF +L199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, +L200: trusts and split interests 15% Royalties and other income 7% Contributed +L201: services 12% Program and support services 97% Fundraising 3% ===== Page 13 ===== +L202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl +L203: Sachs* Vice Chair Connecticut Whitfield Bailey* Secretary Tennessee Mark Caylor* +L204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. +L205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela +L206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia +L207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. +L208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* +L209: California Terry L. Root, Ph.D Florida BOARD EMERITUS Terry C. Pelster, Esq. 
New +L210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS +L211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, +L212: Ph.D Arizona State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D +L213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State +L214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D +L215: University of California-Davis David S. Wilcox, Ph.D Princeton University Rae +L216: Wynn-Grant, Ph.D National Geographic Society SENIOR STAFF Jamie Rappaport Clark +L217: President & CEO Thu Pham Chief of Staff Renee Stone Senior Vice President, +L218: Conservation Programs & General Counsel James Stefan Senior Vice President, +L219: Operations Matt Braughler Vice President, Philanthropy Rachel Brittin Vice +L220: President, Communications Shawn Cantrell Vice President, Field Conservation +L221: Programs Robert L. Dewey Vice President, Government Relations & External Affairs +L222: Monica Goldberg Vice President, Landscape Conservation Sharon Morris-Smith Vice +L223: President of Human Resources Elizabeth Nichols Vice President, Finance/CFO +L224: Anthony Pegues Vice President, Diversity, Equity, Inclusion, Justice Lindsay +L225: Rosa, Ph.D Director, Center for Conservation Innovation Michael P. Senatore Vice +L226: President, Conservation Law Brandace Swimley Vice President, Integrated +L227: Marketing 14 DEFENDERS OF WILDLIFE ===== Page 15 ===== THERE ARE MANY WAYS TO +L228: SUPPORT DEFENDERS OF WILDLIFE and protect our imperiled wildlife. To learn more +L229: or donate, contact us at 1-800-385-9712, visit defenders.org/donate or choose +L230: one of the following options. ➤ Transfer appreciated assets directly to +L231: Defenders and maximize potential tax benefits. Defenders.org/donate-stocks-help- +L232: save-wildlife ➤ Become a Leadership Council member with a single donation of + + +Thus everything matches. + +Thus answer: Terry C. Pelster. + +Now we need to output only the name. + +Thus final answer: Terry C. Pelster. + +But perhaps the question expects "Terry Pelster". However, the document includes middle initial "C." It might be better to include the full name as listed: "Terry C. Pelster". The instruction "Name only, no titles" suggests we should not include "Esq." or "Board Emeritus". But including middle initial is likely okay. + +To be safe, we could output "Terry C. Pelster". Let's see similar puzzles: often they expect the full name as appears in the document. + +Thus answer: Terry C. Pelster. + +Now we will provide final answer.Terry C. Pelster<|end▁of▁sentence|> \ No newline at end of file diff --git a/lib/llm/tests/deepseek_v32_encoding.rs b/lib/llm/tests/deepseek_v32_encoding.rs new file mode 100644 index 0000000000..2b2fca33f9 --- /dev/null +++ b/lib/llm/tests/deepseek_v32_encoding.rs @@ -0,0 +1,408 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +//! Tests for DeepSeek V3.2 encoding against official test data +//! +//! These tests use the official test files from: +//! 
https://huggingface.co/deepseek-ai/DeepSeek-V3.2/tree/main/encoding + +use dynamo_llm::preprocessor::prompt::deepseek_v32::{ThinkingMode, encode_messages}; +use serde_json::Value as JsonValue; +use std::fs; +use std::path::PathBuf; + +fn get_test_data_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/deepseek-v3.2") +} + +fn run_official_test(input_file: &str, output_file: &str) { + let test_dir = get_test_data_path(); + + // Load test input + let input_path = test_dir.join(input_file); + let input_data: JsonValue = serde_json::from_str( + &fs::read_to_string(&input_path) + .unwrap_or_else(|_| panic!("Failed to read {}", input_file)), + ) + .unwrap_or_else(|_| panic!("Failed to parse {}", input_file)); + + // Load expected output + let output_path = test_dir.join(output_file); + let expected_output = fs::read_to_string(&output_path) + .unwrap_or_else(|_| panic!("Failed to read {}", output_file)); + + // Extract messages and tools + let mut messages = input_data["messages"] + .as_array() + .expect("Missing messages") + .clone(); + + // Add tools to first message (system) if present + if let Some(tools) = input_data.get("tools") + && let Some(first_msg) = messages.get_mut(0) { + first_msg + .as_object_mut() + .unwrap() + .insert("tools".to_string(), tools.clone()); + } + + // Encode messages + let result = encode_messages( + &messages, + ThinkingMode::Thinking, + true, // add_bos_token + ) + .expect("Failed to encode messages"); + + // Compare outputs + let expected = expected_output.trim(); + let actual = result.trim(); + + if expected != actual { + println!("=== Test: {} ===", input_file); + + // Show first difference + let exp_lines: Vec<&str> = expected.lines().collect(); + let act_lines: Vec<&str> = actual.lines().collect(); + + for (i, (exp_line, act_line)) in exp_lines.iter().zip(act_lines.iter()).enumerate() { + if exp_line != act_line { + println!("Line {} differs:", i + 1); + println!(" Expected: {}", exp_line); + println!(" Actual: {}", act_line); + break; + } + } + + if exp_lines.len() != act_lines.len() { + println!("\nLine count mismatch:"); + println!(" Expected: {} lines", exp_lines.len()); + println!(" Actual: {} lines", act_lines.len()); + } + + panic!("Output does not match expected for {}", input_file); + } +} + +#[test] +fn test_official_basic_example() { + run_official_test("test_input.json", "test_output.txt"); +} + +#[test] +fn test_official_search_without_date() { + run_official_test( + "test_input_search_wo_date.json", + "test_output_search_wo_date.txt", + ); +} + +#[test] +fn test_official_search_with_date() { + run_official_test( + "test_input_search_w_date.json", + "test_output_search_w_date.txt", + ); +} + +#[test] +fn test_simple_conversation_no_tools() { + // Simple test without tools + let messages = serde_json::json!([ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"}, + {"role": "assistant", "content": "Hi! How can I help you today?"}, + {"role": "user", "content": "What is 2+2?"} + ]); + + let result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Check basic structure + assert!(result.starts_with("<|begin▁of▁sentence|>")); + assert!(result.contains("<|User|>Hello!<|Assistant|>")); + assert!(result.contains("Hi! 
How can I help you today?")); + assert!(result.contains("<|end▁of▁sentence|>")); +} + +#[test] +fn test_comprehensive_conversation_with_tools() { + // Comprehensive test covering all features with English text + let messages = serde_json::json!([ + { + "role": "system", + "content": "You are a helpful weather assistant.", + "tools": [ + { + "type": "function", + "function": { + "name": "get_datetime", + "description": "Get the current date and time", + "parameters": { + "type": "object", + "properties": { + "timezone": { + "type": "string", + "description": "The timezone, e.g. America/New_York, UTC" + } + }, + "required": ["timezone"] + } + } + }, + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather for a specific date and location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name, e.g. New York, San Francisco" + }, + "date": { + "type": "string", + "description": "The date in YYYY-MM-DD format" + } + }, + "required": ["location", "date"] + } + } + } + ] + }, + {"role": "user", "content": "What's the weather tomorrow in San Francisco and New York?"}, + { + "role": "assistant", + "reasoning_content": "User is asking about tomorrow's weather. I need to first get the current date to calculate tomorrow's date.", + "tool_calls": [{ + "id": "call_1", + "type": "function", + "function": { + "name": "get_datetime", + "arguments": "{\"timezone\": \"America/New_York\"}" + } + }] + }, + { + "role": "tool", + "tool_call_id": "call_1", + "content": "{\"current_date\": \"2024-01-15\", \"current_time\": \"14:30:00\", \"timezone\": \"America/New_York\"}" + }, + { + "role": "assistant", + "reasoning_content": "Now I know today is 2024-01-15, so tomorrow is 2024-01-16. Let me query the weather for both cities.", + "tool_calls": [ + { + "id": "call_2", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"San Francisco\", \"date\": \"2024-01-16\"}" + } + }, + { + "id": "call_3", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"New York\", \"date\": \"2024-01-16\"}" + } + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_2", + "content": "{\"location\": \"San Francisco\", \"date\": \"2024-01-16\", \"temperature_high\": \"65\", \"temperature_low\": \"55\", \"weather\": \"sunny\", \"humidity\": \"70%\"}" + }, + { + "role": "tool", + "tool_call_id": "call_3", + "content": "{\"location\": \"New York\", \"date\": \"2024-01-16\", \"temperature_high\": \"30\", \"temperature_low\": \"20\", \"weather\": \"cloudy\", \"humidity\": \"45%\"}" + }, + { + "role": "assistant", + "reasoning_content": "Got the weather data for both cities. Let me format a nice response for the user.", + "content": "Here's the weather forecast for tomorrow (January 16, 2024):\n\n**San Francisco**:\n- Weather: Sunny\n- High: 65°F\n- Low: 55°F\n- Humidity: 70%\n\n**New York**:\n- Weather: Cloudy\n- High: 30°F\n- Low: 20°F\n- Humidity: 45%\n\nSan Francisco will be warm and sunny, while New York will be cold and cloudy. Dress warmly if you're in New York!" 
+ } + ]); + + let result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Check all major components are present + assert!(result.starts_with("<|begin▁of▁sentence|>")); + assert!(result.contains("## Tools")); + assert!(result.contains("get_datetime")); + assert!(result.contains("get_weather")); + assert!(result.contains("<|User|>What's the weather tomorrow")); + assert!(result.contains("<|Assistant|>")); + assert!(result.contains("User is asking about tomorrow's weather")); + assert!(result.contains("")); + assert!(result.contains("<|DSML|function_calls>")); + assert!(result.contains("<|DSML|invoke name=\"get_datetime\">")); + assert!(result.contains( + "<|DSML|parameter name=\"timezone\" string=\"true\">America/New_York" + )); + assert!(result.contains("")); + assert!(result.contains("")); + assert!(result.contains("")); + assert!(result.contains("")); + assert!(result.contains("San Francisco")); + assert!(result.contains("New York")); + assert!(result.contains("<|end▁of▁sentence|>")); +} + +#[test] +fn test_with_reasoning_content() { + let messages = serde_json::json!([ + {"role": "user", "content": "Calculate 15 * 23"}, + { + "role": "assistant", + "content": "The answer is 345.", + "reasoning_content": "Let me compute this step by step: 15 * 23 = 15 * 20 + 15 * 3 = 300 + 45 = 345" + } + ]); + + let result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Should contain thinking tags with reasoning + assert!(result.contains("")); + assert!(result.contains("")); + assert!(result.contains("Let me compute this step by step")); +} + +#[test] +fn test_tool_call_formatting() { + let messages = serde_json::json!([ + {"role": "user", "content": "What's the weather in Beijing?"}, + { + "role": "assistant", + "content": "", + "tool_calls": [{ + "id": "call_123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"Beijing\", \"unit\": \"celsius\"}" + } + }] + } + ]); + + let result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Check DSML format + assert!(result.contains("<|DSML|function_calls>")); + assert!(result.contains("<|DSML|invoke name=\"get_weather\">")); + assert!(result.contains( + "<|DSML|parameter name=\"location\" string=\"true\">Beijing" + )); + assert!( + result.contains( + "<|DSML|parameter name=\"unit\" string=\"true\">celsius" + ) + ); + assert!(result.contains("")); + assert!(result.contains("")); +} + +#[test] +fn test_tool_results() { + let messages = serde_json::json!([ + {"role": "user", "content": "Check weather"}, + { + "role": "assistant", + "content": "", + "tool_calls": [{ + "id": "call_123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"Beijing\"}" + } + }] + }, + { + "role": "tool", + "tool_call_id": "call_123", + "content": "{\"temperature\": \"20C\", \"condition\": \"sunny\"}" + } + ]); + + let result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Check function_results wrapper + assert!(result.contains("")); + assert!(result.contains("")); + assert!(result.contains("{\"temperature\": \"20C\", \"condition\": \"sunny\"}")); + assert!(result.contains("")); + assert!(result.contains("")); +} + +#[test] +fn test_multiple_tool_calls() { + let messages = serde_json::json!([ + {"role": "user", "content": "Weather 
in Beijing and Shanghai"}, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_1", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"Beijing\"}" + } + }, + { + "id": "call_2", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"Shanghai\"}" + } + } + ] + } + ]); + + let result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Should contain both tool calls + assert!(result.contains("Beijing")); + assert!(result.contains("Shanghai")); + // Should be in same function_calls block + assert_eq!(result.matches("<|DSML|function_calls>").count(), 1); + assert_eq!(result.matches("").count(), 1); + // But two invocations + assert_eq!(result.matches("<|DSML|invoke").count(), 2); +} + +#[test] +fn test_chat_mode_vs_thinking_mode() { + let messages = serde_json::json!([ + {"role": "user", "content": "Hello"} + ]); + + let chat_result = encode_messages(messages.as_array().unwrap(), ThinkingMode::Chat, true) + .expect("Failed to encode"); + + let thinking_result = + encode_messages(messages.as_array().unwrap(), ThinkingMode::Thinking, true) + .expect("Failed to encode"); + + // Chat mode should have , thinking mode should have + assert!(chat_result.contains("")); + assert!(!chat_result.contains("")); + + assert!(thinking_result.contains("")); +} From 48048b5b6333f34cbebd663a178e9997311513a9 Mon Sep 17 00:00:00 2001 From: Vladislav Nosivskoy Date: Mon, 8 Dec 2025 17:08:40 +0300 Subject: [PATCH 2/5] use original json|txt files Signed-off-by: Vladislav Nosivskoy --- .pre-commit-config.yaml | 1 + .../tests/data/deepseek-v3.2/test_input.json | 43 +- .../tests/data/deepseek-v3.2/test_output.txt | 74 +- .../test_output_search_w_date.txt | 2498 ++++++++--------- .../test_output_search_wo_date.txt | 884 +++--- 5 files changed, 1750 insertions(+), 1750 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ddcd084776..16ecc4e756 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -64,6 +64,7 @@ repos: - id: mixed-line-ending - id: requirements-txt-fixer - id: trailing-whitespace + exclude: lib/llm/tests/data/deepseek-v3.2/.*\.txt$ # NOTE: removing from pre commit # will move to gitlab ci to run in proper diff --git a/lib/llm/tests/data/deepseek-v3.2/test_input.json b/lib/llm/tests/data/deepseek-v3.2/test_input.json index dc22e71314..0582611470 100644 --- a/lib/llm/tests/data/deepseek-v3.2/test_input.json +++ b/lib/llm/tests/data/deepseek-v3.2/test_input.json @@ -10,7 +10,7 @@ "properties": { "timezone": { "type": "string", - "description": "The timezone, e.g. America/New_York, UTC" + "description": "The timezone, e.g. Asia/Shanghai, UTC" } }, "required": ["timezone"] @@ -27,7 +27,7 @@ "properties": { "location": { "type": "string", - "description": "The city name, e.g. New York, San Francisco" + "description": "The city name, e.g. Beijing, Hangzhou" }, "date": { "type": "string", @@ -46,17 +46,17 @@ }, { "role": "user", - "content": "What's the weather tomorrow in San Francisco and New York?" + "content": "明天杭州和北京的天气怎么样?" }, { "role": "assistant", - "reasoning_content": "User is asking about tomorrow's weather. 
I need to first get the current date to calculate tomorrow's date.", + "reasoning_content": "用户询问明天的天气,我需要先获取当前日期来计算明天的日期📅", "tool_calls": [ { "id": "call_xK9mN3pL2qR8vT5wY6hZ1aB4", "type": "function", "function": { - "arguments": "{\"timezone\": \"America/New_York\"}", + "arguments": "{\"timezone\": \"Asia/Shanghai\"}", "name": "get_datetime" } } @@ -65,17 +65,17 @@ { "tool_call_id": "call_xK9mN3pL2qR8vT5wY6hZ1aB4", "role": "tool", - "content": "{\"current_date\": \"2024-01-15\", \"current_time\": \"14:30:00\", \"timezone\": \"America/New_York\"}" + "content": "{\"current_date\": \"2024-01-15\", \"current_time\": \"14:30:00\", \"timezone\": \"Asia/Shanghai\"}" }, { "role": "assistant", - "reasoning_content": "Now I know today is 2024-01-15, so tomorrow is 2024-01-16. Let me query the weather for San Francisco and New York.", + "reasoning_content": "现在知道今天是2024-01-15,明天就是2024-01-16。接下来查询杭州和北京明天的天气🌤️", "tool_calls": [ { "id": "call_bN7kR9mX3pQ2wL5vY8jZ4cD6", "type": "function", "function": { - "arguments": "{\"location\": \"San Francisco\", \"date\": \"2024-01-16\"}", + "arguments": "{\"location\": \"Hangzhou\", \"date\": \"2024-01-16\"}", "name": "get_weather" } }, @@ -83,7 +83,7 @@ "id": "call_dP9mL7kX5rT4yN3wZ2hV8eF1", "type": "function", "function": { - "arguments": "{\"location\": \"New York\", \"date\": \"2024-01-16\"}", + "arguments": "{\"location\": \"Beijing\", \"date\": \"2024-01-16\"}", "name": "get_weather" } } @@ -92,31 +92,31 @@ { "tool_call_id": "call_bN7kR9mX3pQ2wL5vY8jZ4cD6", "role": "tool", - "content": "{\"location\": \"San Francisco\", \"date\": \"2024-01-16\", \"temperature_high\": \"65\", \"temperature_low\": \"55\", \"weather\": \"partly cloudy\", \"humidity\": \"70%\"}" + "content": "{\"location\": \"Hangzhou\", \"date\": \"2024-01-16\", \"temperature_high\": \"12\", \"temperature_low\": \"5\", \"weather\": \"多云\", \"humidity\": \"65%\"}" }, { "tool_call_id": "call_dP9mL7kX5rT4yN3wZ2hV8eF1", "role": "tool", - "content": "{\"location\": \"New York\", \"date\": \"2024-01-16\", \"temperature_high\": \"28\", \"temperature_low\": \"15\", \"weather\": \"sunny\", \"humidity\": \"35%\"}" + "content": "{\"location\": \"Beijing\", \"date\": \"2024-01-16\", \"temperature_high\": \"-2\", \"temperature_low\": \"-8\", \"weather\": \"晴\", \"humidity\": \"30%\"}" }, { "role": "assistant", - "reasoning_content": "Retrieved weather data for both cities. Now I'll format a nice response for the user.", - "content": "Based on the forecast, here's tomorrow's weather (January 16, 2024):\n\n**San Francisco**:\n- Weather: Partly cloudy\n- High: 65°F\n- Low: 55°F\n- Humidity: 70%\n\n**New York**:\n- Weather: Sunny\n- High: 28°F\n- Low: 15°F\n- Humidity: 35%\n\nSan Francisco will be mild and partly cloudy, while New York will be cold but sunny. If you're in New York, dress warmly!" + "reasoning_content": "已获取两个城市明天的天气信息,现在整理给用户✨", + "content": "根据查询结果,明天(2024年1月16日)的天气情况如下:\n\n**杭州**:\n- 天气:多云\n- 最高温度:12°C\n- 最低温度:5°C\n- 湿度:65%\n\n**北京**:\n- 天气:晴\n- 最高温度:-2°C\n- 最低温度:-8°C\n- 湿度:30%\n\n杭州明天会比较温暖但有些多云,而北京会很冷但是晴天。建议在北京的朋友要注意保暖!" }, { "role": "user", - "content": "Thanks! What about the day after tomorrow?" + "content": "谢谢!那后天呢?" }, { "role": "assistant", - "reasoning_content": "User is now asking about the day after tomorrow, which is 2024-01-17. 
I can query directly since I already know today's date.", + "reasoning_content": "用户现在问后天的天气,后天是2024-01-17,我可以直接查询(因为已知今天日期)🗓️", "tool_calls": [ { "id": "call_fR3nK8mV7pL4xT2yW9jB5gH3", "type": "function", "function": { - "arguments": "{\"location\": \"San Francisco\", \"date\": \"2024-01-17\"}", + "arguments": "{\"location\": \"Hangzhou\", \"date\": \"2024-01-17\"}", "name": "get_weather" } }, @@ -124,7 +124,7 @@ "id": "call_hT5pN2kY9rV6zL3wX1mD7jK8", "type": "function", "function": { - "arguments": "{\"location\": \"New York\", \"date\": \"2024-01-17\"}", + "arguments": "{\"location\": \"Beijing\", \"date\": \"2024-01-17\"}", "name": "get_weather" } } @@ -133,18 +133,17 @@ { "tool_call_id": "call_fR3nK8mV7pL4xT2yW9jB5gH3", "role": "tool", - "content": "{\"location\": \"San Francisco\", \"date\": \"2024-01-17\", \"temperature_high\": \"68\", \"temperature_low\": \"58\", \"weather\": \"light rain\", \"humidity\": \"85%\"}" + "content": "{\"location\": \"Hangzhou\", \"date\": \"2024-01-17\", \"temperature_high\": \"15\", \"temperature_low\": \"8\", \"weather\": \"小雨\", \"humidity\": \"80%\"}" }, { "tool_call_id": "call_hT5pN2kY9rV6zL3wX1mD7jK8", "role": "tool", - "content": "{\"location\": \"New York\", \"date\": \"2024-01-17\", \"temperature_high\": \"32\", \"temperature_low\": \"22\", \"weather\": \"cloudy\", \"humidity\": \"50%\"}" + "content": "{\"location\": \"Beijing\", \"date\": \"2024-01-17\", \"temperature_high\": \"0\", \"temperature_low\": \"-6\", \"weather\": \"多云\", \"humidity\": \"45%\"}" }, { "role": "assistant", - "reasoning_content": "Got the weather data for the day after tomorrow. Let me format this for the user.", - "content": "Here's the forecast for the day after tomorrow (January 17, 2024):\n\n**San Francisco**:\n- Weather: Light rain\n- High: 68°F\n- Low: 58°F\n- Humidity: 85%\n\n**New York**:\n- Weather: Cloudy\n- High: 32°F\n- Low: 22°F\n- Humidity: 50%\n\nSan Francisco will have light rain with temperatures rising slightly, so bring an umbrella. New York will be a bit warmer but still cold and cloudy, so continue to bundle up." + "reasoning_content": "获取到后天的天气数据,整理回复给用户📝", + "content": "后天(2024年1月17日)的天气情况:\n\n**杭州**:\n- 天气:小雨\n- 最高温度:15°C\n- 最低温度:8°C\n- 湿度:80%\n\n**北京**:\n- 天气:多云\n- 最高温度:0°C\n- 最低温度:-6°C\n- 湿度:45%\n\n杭州后天会有小雨,温度略有回升,记得带伞。北京会稍微暖和一点,但依然很冷,请继续做好保暖措施。" } ] } - diff --git a/lib/llm/tests/data/deepseek-v3.2/test_output.txt b/lib/llm/tests/data/deepseek-v3.2/test_output.txt index 54b2fb8f3e..df9c5984c4 100644 --- a/lib/llm/tests/data/deepseek-v3.2/test_output.txt +++ b/lib/llm/tests/data/deepseek-v3.2/test_output.txt @@ -30,83 +30,83 @@ If the thinking_mode is enabled, then after function results you should strongly Here are the functions available in JSONSchema format: -{"name": "get_datetime", "description": "Get the current date and time", "parameters": {"type": "object", "properties": {"timezone": {"type": "string", "description": "The timezone, e.g. America/New_York, UTC"}}, "required": ["timezone"]}} -{"name": "get_weather", "description": "Get the weather for a specific date and location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city name, e.g. New York, San Francisco"}, "date": {"type": "string", "description": "The date in YYYY-MM-DD format"}}, "required": ["location", "date"]}} +{"name": "get_datetime", "description": "Get the current date and time", "parameters": {"type": "object", "properties": {"timezone": {"type": "string", "description": "The timezone, e.g. 
Asia/Shanghai, UTC"}}, "required": ["timezone"]}} +{"name": "get_weather", "description": "Get the weather for a specific date and location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city name, e.g. Beijing, Hangzhou"}, "date": {"type": "string", "description": "The date in YYYY-MM-DD format"}}, "required": ["location", "date"]}} -<|User|>What's the weather tomorrow in San Francisco and New York?<|Assistant|> +<|User|>明天杭州和北京的天气怎么样?<|Assistant|> <|DSML|function_calls> <|DSML|invoke name="get_datetime"> -<|DSML|parameter name="timezone" string="true">America/New_York +<|DSML|parameter name="timezone" string="true">Asia/Shanghai <|end▁of▁sentence|> -{"current_date": "2024-01-15", "current_time": "14:30:00", "timezone": "America/New_York"} +{"current_date": "2024-01-15", "current_time": "14:30:00", "timezone": "Asia/Shanghai"} <|DSML|function_calls> <|DSML|invoke name="get_weather"> -<|DSML|parameter name="location" string="true">San Francisco +<|DSML|parameter name="location" string="true">Hangzhou <|DSML|parameter name="date" string="true">2024-01-16 <|DSML|invoke name="get_weather"> -<|DSML|parameter name="location" string="true">New York +<|DSML|parameter name="location" string="true">Beijing <|DSML|parameter name="date" string="true">2024-01-16 <|end▁of▁sentence|> -{"location": "San Francisco", "date": "2024-01-16", "temperature_high": "65", "temperature_low": "55", "weather": "partly cloudy", "humidity": "70%"} -{"location": "New York", "date": "2024-01-16", "temperature_high": "28", "temperature_low": "15", "weather": "sunny", "humidity": "35%"} +{"location": "Hangzhou", "date": "2024-01-16", "temperature_high": "12", "temperature_low": "5", "weather": "多云", "humidity": "65%"} +{"location": "Beijing", "date": "2024-01-16", "temperature_high": "-2", "temperature_low": "-8", "weather": "晴", "humidity": "30%"} -Based on the forecast, here's tomorrow's weather (January 16, 2024): +根据查询结果,明天(2024年1月16日)的天气情况如下: -**San Francisco**: -- Weather: Partly cloudy -- High: 65°F -- Low: 55°F -- Humidity: 70% +**杭州**: +- 天气:多云 +- 最高温度:12°C +- 最低温度:5°C +- 湿度:65% -**New York**: -- Weather: Sunny -- High: 28°F -- Low: 15°F -- Humidity: 35% +**北京**: +- 天气:晴 +- 最高温度:-2°C +- 最低温度:-8°C +- 湿度:30% -San Francisco will be mild and partly cloudy, while New York will be cold but sunny. If you're in New York, dress warmly!<|end▁of▁sentence|><|User|>Thanks! What about the day after tomorrow?<|Assistant|>User is now asking about the day after tomorrow, which is 2024-01-17. I can query directly since I already know today's date. 
+杭州明天会比较温暖但有些多云,而北京会很冷但是晴天。建议在北京的朋友要注意保暖!<|end▁of▁sentence|><|User|>谢谢!那后天呢?<|Assistant|>用户现在问后天的天气,后天是2024-01-17,我可以直接查询(因为已知今天日期)🗓️ <|DSML|function_calls> <|DSML|invoke name="get_weather"> -<|DSML|parameter name="location" string="true">San Francisco +<|DSML|parameter name="location" string="true">Hangzhou <|DSML|parameter name="date" string="true">2024-01-17 <|DSML|invoke name="get_weather"> -<|DSML|parameter name="location" string="true">New York +<|DSML|parameter name="location" string="true">Beijing <|DSML|parameter name="date" string="true">2024-01-17 <|end▁of▁sentence|> -{"location": "San Francisco", "date": "2024-01-17", "temperature_high": "68", "temperature_low": "58", "weather": "light rain", "humidity": "85%"} -{"location": "New York", "date": "2024-01-17", "temperature_high": "32", "temperature_low": "22", "weather": "cloudy", "humidity": "50%"} +{"location": "Hangzhou", "date": "2024-01-17", "temperature_high": "15", "temperature_low": "8", "weather": "小雨", "humidity": "80%"} +{"location": "Beijing", "date": "2024-01-17", "temperature_high": "0", "temperature_low": "-6", "weather": "多云", "humidity": "45%"} -Got the weather data for the day after tomorrow. Let me format this for the user.Here's the forecast for the day after tomorrow (January 17, 2024): +获取到后天的天气数据,整理回复给用户📝后天(2024年1月17日)的天气情况: -**San Francisco**: -- Weather: Light rain -- High: 68°F -- Low: 58°F -- Humidity: 85% +**杭州**: +- 天气:小雨 +- 最高温度:15°C +- 最低温度:8°C +- 湿度:80% -**New York**: -- Weather: Cloudy -- High: 32°F -- Low: 22°F -- Humidity: 50% +**北京**: +- 天气:多云 +- 最高温度:0°C +- 最低温度:-6°C +- 湿度:45% -San Francisco will have light rain with temperatures rising slightly, so bring an umbrella. New York will be a bit warmer but still cold and cloudy, so continue to bundle up.<|end▁of▁sentence|> +杭州后天会有小雨,温度略有回升,记得带伞。北京会稍微暖和一点,但依然很冷,请继续做好保暖措施。<|end▁of▁sentence|> diff --git a/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt b/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt index 6c5c7519f0..6349a4bb20 100644 --- a/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt +++ b/lib/llm/tests/data/deepseek-v3.2/test_output_search_w_date.txt @@ -60,82 +60,82 @@ Here are the functions available in JSONSchema format: [0] search agent benchmark 2024 (Search_Results/search agent benchmark 2024) **viewing lines [0 - 39] of 39** -L0: +L0: L1: URL: Search_Results/search agent benchmark 2024 L2: # Search Results -L3: +L3: L4: * 【0†HSCodeComp: A Realistic and Expert-level Benchmark for ...; publish_date: -L5: none†arxiv.org】 Oct 22, 2025 — To fill this gap, we introduce HSCodeComp, the -L6: first realistic, expert-level e-commerce benchmark designed to evaluate deep +L5: none†arxiv.org】 Oct 22, 2025 — To fill this gap, we introduce HSCodeComp, the +L6: first realistic, expert-level e-commerce benchmark designed to evaluate deep L7: search agents in ... -L8: * 【1†open-compass/GTA - A Benchmark for General Tool Agents; publish_date: +L8: * 【1†open-compass/GTA - A Benchmark for General Tool Agents; publish_date: L9: none†github.com】 GTA is a benchmark to evaluate the tool-use capability of LLM- L10: based agents in real-world scenarios. It features three main aspects. 
-L11: * 【2†Benchmarking real-time trust scoring across five AI Agent ...; +L11: * 【2†Benchmarking real-time trust scoring across five AI Agent ...; L12: publish_date: none†cleanlab.ai】 Aug 20, 2025 — This article evaluates 5 AI Agent L13: architectures over the BOLAA (ICLR 2024) benchmark, and assesses the effects of L14: adding automated trust ... -L15: * 【3†10 AI agent benchmarks; publish_date: none†www.evidentlyai.com】 Jul 11, -L16: 2025 — We put together 10 AI agent benchmarks designed to assess how well +L15: * 【3†10 AI agent benchmarks; publish_date: none†www.evidentlyai.com】 Jul 11, +L16: 2025 — We put together 10 AI agent benchmarks designed to assess how well L17: different LLMs perform as agents in real-world scenarios, ... -L18: * 【4†A state-of-the-art search API purpose-built for agents; publish_date: -L19: none†parallel.ai】 Jul 31, 2025 — To evaluate real-world performance of the -L20: Parallel Search MCP Server, we created the WISER-Search benchmark which blends +L18: * 【4†A state-of-the-art search API purpose-built for agents; publish_date: +L19: none†parallel.ai】 Jul 31, 2025 — To evaluate real-world performance of the +L20: Parallel Search MCP Server, we created the WISER-Search benchmark which blends L21: WISER-Fresh (queries ... -L22: * 【5†AI Agent Benchmarks are Broken; publish_date: none†medium.com】 We break -L23: down the failure modes in current AI agent benchmarks and introduce a checklist +L22: * 【5†AI Agent Benchmarks are Broken; publish_date: none†medium.com】 We break +L23: down the failure modes in current AI agent benchmarks and introduce a checklist L24: that minimizes the gamability of AI agent benchmarks. -L25: * 【6†Benchmarks and Tree Search for Multimodal LLM Web Agents; publish_date: -L26: none†dpfried.github.io】 2024, When is Tree Search Useful? ○ Dealing with -L27: destructive actions. ○ Some things on the web are very difficult to undo, e.g., +L25: * 【6†Benchmarks and Tree Search for Multimodal LLM Web Agents; publish_date: +L26: none†dpfried.github.io】 2024, When is Tree Search Useful? ○ Dealing with +L27: destructive actions. ○ Some things on the web are very difficult to undo, e.g., L28: ordering an item. 56. -L29: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: -L30: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact +L29: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: +L30: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact L31: with (simulated) human users and programmatic APIs while following domain- L32: specific policies in a consistent ... L33: * 【8†Browser Use = state of the art Web Agent; publish_date: none†browser- L34: use.com】 Dec 15, 2024 — Browser Use has achieved state-of-the-art performance on -L35: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 +L35: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 L36: diverse web tasks. -L37: * 【9†FutureSearch Benchmarks; publish_date: none†evals.futuresearch.ai】 Find -L38: the original source of a given claim. Example: From , more than 8 out of 1000 +L37: * 【9†FutureSearch Benchmarks; publish_date: none†evals.futuresearch.ai】 Find +L38: the original source of a given claim. Example: From , more than 8 out of 1000 L39: users clicked on a phishing link monthly in 2024, up 190% vs 2023. 
[1] 搜索智能体 基准测试 (Search_Results/搜索智能体 基准测试) **viewing lines [0 - 33] of 33** -L0: +L0: L1: URL: Search_Results/搜索智能体 基准测试 L2: # Search Results -L3: -L4: * 【0†WideSearch:揭示AI 智能体缺失的「广度」能力; publish_date: none†zhuanlan.zhihu.com】 Aug -L5: 16, 2025 — 为系统评估智能体在该任务上的能力,论文构建了第一个专门的基准测试 WideSearch ,包含200 个源于真实世界、横跨18 +L3: +L4: * 【0†WideSearch:揭示AI 智能体缺失的「广度」能力; publish_date: none†zhuanlan.zhihu.com】 Aug +L5: 16, 2025 — 为系统评估智能体在该任务上的能力,论文构建了第一个专门的基准测试 WideSearch ,包含200 个源于真实世界、横跨18 L6: 个领域的高质量任务。 通过对超过10 个 ... L7: * 【1†GAIA: 一个严苛的智能体基准- HuggingFace; publish_date: none†www.cnblogs.com】 Jul 9, -L8: 2024 — 我们使用一个用库构建的代码智能体 在GAIA 基准上进行测试,这可以说是最困难、最全面的智能体基准测试……最终我们取得了第一名的成绩! +L8: 2024 — 我们使用一个用库构建的代码智能体 在GAIA 基准上进行测试,这可以说是最困难、最全面的智能体基准测试……最终我们取得了第一名的成绩! L9: GAIA: 一个严苛的 ... -L10: * 【2†AI搜索智能体遭遇新挑战:滑铁卢大学团队提出更公平透明的 ...; publish_date: none†www.techwalker.com】 -L11: Aug 14, 2025 — +L10: * 【2†AI搜索智能体遭遇新挑战:滑铁卢大学团队提出更公平透明的 ...; publish_date: none†www.techwalker.com】 +L11: Aug 14, 2025 — L12: 目前评测AI搜索智能体主要依靠BrowseComp这样的基准测试,它就像一场实时的开卷考试,让AI在真实的网络环境中搜索信息来回答复杂问题。听起来很合理 ... -L13: * 【3†Agentic AI基础设施实践经验系列(六):Agent质量评估 - AWS; publish_date: -L14: none†aws.amazon.com】 Sep 19, 2025 — TAU-bench +L13: * 【3†Agentic AI基础设施实践经验系列(六):Agent质量评估 - AWS; publish_date: +L14: none†aws.amazon.com】 Sep 19, 2025 — TAU-bench L15: 是一个评估AI智能体在真实世界环境中可靠性的基准测试。它评估智能体是否能够在动态的多轮对话中与用户进行交互,理解需求并完成任务。T-bench ... -L16: * 【4†DeepAgent:能自己找工具的通用推理智能体 - 高瓴人工智能学院; publish_date: none†ai.ruc.edu.cn】 -L17: Nov 6, 2025 — 在八大基准测试中,DeepAgent在绝大多数任务上全面领先所有基线模型。 +L16: * 【4†DeepAgent:能自己找工具的通用推理智能体 - 高瓴人工智能学院; publish_date: none†ai.ruc.edu.cn】 +L17: Nov 6, 2025 — 在八大基准测试中,DeepAgent在绝大多数任务上全面领先所有基线模型。 L18: 开放环境优势:在最具挑战的“开放工具检索”场景下(如ToolBench),其成功率达到64%,远 ... -L19: * 【5†BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试; publish_date: none†blog.csdn.net】 Sep -L20: 22, 2025 — 该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。 核心特点. +L19: * 【5†BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试; publish_date: none†blog.csdn.net】 Sep +L20: 22, 2025 — 该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。 核心特点. L21: 挑战性问题设计:BrowseComp的问题设计遵循严格的难度标准:. 人类创建者确保 ... -L22: * 【6†什么是GAIA?; publish_date: none†huggingface.co】 什么是GAIA? GAIA +L22: * 【6†什么是GAIA?; publish_date: none†huggingface.co】 什么是GAIA? GAIA L23: 是一个用于评估AI助手在需要核心能力组合的真实世界任务上的表现的基准,这些核心能力包括推理、多模态理解、网页浏览和熟练的工具使用。 -L24: * 【7†SuperCLUE-Agent: Agent智能体中文原生任务能力测评基准; publish_date: -L25: none†www.cluebenchmarks.com】 AI -L26: Agent是一个能够自主理解、规划决策和执行复杂任务的智能体,有代表性的Agent比如AutoGPT、AgentGPT等。 +L24: * 【7†SuperCLUE-Agent: Agent智能体中文原生任务能力测评基准; publish_date: +L25: none†www.cluebenchmarks.com】 AI +L26: Agent是一个能够自主理解、规划决策和执行复杂任务的智能体,有代表性的Agent比如AutoGPT、AgentGPT等。 L27: 现有关于Agent能力的测评,主要是在英文场景或任务的测评。目前还 ... -L28: * 【8†Gaia2 与ARE:赋能社区的智能体评测; publish_date: none†hub.baai.ac.cn】 Sep 24, 2025 — +L28: * 【8†Gaia2 与ARE:赋能社区的智能体评测; publish_date: none†hub.baai.ac.cn】 Sep 24, 2025 — L29: 快速评估任意智能体:基于真实或模拟数据,测试不同规则、工具、内容和验证方式下的表现 · 测试智能体的 工具调用与编排能力:可结合本地应用或MCP 工具 · 生成 L30: ... -L31: * 【9†许多AI 智能体评测基准并不可靠; publish_date: none†my.oschina.net】 Aug 13, 2025 — -L32: 当前AI智能体基准测试存在严重缺陷,亟需更严谨的评估框架。作者提出AI智能体基准测试核查清单(ABC),包含43个项目,旨在识别基准测试潜在陷阱,构建可靠评估 +L31: * 【9†许多AI 智能体评测基准并不可靠; publish_date: none†my.oschina.net】 Aug 13, 2025 — +L32: 当前AI智能体基准测试存在严重缺陷,亟需更严谨的评估框架。作者提出AI智能体基准测试核查清单(ABC),包含43个项目,旨在识别基准测试潜在陷阱,构建可靠评估 L33: ... @@ -164,298 +164,298 @@ L33: ... 
[2] (https://ar5iv.org/html/2510.19631v1) **viewing lines [0 - 119] of 983** -L0: +L0: L1: URL: https://ar5iv.org/html/2510.19631v1 -L2: # HSCodeComp: A Realistic and Expert-level Benchmark for Deep Search Agents in +L2: # HSCodeComp: A Realistic and Expert-level Benchmark for Deep Search Agents in L3: Hierarchical Rule Application -L4: -L5: Yiqian Yang† Tian Lan† Qianghuai Jia∗ Li Zhu Hui Jiang Hang Zhu Longyue Wang +L4: +L5: Yiqian Yang† Tian Lan† Qianghuai Jia∗ Li Zhu Hui Jiang Hang Zhu Longyue Wang L6: Weihua Luo Kaifu Zhang -L7: -L8: Alibaba International Digital Commerce∗* Corresponding Author: Qianghuai Jia +L7: +L8: Alibaba International Digital Commerce∗* Corresponding Author: Qianghuai Jia L9: (qianghuai.jqh@alibaba-inc.com) L10: †\dagger Equal Contribution: Yiqian Yang -L11: +L11: L12: Tian Lan -L13: +L13: L14: ###### Abstract -L15: +L15: L16: Abstract -L17: +L17: L18: Effective deep search agents must not only access open-domain and domain- -L19: specific knowledge but also apply complex rules—such as legal clauses, medical -L20: manuals and tariff rules. These rules often feature vague boundaries and +L19: specific knowledge but also apply complex rules—such as legal clauses, medical +L20: manuals and tariff rules. These rules often feature vague boundaries and L21: implicit logic relationships, making precise application challenging for agents. -L22: However, this critical capability is largely overlooked by current agent -L23: benchmarks. To fill this gap, we introduce HSCodeComp, the first realistic, -L24: expert-level e-commerce benchmark designed to evaluate deep search agents in -L25: hierarchical rule application. In this task, the deep reasoning process of -L26: agents is guided by these rules to predict 10-digit Harmonized System Code -L27: (HSCode) of products with noisy but realistic descriptions. These codes, +L22: However, this critical capability is largely overlooked by current agent +L23: benchmarks. To fill this gap, we introduce HSCodeComp, the first realistic, +L24: expert-level e-commerce benchmark designed to evaluate deep search agents in +L25: hierarchical rule application. In this task, the deep reasoning process of +L26: agents is guided by these rules to predict 10-digit Harmonized System Code +L27: (HSCode) of products with noisy but realistic descriptions. These codes, L28: established by the World Customs Organization, are vital for global supply chain -L29: efficiency. Built from real-world data collected from large-scale e-commerce -L30: platforms, our proposed HSCodeComp comprises 632 product entries spanning -L31: diverse product categories, with these HSCodes annotated by several human +L29: efficiency. Built from real-world data collected from large-scale e-commerce +L30: platforms, our proposed HSCodeComp comprises 632 product entries spanning +L31: diverse product categories, with these HSCodes annotated by several human L32: experts. Extensive experimental results on several state-of-the-art LLMs, open- -L33: source, and closed-source agents reveal a huge performance gap: best agent -L34: achieves only 46.8% 10-digit accuracy, far below human experts at 95.0%. -L35: Besides, detailed analysis demonstrates the challenges of hierarchical rule +L33: source, and closed-source agents reveal a huge performance gap: best agent +L34: achieves only 46.8% 10-digit accuracy, far below human experts at 95.0%. 
+L35: Besides, detailed analysis demonstrates the challenges of hierarchical rule L36: application, and test-time scaling fails to improve performance further. -L37: +L37: L38: ## 1 Introduction -L39: +L39: L40: Deep search agents have demonstrated significant value in solving complex real- -L41: world problems, where robust external knowledge utilization constitutes a -L42: critical capability [Wu et al., 2025, Tao et al., 2025, Li et al., 2025b]. To +L41: world problems, where robust external knowledge utilization constitutes a +L42: critical capability [Wu et al., 2025, Tao et al., 2025, Li et al., 2025b]. To L43: evaluate this capability, numerous established benchmarks are proposed to assess -L44: agents in utilizing open-domain data (e.g., GAIA [Mialon et al., 2023b] and -L45: BrowseComp [Wei et al., 2025]) and domain-specific data (e.g., WebMall [Peeters -L46: et al., 2025a], FinSearchComp [Hu et al., 2025a] and MedBrowseComp [Yu et al., +L44: agents in utilizing open-domain data (e.g., GAIA [Mialon et al., 2023b] and +L45: BrowseComp [Wei et al., 2025]) and domain-specific data (e.g., WebMall [Peeters +L46: et al., 2025a], FinSearchComp [Hu et al., 2025a] and MedBrowseComp [Yu et al., L47: 2025b]). -L48: -L49: Beyond open-domain and domain-specific data, agents also need to effectively -L50: apply rules that encode human expert knowledge, particularly in scenarios like -L51: law, medical and e-commerce [Li et al., 2025a, Chen et al., 2025b, Yao et al., -L52: 2022, Chollet et al., 2025]. For instance, legal case adjudication require -L53: interpreting abstract legal provisions, and accurate e-commerce product -L54: classification in depends on tariff rules [Grainger, 2024]. Previous works have -L55: defined rule application as using specific logical rules with supporting facts -L56: to derive conclusions [Wang et al., 2024, Servantez et al., 2024]. In contrast, -L57: we define it as a core capability for deep search agents, where human-written -L58: rules are systematically applied to guide complex reasoning and decision-making -L59: [Sadowski and Chudziak, 2025]. Building on this observation, we categorize -L60: knowledge data for deep search agents into three levels (Figure 1, left), with -L61: increasing knowledge complexity: (1) Level 1: Open-domain Data - Tests -L62: understanding and deep reasoning abilities of agents on long-form web content. -L63: Established benchmarks include GAIA [Mialon et al., 2023b] and BrowseComp [Wei -L64: et al., 2025]; (2) Level 2: Structured Data - Assesses agents to precisely -L65: utilize structured data such as databases and knowledge graphs, as seen in -L66: domain-specific benchmarks like WebMall [Peeters et al., 2025a], MedBrowseComp -L67: [Chen et al., 2025b] and FinSearchComp [Hu et al., 2025a]; (3) Level 3: Rule -L68: Data - Evaluates agents to apply complex and abstract rules [Chollet et al., -L69: 2025]. This level presents two key challenges: (a) making accurate decisions -L70: when rules contain vague natural language descriptions [Sadowski and Chudziak, -L71: 2025]; and (b) reasoning about logical dependencies among rules, such as -L72: exception clauses and cross-category relationships [Guha et al., 2023]. 
Despite -L73: the importance of rule application in real-world scenarios, current agent +L48: +L49: Beyond open-domain and domain-specific data, agents also need to effectively +L50: apply rules that encode human expert knowledge, particularly in scenarios like +L51: law, medical and e-commerce [Li et al., 2025a, Chen et al., 2025b, Yao et al., +L52: 2022, Chollet et al., 2025]. For instance, legal case adjudication require +L53: interpreting abstract legal provisions, and accurate e-commerce product +L54: classification in depends on tariff rules [Grainger, 2024]. Previous works have +L55: defined rule application as using specific logical rules with supporting facts +L56: to derive conclusions [Wang et al., 2024, Servantez et al., 2024]. In contrast, +L57: we define it as a core capability for deep search agents, where human-written +L58: rules are systematically applied to guide complex reasoning and decision-making +L59: [Sadowski and Chudziak, 2025]. Building on this observation, we categorize +L60: knowledge data for deep search agents into three levels (Figure 1, left), with +L61: increasing knowledge complexity: (1) Level 1: Open-domain Data - Tests +L62: understanding and deep reasoning abilities of agents on long-form web content. +L63: Established benchmarks include GAIA [Mialon et al., 2023b] and BrowseComp [Wei +L64: et al., 2025]; (2) Level 2: Structured Data - Assesses agents to precisely +L65: utilize structured data such as databases and knowledge graphs, as seen in +L66: domain-specific benchmarks like WebMall [Peeters et al., 2025a], MedBrowseComp +L67: [Chen et al., 2025b] and FinSearchComp [Hu et al., 2025a]; (3) Level 3: Rule +L68: Data - Evaluates agents to apply complex and abstract rules [Chollet et al., +L69: 2025]. This level presents two key challenges: (a) making accurate decisions +L70: when rules contain vague natural language descriptions [Sadowski and Chudziak, +L71: 2025]; and (b) reasoning about logical dependencies among rules, such as +L72: exception clauses and cross-category relationships [Guha et al., 2023]. Despite +L73: the importance of rule application in real-world scenarios, current agent L74: benchmarks largely overlook its evaluation. -L75: -L76: To fill this gap, we introduce HSCodeComp (short for the Harmonized System Code -L77: (HSCode) Competition), the first realistic, expert-level e-commerce benchmark -L78: designed to evaluate agents in predicting complete 10-digit Harmonized System -L79: Code (HSCode) of the product, using hierarchical rules (e.g., eWTP tariff -L80: rules111https://www.ewtp.com/web/smart/hscode). HSCodes organize products -L81: through a hierarchical structure spanning over 5,000 distinct codes across +L75: +L76: To fill this gap, we introduce HSCodeComp (short for the Harmonized System Code +L77: (HSCode) Competition), the first realistic, expert-level e-commerce benchmark +L78: designed to evaluate agents in predicting complete 10-digit Harmonized System +L79: Code (HSCode) of the product, using hierarchical rules (e.g., eWTP tariff +L80: rules111https://www.ewtp.com/web/smart/hscode). HSCodes organize products +L81: through a hierarchical structure spanning over 5,000 distinct codes across L82: multiple classification levels, representing the global standard for classifying -L83: traded international goods, established by the World Customs Organization and -L84: implemented across more than 200 countries for customs clearance and tariff -L85: determination [Grainger, 2024, Nath et al., 2025]. 
Built from the data of the -L86: large-scale e-commerce platforms, our proposed HSCodeComp comprises 632 -L87: carefully curated product entries, encompassing 27 unique HS chapters and 32 +L83: traded international goods, established by the World Customs Organization and +L84: implemented across more than 200 countries for customs clearance and tariff +L85: determination [Grainger, 2024, Nath et al., 2025]. Built from the data of the +L86: large-scale e-commerce platforms, our proposed HSCodeComp comprises 632 +L87: carefully curated product entries, encompassing 27 unique HS chapters and 32 L88: distinct first-level categories. These HSCodes have been rigorously annotated by -L89: multiple e-commerce domain experts, ensuring that HSCodeComp is expert-level. +L89: multiple e-commerce domain experts, ensuring that HSCodeComp is expert-level. L90: Accurately predicting the exact 10-digit HSCode presents significant challenges: -L91: agents must perform multi-hop hierarchical reasoning with complex tariff rules -L92: while processing noisy but realistic product descriptions that often contain +L91: agents must perform multi-hop hierarchical reasoning with complex tariff rules +L92: while processing noisy but realistic product descriptions that often contain L93: abbreviations, language variations, or incomplete information. -L94: -L95: Extensive experiments on the state-of-the-art baselines, including 14 advanced -L96: foundation models, 6 advanced open-source agent systems and 3 closed-source -L97: agent systems, demonstrate that HSCode prediction task remains a substantial -L98: challenge for current AI approaches. As shown in the Figure 1 (right), even the -L99: best-performing system (SmolAgent [Roucher et al., 2025] with GPT-5) achieves -L100: only 46.8% accuracy, substantially below the 95.0% accuracy attained by human -L101: experts. Further detailed analysis reveals that existing agent systems lack -L102: critical capabilities required for this complex hierarchical rule applications. -L103: Notably, test-time scaling approach—which has proven effective in other +L94: +L95: Extensive experiments on the state-of-the-art baselines, including 14 advanced +L96: foundation models, 6 advanced open-source agent systems and 3 closed-source +L97: agent systems, demonstrate that HSCode prediction task remains a substantial +L98: challenge for current AI approaches. As shown in the Figure 1 (right), even the +L99: best-performing system (SmolAgent [Roucher et al., 2025] with GPT-5) achieves +L100: only 46.8% accuracy, substantially below the 95.0% accuracy attained by human +L101: experts. Further detailed analysis reveals that existing agent systems lack +L102: critical capabilities required for this complex hierarchical rule applications. +L103: Notably, test-time scaling approach—which has proven effective in other L104: reasoning tasks [Guo et al., 2025, Liu et al., 2025]—fail to improve performance -L105: on HSCodeComp. These observations demonstrate the challenging nature of our -L106: proposed HSCodeComp, highlighting the need for more effective designs of agent -L107: systems. To facilitate future research, we will publicly release codes and the +L105: on HSCodeComp. These observations demonstrate the challenging nature of our +L106: proposed HSCodeComp, highlighting the need for more effective designs of agent +L107: systems. To facilitate future research, we will publicly release codes and the L108: benchmark dataset of HSCodeComp. 
-L109: +L109: L110: ## 2 Related Works -L111: +L111: L112: ### 2.1 Previous Works in HSCode Prediction -L113: -L114: Previous works treat HSCode prediction as the e-commerce text classification +L113: +L114: Previous works treat HSCode prediction as the e-commerce text classification L115: task [Grainger, 2024], using pre-trained BERT models [Liao et al., 2024, Shubham -L116: et al., 2022] or Large Language Models (LLMs) prompting [Hussain and Ahmed, -L117: 2023]. However, these approaches fail to leverage domain-specific knowledge, -L118: especially the rules written by human experts [Hussain and Ahmed, 2023, Judy, +L116: et al., 2022] or Large Language Models (LLMs) prompting [Hussain and Ahmed, +L117: 2023]. However, these approaches fail to leverage domain-specific knowledge, +L118: especially the rules written by human experts [Hussain and Ahmed, 2023, Judy, L119: 2024]. Besides, existing HSCode benchmarks face two critical limitations [Judy, Error fetching URL `https://github.com/open-compass/GTA` Error fetching URL `https://zhuanlan.zhihu.com/p/1939316761775301093` [3] (https://blog.csdn.net/qq_41472205/article/details/151974603) **viewing lines [0 - 167] of 187** -L0: +L0: L1: URL: https://blog.csdn.net/qq_41472205/article/details/151974603 L2: # BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试 -L3: +L3: L4: BrowseComp:AI浏览能力评估基准 -L5: +L5: L6: 最新推荐文章于 2025-11-12 13:40:20 发布 -L7: +L7: L8: 原创 于 2025-09-22 22:33:04 发布 · 1.3k 阅读 -L9: +L9: L10: · 9 -L11: · 25 · +L11: · 25 · L12: CC 4.0 BY-SA版权 -L13: +L13: L14: 版权声明:本文为博主原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。 -L15: +L15: L16: ## BrowseComp:为浏览智能体设计的简单而具挑战性的基准测试 -L17: +L17: L18: 在人工智能从基础聊天机器人向推理器和智能体发展的进程中,具备浏览互联网能力的人工智能模型正变得越来越重要。今天,我们将介绍一个名为BrowseComp的创新基准 L19: 测试,它专门设计用于评估AI代理在复杂网络浏览任务中的能力。 -L20: +L20: L21: ### 什么是BrowseComp? -L22: +L22: L23: BrowseComp(全称Browsing Competition)是一个包含1,266个挑战性问题的基准测试集,专门用于衡量AI代理在互联网上持续导航、寻找难 L24: 以找到的纠缠信息的能力。该基准测试由OpenAI团队开发,旨在推动更可信赖和可靠的AI代理研究。 -L25: +L25: L26: #### 核心特点 -L27: +L27: L28: 挑战性问题设计:BrowseComp的问题设计遵循严格的难度标准: -L29: +L29: L30: - 人类创建者确保问题在10分钟内无法被人解决 L31: - 现有模型(包括带浏览功能的ChatGPT和早期版本的OpenAI Deep Research)无法解决 L32: - 通过5次简单Google搜索无法在结果首页找到答案 -L33: +L33: L34: 简单易验证:尽管问题极具挑战性,但答案形式简单——都是短字符串,便于自动验证模型输出的正确性。 -L35: +L35: L36: ### 为什么需要BrowseComp? 
-L37: +L37: L38: #### 现有基准的局限性 -L39: +L39: L40: 传统的信息检索基准(如TriviaQA、HotpotQA等)主要关注易于查找的信息,随着语言模型的进步,这些基准已经趋于饱和。而BrowseComp专注于那些需 L41: 要浏览大量网站才能解决的"硬核"问题。 -L42: +L42: L43: #### 模拟真实挑战 -L44: +L44: L45: BrowseComp问题通常采用"逆向设计"方法:创建者从一个已知事实出发,构建一个搜索空间巨大但验证简单的问题。例如: -L46: +L46: L47: “找出2018-2023年间在EMNLP会议上发表、第一作者本科毕业于达特茅斯学院、第四作者本科毕业于宾夕法尼亚大学的科学论文标题” -L48: +L48: L49: 这类问题验证简单,但解决起来需要检查数千篇论文并调查每位作者的背景。 -L50: +L50: L51: ### 数据集特点 -L52: +L52: L53: #### 主题多样性 -L54: +L54: L55: BrowseComp涵盖了广泛的主题领域(如图2所示),包括历史、科学、文化等。创建者被鼓励基于个人兴趣设计问题,这有助于提高数据质量和参与度。 -L56: +L56: L57: #### 质量保证 -L58: +L58: L59: 为确保答案的唯一性,创建者需要: -L60: +L60: L61: - 对问题内容有足够了解,确信没有其他有效答案 L62: - 如果不确定,则添加更多约束条件 L63: - 接受其他创建者的验证反馈 -L64: +L64: L65: ### 人类表现基准 -L66: +L66: L67: 为了衡量BrowseComp的难度,研究人员让人类创建者尝试解决问题(不能解答自己创建的问题)。结果显示: -L68: +L68: L69: - **70.8%**的问题在2小时搜索后人类选择放弃 L70: - **29.2%**的问题被成功解决 L71: - 在解决的问题中,**86.4%**的人类答案与参考答案一致 -L72: +L72: L73: 这表明BrowseComp确实极具挑战性,即使是熟悉数据集的人类专家也难以在有限时间内解决大部分问题。 -L74: +L74: L75: ### AI模型表现评估 -L76: +L76: L77: #### 各模型对比 -L78: +L78: L79: 研究人员评估了多种模型在BrowseComp上的表现: -L80: -L81: 模型 | 准确率(%) | 校准误差(%) +L80: +L81: 模型 | 准确率(%) | 校准误差(%) L82: ---|---|--- -L83: GPT-4o | 0.6 | 69 -L84: GPT-4o(带浏览) | 1.9 | 82 -L85: GPT-4.5 | 0.9 | 68 -L86: OpenAI o1 | 9.9 | 65 -L87: Deep Research | 51.5 | 91 -L88: +L83: GPT-4o | 0.6 | 69 +L84: GPT-4o(带浏览) | 1.9 | 82 +L85: GPT-4.5 | 0.9 | 68 +L86: OpenAI o1 | 9.9 | 65 +L87: Deep Research | 51.5 | 91 +L88: L89: #### 关键发现 -L90: +L90: L91: - 基础模型表现不佳:GPT-4o和GPT-4.5准确率接近零,凸显了基准的难度 L92: - 浏览功能带来有限提升:启用浏览功能的GPT-4o准确率略有提高,但仍很低 L93: - 推理能力的重要性:OpenAI o1虽然没有浏览能力,但凭借更强的推理能力获得较高准确率 L94: - 专业模型的优势:专门为持久网络浏览训练的Deep Research模型解决了约一半的问题 -L95: +L95: L96: #### 计算资源与性能关系 -L97: +L97: L98: 研究表明,BrowseComp性能随测试时计算资源的增加而平滑提升(如图1所示)。这与智能体模型的特性一致——更多计算资源允许模型浏览更多网站,从而提高找到正确 L99: 答案的机会。 -L100: +L100: L101: ### 进阶策略分析 -L102: +L102: L103: #### 聚合策略的效果 -L104: +L104: L105: 通过让模型多次尝试同一问题并采用投票策略,可以显著提升性能: -L106: +L106: L107: - 多数投票:选择样本中最常见的答案 L108: - 加权投票:根据模型置信度加权投票 L109: - 最佳选择:选择置信度最高的答案 -L110: +L110: L111: 这些方法将Deep Research的性能提升了15-25%,表明模型通常能够识别自己的正确答案。 -L112: +L112: L113: #### 任务难度分布 -L114: +L114: L115: 分析显示,BrowseComp中的任务难度分布广泛: -L116: +L116: L117: - 16%的任务被Deep Research完美解决(100%通过率) L118: - 14%的任务完全失败(0%通过率) L119: - 其余任务处于中间难度水平 -L120: +L120: L121: ### BrowseComp的意义与局限性 -L122: +L122: L123: #### 作为评估工具的价值 -L124: +L124: L125: BrowseComp可被视为浏览智能体的"编程竞赛"——虽然不全面,但对核心浏览能力提供了有用的衡量。正如在编程竞赛中表现出色的模型很可能在其他编码任务中表现良 L126: 好,能够解决BrowseComp的模型在定位难以查找信息方面应该具有很强的能力。 -L127: +L127: L128: #### 已知局限性 -L129: +L129: L130: - 不涉及真实用户查询分布:如生成长回答或解决查询歧义等挑战 L131: - 答案唯一性假设:虽然尽力确保,但不能完全保证没有其他有效答案 L132: - 单模态限制:当前仅关注文本信息检索,未来可扩展至图像、视频等模态 -L133: +L133: L134: ### 结论与展望 -L135: +L135: L136: BrowseComp为评估AI代理的网络浏览能力提供了一个具有挑战性且易于使用的基准。它强调的持久性、创造性和战略性浏览能力,正是未来AI智能体在实际应用中所需 L137: 的核心技能。 -L138: +L138: L139: 随着更多研究团队在BrowseComp上测试他们的模型,我们期待看到浏览智能体技术的快速进步,最终使AI能够更可靠地帮助我们获取那些深藏在互联网角落的宝贵信息。 -L140: +L140: L141: 访问地址:BrowseComp已在GitHub上开源,地址为 https://github.com/openai/simple-evals -L142: +L142: L143: 添加红包 -L144: +L144: L145: 请填写红包祝福语或标题 -L146: +L146: L147: 个 -L148: +L148: L149: 红包个数最小为10个 -L150: +L150: L151: 元 -L152: +L152: L153: 红包金额最低5元 -L154: +L154: L155: 当前余额3.43元 前往充值 > -L156: +L156: L157: 需支付:10.00元 -L158: +L158: L159: 成就一亿技术人! 
-L160: +L160: L161: hope_wisdom -L162: 发出的红包 +L162: 发出的红包 L163: 实付元 -L164: +L164: L165: 点击重新获取 -L166: +L166: L167: 扫码支付 @@ -568,863 +568,863 @@ L167: 扫码支付 [4] WebVoyager benchmark (Search_Results/WebVoyager benchmark) **viewing lines [0 - 40] of 40** -L0: +L0: L1: URL: Search_Results/WebVoyager benchmark L2: # Search Results -L3: -L4: * 【0†WebVoyager Benchmark; publish_date: none†www.trykura.com】 Performance -L5: measured across various websites in WebVoyager shows Kura consistently +L3: +L4: * 【0†WebVoyager Benchmark; publish_date: none†www.trykura.com】 Performance +L5: measured across various websites in WebVoyager shows Kura consistently L6: outperforming other agents, with particularly strong results on e-commerce ... -L7: * 【1†WebVoyager: Building an End-to-End Web Agent with ...; publish_date: -L8: none†github.com】 WebVoyager is an innovative Large Multimodal Model (LMM) -L9: powered web agent that can complete user instructions end-to-end by interacting +L7: * 【1†WebVoyager: Building an End-to-End Web Agent with ...; publish_date: +L8: none†github.com】 WebVoyager is an innovative Large Multimodal Model (LMM) +L9: powered web agent that can complete user instructions end-to-end by interacting L10: with real-world ... -L11: * 【2†AI Browser Agent Leaderboard | Steel.dev; publish_date: -L12: none†leaderboard.steel.dev】 See how various AI browser agents stack up based on +L11: * 【2†AI Browser Agent Leaderboard | Steel.dev; publish_date: +L12: none†leaderboard.steel.dev】 See how various AI browser agents stack up based on L13: their accuracy in completing web-based tasks on the WebVoyager benchmark. -L14: * 【3†[2401.13919] WebVoyager: Building an End-to-End Web ...; publish_date: +L14: * 【3†[2401.13919] WebVoyager: Building an End-to-End Web ...; publish_date: L15: none†arxiv.org】 by H He · 2024 · Cited by 282 — We show that WebVoyager achieves -L16: a 59.1% task success rate on our benchmark, significantly surpassing the +L16: a 59.1% task success rate on our benchmark, significantly surpassing the L17: performance of both GPT-4 (All ... -L18: * 【4†Our Agent-E SOTA Results on the WebVoyager Benchmark; publish_date: -L19: none†www.emergence.ai】 Jul 11, 2024 — WebVoyager is a benchmark that tests an -L20: agent's capabilities for navigation on dynamic live websites. It is more +L18: * 【4†Our Agent-E SOTA Results on the WebVoyager Benchmark; publish_date: +L19: none†www.emergence.ai】 Jul 11, 2024 — WebVoyager is a benchmark that tests an +L20: agent's capabilities for navigation on dynamic live websites. It is more L21: representative than WebArena [4] ... L22: * 【5†Browser Use = state of the art Web Agent; publish_date: none†browser- L23: use.com】 Dec 15, 2024 — Browser Use has achieved state-of-the-art performance on -L24: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 +L24: the WebVoyager benchmark, with an impressive 89.1% success rate across 586 L25: diverse web tasks. -L26: * 【6†Magnitude achieves SOTA 94% on WebVoyager benchmark; publish_date: -L27: none†github.com】 Magnitude achieves state-of-the-art performance with 93.9% +L26: * 【6†Magnitude achieves SOTA 94% on WebVoyager benchmark; publish_date: +L27: none†github.com】 Magnitude achieves state-of-the-art performance with 93.9% L28: success rate on WebVoyager, beating all other browser agents. 
-L29: * 【7†WebVoyager: Autonomous Web Agent Benchmark; publish_date: -L30: none†www.emergentmind.com】 3 days ago — WebVoyager Benchmark is a comprehensive -L31: evaluation suite for autonomous web agents, featuring 643 tasks across 15 +L29: * 【7†WebVoyager: Autonomous Web Agent Benchmark; publish_date: +L30: none†www.emergentmind.com】 3 days ago — WebVoyager Benchmark is a comprehensive +L31: evaluation suite for autonomous web agents, featuring 643 tasks across 15 L32: popular websites. -L33: * 【8†WebVoyager Benchmark Results; publish_date: none†www.browserable.ai】 +L33: * 【8†WebVoyager Benchmark Results; publish_date: none†www.browserable.ai】 L34: Browserable has achieved 90.4% on the WebVoyager benchmark. This is best-in- -L35: class performance across all web agents. This was done across 567 web tasks +L35: class performance across all web agents. This was done across 567 web tasks L36: which ... -L37: * 【9†89% achieved on WebVoyager using Anchor + Browser Use; publish_date: +L37: * 【9†89% achieved on WebVoyager using Anchor + Browser Use; publish_date: L38: none†www.reddit.com】 Thanks to the amazing work from the browser-use open-source -L39: community and the built-in support from Anchor Browser, we've hit an 89% score +L39: community and the built-in support from Anchor Browser, we've hit an 89% score L40: on WebVoyager. [5] WebArena benchmark (Search_Results/WebArena benchmark) **viewing lines [0 - 42] of 42** -L0: +L0: L1: URL: Search_Results/WebArena benchmark L2: # Search Results -L3: -L4: * 【0†WebArena: A Realistic Web Environment for Building ...; publish_date: -L5: none†webarena.dev】 Our benchmark is implemented in our fully interactable -L6: highly-realistic WebArena environment. It features diverse tasks human may +L3: +L4: * 【0†WebArena: A Realistic Web Environment for Building ...; publish_date: +L5: none†webarena.dev】 Our benchmark is implemented in our fully interactable +L6: highly-realistic WebArena environment. It features diverse tasks human may L7: encounter in their daily ... L8: * 【1†[2307.13854] WebArena: A Realistic Web Environment for ...; publish_date: -L9: none†arxiv.org】 by S Zhou · 2023 · Cited by 637 — Building upon our -L10: environment, we release a set of benchmark tasks focusing on evaluating the +L9: none†arxiv.org】 by S Zhou · 2023 · Cited by 637 — Building upon our +L10: environment, we release a set of benchmark tasks focusing on evaluating the L11: functional correctness of task completions. -L12: * 【2†WebArena: A Realistic Web Environment for Building ...; publish_date: -L13: none†www.cmu.edu】 WebArena introduces a benchmark on interpreting high-level -L14: realistic natural language command to concrete web-based interactions. We +L12: * 【2†WebArena: A Realistic Web Environment for Building ...; publish_date: +L13: none†www.cmu.edu】 WebArena introduces a benchmark on interpreting high-level +L14: realistic natural language command to concrete web-based interactions. We L15: provide annotated programs ... -L16: * 【3†GitHub - web-arena-x/webarena: Code repo for ...; publish_date: -L17: none†github.com】 [12/20/2024] Check out our new benchmark on even more -L18: consequential tasks, including terminal use and coding, TheAgentCompany. +L16: * 【3†GitHub - web-arena-x/webarena: Code repo for ...; publish_date: +L17: none†github.com】 [12/20/2024] Check out our new benchmark on even more +L18: consequential tasks, including terminal use and coding, TheAgentCompany. L19: [12/21/2023] We release the ... 
-L20: * 【4†WebArena Benchmark and the State of Agentic AI; publish_date: -L21: none†medium.com】 In short, WebArena established a new standard for realism and -L22: complexity in web agent evaluation, forcing AI agents to operate in dynamic, +L20: * 【4†WebArena Benchmark and the State of Agentic AI; publish_date: +L21: none†medium.com】 In short, WebArena established a new standard for realism and +L22: complexity in web agent evaluation, forcing AI agents to operate in dynamic, L23: high- ... -L24: * 【5†WebArena: A Realistic Web Environment for Building ...; publish_date: -L25: none†huggingface.co】 Jul 25, 2023 — WebArena, a realistic and reproducible -L26: environment, evaluates the performance of autonomous agents performing complex +L24: * 【5†WebArena: A Realistic Web Environment for Building ...; publish_date: +L25: none†huggingface.co】 Jul 25, 2023 — WebArena, a realistic and reproducible +L26: environment, evaluates the performance of autonomous agents performing complex L27: tasks on websites using ... -L28: * 【6†WebArena Benchmark: Evaluating Web Agents; publish_date: +L28: * 【6†WebArena Benchmark: Evaluating Web Agents; publish_date: L29: none†www.emergentmind.com】 Jun 30, 2025 — WebArena Benchmark is a self-contained L30: suite that evaluates autonomous agents on realistic, multi-step web tasks using L31: natural language ... -L32: * 【7†VisualWebArena is a benchmark for multimodal agents.; publish_date: -L33: none†github.com】 VisualWebArena is a realistic and diverse benchmark for -L34: evaluating multimodal autonomous language agents. It comprises of a set of +L32: * 【7†VisualWebArena is a benchmark for multimodal agents.; publish_date: +L33: none†github.com】 VisualWebArena is a realistic and diverse benchmark for +L34: evaluating multimodal autonomous language agents. It comprises of a set of L35: diverse and complex web-based ... -L36: * 【8†WebDev Arena Leaderboard - LMArena; publish_date: none†web.lmarena.ai】 -L37: WebDev Arena is a real-time AI coding competition where models go head-to-head +L36: * 【8†WebDev Arena Leaderboard - LMArena; publish_date: none†web.lmarena.ai】 +L37: WebDev Arena is a real-time AI coding competition where models go head-to-head L38: in web development challenges, developed by LMArena. -L39: * 【9†WebArena: A Realistic Web Environment for Building ...; publish_date: -L40: none†arxiv.org】 Apr 16, 2024 — We use this benchmark to evaluate several agents -L41: that can follow NL command and perform web-based tasks (§4). These agents are +L39: * 【9†WebArena: A Realistic Web Environment for Building ...; publish_date: +L40: none†arxiv.org】 Apr 16, 2024 — We use this benchmark to evaluate several agents +L41: that can follow NL command and perform web-based tasks (§4). These agents are L42: implemented in a ... [6] GAIA benchmark (Search_Results/GAIA benchmark) **viewing lines [0 - 41] of 41** -L0: +L0: L1: URL: Search_Results/GAIA benchmark L2: # Search Results -L3: -L4: * 【0†GAIA Leaderboard - a Hugging Face Space by ...; publish_date: +L3: +L4: * 【0†GAIA Leaderboard - a Hugging Face Space by ...; publish_date: L5: none†huggingface.co】 GAIA is a benchmark which aims at evaluating next- -L6: generation LLMs (LLMs with augmented capabilities due to added tooling, +L6: generation LLMs (LLMs with augmented capabilities due to added tooling, L7: efficient prompting, access to search ... 
-L8: * 【1†[2311.12983] GAIA: a benchmark for General AI Assistants; publish_date: -L9: none†arxiv.org】 by G Mialon · 2023 · Cited by 367 — GAIA proposes real-world +L8: * 【1†[2311.12983] GAIA: a benchmark for General AI Assistants; publish_date: +L9: none†arxiv.org】 by G Mialon · 2023 · Cited by 367 — GAIA proposes real-world L10: questions that require a set of fundamental abilities such as reasoning, multi- L11: modality handling, web browsing, and generally tool-use ... -L12: * 【2†GAIA benchmark; publish_date: none†huggingface.co】 This is the -L13: organisation page for all things related to GAIA, a benchmark for General AI +L12: * 【2†GAIA benchmark; publish_date: none†huggingface.co】 This is the +L13: organisation page for all things related to GAIA, a benchmark for General AI L14: Assistants. You can find all the information and links on the GAIA ... -L15: * 【3†GAIA: A Benchmark for General AI Assistants; publish_date: -L16: none†ukgovernmentbeis.github.io】 This is an Inspect AI implementation of the +L15: * 【3†GAIA: A Benchmark for General AI Assistants; publish_date: +L16: none†ukgovernmentbeis.github.io】 This is an Inspect AI implementation of the L17: GAIA (General AI Assistants) benchmark, consisting of 450 questions testing tool L18: use on realistic assistant tasks. -L19: * 【4†GAIA: a benchmark for general AI assistants | Research; publish_date: -L20: none†ai.meta.com】 May 6, 2024 — GAIA proposes real-world questions that require -L21: a set of fundamental abilities such as reasoning, multi-modality handling, web +L19: * 【4†GAIA: a benchmark for general AI assistants | Research; publish_date: +L20: none†ai.meta.com】 May 6, 2024 — GAIA proposes real-world questions that require +L21: a set of fundamental abilities such as reasoning, multi-modality handling, web L22: browsing, and generally tool-use ... L23: * 【5†HAL: GAIA Leaderboard; publish_date: none†hal.cs.princeton.edu】 GAIA is a -L24: benchmark for General AI Assistants that requires a set of fundamental -L25: abilities such as reasoning, multi-modality handling, web browsing, and tool- +L24: benchmark for General AI Assistants that requires a set of fundamental +L25: abilities such as reasoning, multi-modality handling, web browsing, and tool- L26: ... -L27: * 【6†GAIA: The LLM Agent Benchmark Everyone's Talking About; publish_date: -L28: none†towardsdatascience.com】 May 29, 2025 — GAIA stands for General AI +L27: * 【6†GAIA: The LLM Agent Benchmark Everyone's Talking About; publish_date: +L28: none†towardsdatascience.com】 May 29, 2025 — GAIA stands for General AI L29: Assistants benchmark [1]. This benchmark was introduced to specifically evaluate L30: LLM agents on their ability to act as general- ... -L31: * 【7†GAIA: a benchmark for General AI Assistants; publish_date: -L32: none†openreview.net】 by G Mialon · Cited by 367 — GAIA proposes real-world +L31: * 【7†GAIA: a benchmark for General AI Assistants; publish_date: +L32: none†openreview.net】 by G Mialon · Cited by 367 — GAIA proposes real-world L33: questions that require a set of fundamental abilities such as reasoning, multi- L34: modality handling, web browsing, and generally tool-use ... 
-L35: * 【8†Rethinking AI Evaluation: Introducing the GAIA Benchmark; publish_date: -L36: none†medium.com】 The authors introduce GAIA, a benchmark designed to assess the +L35: * 【8†Rethinking AI Evaluation: Introducing the GAIA Benchmark; publish_date: +L36: none†medium.com】 The authors introduce GAIA, a benchmark designed to assess the L37: robustness of AI systems across a variety of practical tasks. -L38: * 【9†H2O.ai Tops the General AI Assistant (GAIA) Test; publish_date: +L38: * 【9†H2O.ai Tops the General AI Assistant (GAIA) Test; publish_date: L39: none†h2o.ai】 Mar 17, 2025 — Our h2oGPTe Agent has once again claimed the #1 spot -L40: on the prestigious GAIA (General AI Assistants) benchmark with an impressive +L40: on the prestigious GAIA (General AI Assistants) benchmark with an impressive L41: 75% accuracy rate. [7] ToolBench benchmark (Search_Results/ToolBench benchmark) **viewing lines [0 - 40] of 40** -L0: +L0: L1: URL: Search_Results/ToolBench benchmark L2: # Search Results -L3: -L4: * 【0†ToolBench, an evaluation suite for LLM tool manipulation ...; -L5: publish_date: none†github.com】 The ToolBench is a benchmark consisting of -L6: diverse software tools for real-world tasks. We also provide easy-to-use +L3: +L4: * 【0†ToolBench, an evaluation suite for LLM tool manipulation ...; +L5: publish_date: none†github.com】 The ToolBench is a benchmark consisting of +L6: diverse software tools for real-world tasks. We also provide easy-to-use L7: infrastructure in this repository. L8: * 【1†OpenBMB/ToolBench; publish_date: none†github.com】 [2023/7/27] New version -L9: ToolBench is released. ✨Here is an overview of the dataset construction, +L9: ToolBench is released. ✨Here is an overview of the dataset construction, L10: training, and evaluation. ✨✨Features:. -L11: * 【2†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: +L11: * 【2†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: L12: none†arxiv.org】 by Z Guo · 2024 · Cited by 100 — We introduce StableToolBench, a -L13: benchmark evolving from ToolBench, proposing a virtual API server and stable +L13: benchmark evolving from ToolBench, proposing a virtual API server and stable L14: evaluation system. -L15: * 【3†StableToolBench - Zhicheng Guo; publish_date: none†zhichengg.github.io】 -L16: We introduce StableToolBench, a benchmark evolving from ToolBench, proposing a +L15: * 【3†StableToolBench - Zhicheng Guo; publish_date: none†zhichengg.github.io】 +L16: We introduce StableToolBench, a benchmark evolving from ToolBench, proposing a L17: virtual API server and stable evaluation system. -L18: * 【4†ToolBench | EvalScope - Read the Docs; publish_date: -L19: none†evalscope.readthedocs.io】 We evaluate the effectiveness of the ToolBench -L20: benchmark: ToolBench (Qin et al., 2023b). The task involves integrating API +L18: * 【4†ToolBench | EvalScope - Read the Docs; publish_date: +L19: none†evalscope.readthedocs.io】 We evaluate the effectiveness of the ToolBench +L20: benchmark: ToolBench (Qin et al., 2023b). The task involves integrating API L21: calls to complete tasks. 
-L22: * 【5†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: -L23: none†aclanthology.org】 by Z Guo · 2024 · Cited by 100 — We introduce -L24: StableToolBench, a benchmark evolving from ToolBench, proposing a virtual API +L22: * 【5†Towards Stable Large-Scale Benchmarking on Tool ...; publish_date: +L23: none†aclanthology.org】 by Z Guo · 2024 · Cited by 100 — We introduce +L24: StableToolBench, a benchmark evolving from ToolBench, proposing a virtual API L25: server and stable evaluation system. -L26: * 【6†ML-Tool-Bench: Tool-Augmented Planning for ML Tasks; publish_date: -L27: none†openreview.net】 Sep 18, 2025 — In this work, we introduce a comprehensive -L28: benchmark for evaluating tool-augmented ML agents using a curated set of 61 +L26: * 【6†ML-Tool-Bench: Tool-Augmented Planning for ML Tasks; publish_date: +L27: none†openreview.net】 Sep 18, 2025 — In this work, we introduce a comprehensive +L28: benchmark for evaluating tool-augmented ML agents using a curated set of 61 L29: specialized tools and 15 ... -L30: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: -L31: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact +L30: * 【7†-Bench: Benchmarking AI agents for the real-world; publish_date: +L31: none†sierra.ai】 Jun 20, 2024 — τ-bench measures an agent's ability to interact L32: with (simulated) human users and programmatic APIs while following domain- L33: specific policies in a consistent ... -L34: * 【8†ToolEval Leaderboard; publish_date: none†openbmb.github.io】 ToolEval is +L34: * 【8†ToolEval Leaderboard; publish_date: none†openbmb.github.io】 ToolEval is L35: an automatic evaluator build for tool learning which incorporates two evaluation L36: metrics, Pass Rate and Win Rate(Preference). -L37: * 【9†What is the best benchmark dataset for multi-step tool-use?; -L38: publish_date: none†www.reddit.com】 I'm a newbie trying to evaluate the +L37: * 【9†What is the best benchmark dataset for multi-step tool-use?; +L38: publish_date: none†www.reddit.com】 I'm a newbie trying to evaluate the L39: performance of different prompts strategies for multi-step tool-using, wondering L40: what is the recommended benchmark dataset ... [8] HotpotQA benchmark (Search_Results/HotpotQA benchmark) **viewing lines [0 - 39] of 39** -L0: +L0: L1: URL: Search_Results/HotpotQA benchmark L2: # Search Results -L3: -L4: * 【0†HotpotQA Homepage; publish_date: none†hotpotqa.github.io】 HotpotQA is a -L5: question answering dataset featuring natural, multi-hop questions, with strong +L3: +L4: * 【0†HotpotQA Homepage; publish_date: none†hotpotqa.github.io】 HotpotQA is a +L5: question answering dataset featuring natural, multi-hop questions, with strong L6: supervision for supporting facts to enable more explainable ...See more L7: * 【1†HotpotQA: A Dataset for Diverse, Explainable Multi-hop ...; publish_date: -L8: none†arxiv.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset with -L9: 113k Wikipedia-based question-answer pairs requiring multi-document reasoning, +L8: none†arxiv.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset with +L9: 113k Wikipedia-based question-answer pairs requiring multi-document reasoning, L10: diverse questions, sentence-level ... 
-L11: * 【2†hotpotqa/hotpot_qa · Datasets at Hugging Face; publish_date: -L12: none†huggingface.co】 HotpotQA is a new dataset with 113k Wikipedia-based -L13: question-answer pairs with four key features: (1) the questions require finding +L11: * 【2†hotpotqa/hotpot_qa · Datasets at Hugging Face; publish_date: +L12: none†huggingface.co】 HotpotQA is a new dataset with 113k Wikipedia-based +L13: question-answer pairs with four key features: (1) the questions require finding L14: and reasoning over multiple ...See more -L15: * 【3†Why You Should Stop Using HotpotQA for AI Agents ...; publish_date: -L16: none†qipeng.me】 Jul 1, 2025 — HotpotQA pioneered a class of AI tasks that +L15: * 【3†Why You Should Stop Using HotpotQA for AI Agents ...; publish_date: +L16: none†qipeng.me】 Jul 1, 2025 — HotpotQA pioneered a class of AI tasks that L17: requires the AI system to autonomously perform multiple steps of reasoning in an L18: open-domain setting.See more -L19: * 【4†hotpotqa/hotpot; publish_date: none†github.com】 A dataset for diverse, -L20: explainable multi-hop question answering. This repository contains the baseline +L19: * 【4†hotpotqa/hotpot; publish_date: none†github.com】 A dataset for diverse, +L20: explainable multi-hop question answering. This repository contains the baseline L21: model code, as well as the entire pipeline of running ...See more -L22: * 【5†HotpotQA: Multi-Hop QA Benchmark; publish_date: -L23: none†www.emergentmind.com】 Sep 10, 2025 — HotpotQA is a large-scale multi-hop -L24: question answering benchmark featuring 112,779 Wikipedia-based Q&A pairs with +L22: * 【5†HotpotQA: Multi-Hop QA Benchmark; publish_date: +L23: none†www.emergentmind.com】 Sep 10, 2025 — HotpotQA is a large-scale multi-hop +L24: question answering benchmark featuring 112,779 Wikipedia-based Q&A pairs with L25: detailed, sentence-level ...See more -L26: * 【6†HotpotQA Dataset | Papers With Code; publish_date: -L27: none†paperswithcode.com】 HotpotQA is a question answering dataset collected on +L26: * 【6†HotpotQA Dataset | Papers With Code; publish_date: +L27: none†paperswithcode.com】 HotpotQA is a question answering dataset collected on L28: the English Wikipedia, containing about 113K crowd-sourced questions.See more L29: * 【7†HotpotQA: A Dataset for Diverse, Explainable Multi-hop ...; publish_date: L30: none†aclanthology.org】 by Z Yang · 2018 · Cited by 3834 — HotpotQA is a dataset -L31: with 113k Wikipedia-based question-answer pairs requiring multi-document +L31: with 113k Wikipedia-based question-answer pairs requiring multi-document L32: reasoning, diverse questions, sentence-level facts, and factoid ... L33: * 【8†Benchmark BM25S: HotpotQA; publish_date: none†www.kaggle.com】 Explore and -L34: run machine learning code with Kaggle Notebooks | Using data from No attached +L34: run machine learning code with Kaggle Notebooks | Using data from No attached L35: data sources. 
-L36: * 【9†mteb/hotpotqa · Datasets at Hugging Face; publish_date: +L36: * 【9†mteb/hotpotqa · Datasets at Hugging Face; publish_date: L37: none†huggingface.co】 HotpotQA is a question answering dataset featuring natural, -L38: multi-hop questions, with strong supervision for supporting facts to enable +L38: multi-hop questions, with strong supervision for supporting facts to enable L39: more explainable ...See more [9] FEVER benchmark (Search_Results/FEVER benchmark) **viewing lines [0 - 40] of 40** -L0: +L0: L1: URL: Search_Results/FEVER benchmark L2: # Search Results -L3: -L4: * 【0†Fever.ai; publish_date: none†fever.ai】 We are pleased to announce that -L5: FEVER9 will be co-located with EACL 2026. In this year's workshop, we will +L3: +L4: * 【0†Fever.ai; publish_date: none†fever.ai】 We are pleased to announce that +L5: FEVER9 will be co-located with EACL 2026. In this year's workshop, we will L6: introduce a new shared task focused on automated fact ... L7: * 【1†a Large-scale Dataset for Fact Extraction and VERification; publish_date: -L8: none†aclanthology.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we -L9: introduce a new publicly available dataset for verification against textual +L8: none†aclanthology.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we +L9: introduce a new publicly available dataset for verification against textual L10: sources, FEVER: Fact Extraction. -L11: * 【2†awslabs/fever: FEVER (Fact Extraction and VERification) ...; -L12: publish_date: none†github.com】 In this paper we introduce a new publicly -L13: available dataset for verification against textual sources, FEVER: Fact +L11: * 【2†awslabs/fever: FEVER (Fact Extraction and VERification) ...; +L12: publish_date: none†github.com】 In this paper we introduce a new publicly +L13: available dataset for verification against textual sources, FEVER: Fact L14: Extraction and VERification. -L15: * 【3†FEVER: Fact Extraction and VERification; publish_date: -L16: none†www.amazon.science】 The best accuracy we achieve on labeling a claim -L17: accompanied by the correct evidence is 31.87%, while if we ignore the evidence +L15: * 【3†FEVER: Fact Extraction and VERification; publish_date: +L16: none†www.amazon.science】 The best accuracy we achieve on labeling a claim +L17: accompanied by the correct evidence is 31.87%, while if we ignore the evidence L18: we achieve 50.91%. Thus we ... -L19: * 【4†FEVER Dataset; publish_date: none†fever.ai】 FEVER (Fact Extraction and -L20: VERification) consists of 185,445 claims generated by altering sentences +L19: * 【4†FEVER Dataset; publish_date: none†fever.ai】 FEVER (Fact Extraction and +L20: VERification) consists of 185,445 claims generated by altering sentences L21: extracted from Wikipedia and subsequently verified ... L22: * 【5†mteb/fever · Datasets at Hugging Face; publish_date: none†huggingface.co】 -L23: FEVER. An MTEB dataset. Massive Text Embedding Benchmark. FEVER (Fact -L24: Extraction and VERification) consists of 185,445 claims generated by altering +L23: FEVER. An MTEB dataset. Massive Text Embedding Benchmark. FEVER (Fact +L24: Extraction and VERification) consists of 185,445 claims generated by altering L25: sentences ... 
-L26: * 【6†FEVEROUS: Fact Extraction and VERification Over ...; publish_date: -L27: none†datasets-benchmarks-proceedings.neurips.cc】 by R Aly · Cited by 359 — In -L28: this paper we introduce a novel dataset and benchmark, Fact Extraction and +L26: * 【6†FEVEROUS: Fact Extraction and VERification Over ...; publish_date: +L27: none†datasets-benchmarks-proceedings.neurips.cc】 by R Aly · Cited by 359 — In +L28: this paper we introduce a novel dataset and benchmark, Fact Extraction and L29: VERification Over. Unstructured and Structured information (FEVEROUS), which ... L30: * 【7†a large-scale dataset for Fact Extraction and VERification; publish_date: L31: none†arxiv.org】 by J Thorne · 2018 · Cited by 2315 — In this paper we introduce -L32: a new publicly available dataset for verification against textual sources, +L32: a new publicly available dataset for verification against textual sources, L33: FEVER: Fact Extraction and VERification. -L34: * 【8†FEVER Resources; publish_date: none†fever.ai】 2018 FEVER: a large-scale +L34: * 【8†FEVER Resources; publish_date: none†fever.ai】 2018 FEVER: a large-scale L35: dataset for Fact Extraction and VERification .bib James Thorne, Andreas Vlachos, L36: Christos Christodoulopoulos, Arpit Mittal L37: * 【9†a Large-scale Dataset for Fact Extraction and VERification; publish_date: -L38: none†www.semanticscholar.org】 This paper introduces a new publicly available -L39: dataset for verification against textual sources, FEVER, which consists of +L38: none†www.semanticscholar.org】 This paper introduces a new publicly available +L39: dataset for verification against textual sources, FEVER, which consists of L40: 185,445 claims generated by ... [10] TriviaQA benchmark (Search_Results/TriviaQA benchmark) **viewing lines [0 - 35] of 35** -L0: +L0: L1: URL: Search_Results/TriviaQA benchmark L2: # Search Results -L3: +L3: L4: * 【0†TriviaQA; publish_date: none†nlp.cs.washington.edu】 TriviaQA is a reading -L5: comprehension dataset containing over 650K question-answer-evidence triples. +L5: comprehension dataset containing over 650K question-answer-evidence triples. L6: TriviaQA includes 95K question-answer pairs authored ... L7: * 【1†TriviaQA: A Large Scale Distantly Supervised Challenge ...; publish_date: L8: none†aclanthology.org】 by M Joshi · 2017 · Cited by 3451 — We present TriviaQA, L9: a challenging reading comprehension dataset containing over 650K question- L10: answer-evidence triples. TriviaQA includes 95K question ... -L11: * 【2†mandarjoshi/trivia_qa · Datasets at Hugging Face; publish_date: -L12: none†huggingface.co】 TriviaqQA is a reading comprehension dataset containing +L11: * 【2†mandarjoshi/trivia_qa · Datasets at Hugging Face; publish_date: +L12: none†huggingface.co】 TriviaqQA is a reading comprehension dataset containing L13: over 650K question-answer-evidence triples. TriviaqQA includes 95K question- L14: answer pairs authored by ... -L15: * 【3†[1705.03551] TriviaQA: A Large Scale Distantly Supervised ...; -L16: publish_date: none†arxiv.org】 by M Joshi · 2017 · Cited by 3451 — We present -L17: TriviaQA, a challenging reading comprehension dataset containing over 650K +L15: * 【3†[1705.03551] TriviaQA: A Large Scale Distantly Supervised ...; +L16: publish_date: none†arxiv.org】 by M Joshi · 2017 · Cited by 3451 — We present +L17: TriviaQA, a challenging reading comprehension dataset containing over 650K L18: question-answer-evidence triples. 
-L19: * 【4†TriviaQA; publish_date: none†epoch.ai】 An open-domain question answering +L19: * 【4†TriviaQA; publish_date: none†epoch.ai】 An open-domain question answering L20: benchmark with challenging trivia questions paired with evidence documents. -L21: * 【5†TriviaQA Leaderboard; publish_date: none†llm-stats.com】 What is the -L22: TriviaQA benchmark? A large-scale reading comprehension dataset containing over +L21: * 【5†TriviaQA Leaderboard; publish_date: none†llm-stats.com】 What is the +L22: TriviaQA benchmark? A large-scale reading comprehension dataset containing over L23: 650K question-answer-evidence triples. TriviaQA includes 95K ... -L24: * 【6†Code for the TriviaQA reading comprehension dataset; publish_date: -L25: none†github.com】 A large scale distantly supervised challenge dataset for -L26: reading comprehension. In Association for Computational Linguistics (ACL) 2017, +L24: * 【6†Code for the TriviaQA reading comprehension dataset; publish_date: +L25: none†github.com】 A large scale distantly supervised challenge dataset for +L26: reading comprehension. In Association for Computational Linguistics (ACL) 2017, L27: Vancouver, Canada. -L28: * 【7†TriviaQA - Model Benchmarks - The Regularizer; publish_date: -L29: none†www.theregularizer.com】 May 4, 2025 — Compare the performance of different -L30: AI models across standardized benchmarks. Higher scores generally indicate +L28: * 【7†TriviaQA - Model Benchmarks - The Regularizer; publish_date: +L29: none†www.theregularizer.com】 May 4, 2025 — Compare the performance of different +L30: AI models across standardized benchmarks. Higher scores generally indicate L31: better performance, but context ... L32: * 【8†TriviaQA: A Large Scale Distantly Supervised Challenge ...; publish_date: -L33: none†www.cs.utexas.edu】 by M Joshi · Cited by 3445 — We present TriviaQA, a +L33: none†www.cs.utexas.edu】 by M Joshi · Cited by 3445 — We present TriviaQA, a L34: challenging reading comprehension dataset contain- ing over 650K question- L35: answer-evidence triples. TriviaQA includes 95K question-. [11] Natural Questions benchmark (Search_Results/Natural Questions benchmark) **viewing lines [0 - 39] of 39** -L0: +L0: L1: URL: Search_Results/Natural Questions benchmark L2: # Search Results -L3: -L4: * 【0†Natural Questions: a Benchmark for Question Answering ...; publish_date: -L5: none†research.google】 by T Kwiatkowski · Cited by 4339 — We present the Natural -L6: Questions corpus, a question answering dataset. Questions consist of real +L3: +L4: * 【0†Natural Questions: a Benchmark for Question Answering ...; publish_date: +L5: none†research.google】 by T Kwiatkowski · Cited by 4339 — We present the Natural +L6: Questions corpus, a question answering dataset. Questions consist of real L7: anonymized, aggregated queries issued to the Google search ... -L8: * 【1†Natural Questions: A Benchmark for Question Answering ...; publish_date: -L9: none†aclanthology.org】 by T Kwiatkowski · Cited by 4308 — Abstract. We present -L10: the Natural Questions corpus, a question answering data set. Questions consist +L8: * 【1†Natural Questions: A Benchmark for Question Answering ...; publish_date: +L9: none†aclanthology.org】 by T Kwiatkowski · Cited by 4308 — Abstract. We present +L10: the Natural Questions corpus, a question answering data set. Questions consist L11: of real anonymized, aggregated queries issued. 
-L12: * 【2†Google's Natural Questions; publish_date: none†ai.google.com】 Natural +L12: * 【2†Google's Natural Questions; publish_date: none†ai.google.com】 Natural L13: Questions. A Benchmark for Question Answering Research. View examples · Download L14: dataset. Open Domain Question Answering. A core goal in artificial ... -L15: * 【3†google-research-datasets/natural-questions; publish_date: -L16: none†github.com】 Natural Questions (NQ) contains real user questions issued to -L17: Google search, and answers found from Wikipedia by annotators. NQ is designed +L15: * 【3†google-research-datasets/natural-questions; publish_date: +L16: none†github.com】 Natural Questions (NQ) contains real user questions issued to +L17: Google search, and answers found from Wikipedia by annotators. NQ is designed L18: for the training and ... -L19: * 【4†Natural Questions: A Benchmark for Question Answering ...; publish_date: -L20: none†direct.mit.edu】 Aug 1, 2019 — We present the Natural Questions corpus, a -L21: question answering data set. Questions consist of real anonymized, aggregated +L19: * 【4†Natural Questions: A Benchmark for Question Answering ...; publish_date: +L20: none†direct.mit.edu】 Aug 1, 2019 — We present the Natural Questions corpus, a +L21: question answering data set. Questions consist of real anonymized, aggregated L22: queries issued to the Google search ... -L23: * 【5†ir_datasets : Natural Questions; publish_date: none†ir-datasets.com】 -L24: Google Natural Questions is a Q&A dataset containing long, short, and Yes/No +L23: * 【5†ir_datasets : Natural Questions; publish_date: none†ir-datasets.com】 +L24: Google Natural Questions is a Q&A dataset containing long, short, and Yes/No L25: answers from Wikipedia. ir_datasets frames this around an ad-hoc ranking setting L26: ... -L27: * 【6†sentence-transformers/natural-questions · Datasets at ...; publish_date: -L28: none†huggingface.co】 This dataset is a collection of question-answer pairs from +L27: * 【6†sentence-transformers/natural-questions · Datasets at ...; publish_date: +L28: none†huggingface.co】 This dataset is a collection of question-answer pairs from L29: the Natural Questions dataset. See Natural Questions for additional information. -L30: * 【7†Google's Natural Questions; publish_date: none†ai.google.com】 Natural -L31: Questions contains 307K training examples, 8K examples for development, and a +L30: * 【7†Google's Natural Questions; publish_date: none†ai.google.com】 Natural +L31: Questions contains 307K training examples, 8K examples for development, and a L32: further 8K examples for testing. In the paper, we demonstrate a human ... -L33: * 【8†A Benchmark for Question Answering Research; publish_date: -L34: none†www.researchgate.net】 Jul 27, 2025 — We present the Natural Questions -L35: corpus, a question answering data set. Questions consist of real anonymized, +L33: * 【8†A Benchmark for Question Answering Research; publish_date: +L34: none†www.researchgate.net】 Jul 27, 2025 — We present the Natural Questions +L35: corpus, a question answering data set. Questions consist of real anonymized, L36: aggregated queries issued to the Google search ... 
-L37: * 【9†natural-questions; publish_date: none†docs.unity.rc.umass.edu】 Sep 4, -L38: 2025 — “Natural questions: a benchmark for question answering research.” +L37: * 【9†natural-questions; publish_date: none†docs.unity.rc.umass.edu】 Sep 4, +L38: 2025 — “Natural questions: a benchmark for question answering research.” L39: Transactions of the Association for Computational Linguistics 7 (2019): ... [12] MS MARCO benchmark (Search_Results/MS MARCO benchmark) **viewing lines [0 - 41] of 41** -L0: +L0: L1: URL: Search_Results/MS MARCO benchmark L2: # Search Results -L3: +L3: L4: * 【0†MS MARCO - Microsoft Open Source; publish_date: none†microsoft.github.io】 L5: The MS MARCO datasets are intended for non-commercial research purposes only to -L6: promote advancement in the field of artificial intelligence and related areas, +L6: promote advancement in the field of artificial intelligence and related areas, L7: ... -L8: * 【1†microsoft/ms_marco · Datasets at Hugging Face; publish_date: -L9: none†huggingface.co】 Starting with a paper released at NIPS 2016, MS MARCO is a +L8: * 【1†microsoft/ms_marco · Datasets at Hugging Face; publish_date: +L9: none†huggingface.co】 Starting with a paper released at NIPS 2016, MS MARCO is a L10: collection of datasets focused on deep learning in search. The first dataset was L11: a question ... -L12: * 【2†Benchmarking Ranking Models in the Large-Data Regime; publish_date: -L13: none†arxiv.org】 by N Craswell · 2021 · Cited by 89 — This paper uses the MS -L14: MARCO and TREC Deep Learning Track as our case study, comparing it to the case +L12: * 【2†Benchmarking Ranking Models in the Large-Data Regime; publish_date: +L13: none†arxiv.org】 by N Craswell · 2021 · Cited by 89 — This paper uses the MS +L14: MARCO and TREC Deep Learning Track as our case study, comparing it to the case L15: of TREC ad hoc ranking in the 1990s. -L16: * 【3†Benchmarking Ranking Models in the Large-Data Regime; publish_date: -L17: none†www.microsoft.com】 This paper uses the MS MARCO and TREC Deep Learning -L18: Track as our case study, comparing it to the case of TREC ad hoc ranking in the +L16: * 【3†Benchmarking Ranking Models in the Large-Data Regime; publish_date: +L17: none†www.microsoft.com】 This paper uses the MS MARCO and TREC Deep Learning +L18: Track as our case study, comparing it to the case of TREC ad hoc ranking in the L19: 1990s. We show how the ... -L20: * 【4†Datasets for Document and Passage Ranking Leadboards; publish_date: +L20: * 【4†Datasets for Document and Passage Ranking Leadboards; publish_date: L21: none†microsoft.github.io】 The MS MARCO document and passage ranking leaderboards -L22: complements the TREC Deep Learning Track by providing on-going evaluation of +L22: complements the TREC Deep Learning Track by providing on-going evaluation of L23: submissions using pre- ... -L24: * 【5†MS MARCO: Benchmarking Ranking Models in the Large- ...; publish_date: -L25: none†dl.acm.org】 Jul 11, 2021 — This paper uses the MS MARCO and TREC Deep -L26: Learning Track as our case study, comparing it to the case of TREC ad hoc +L24: * 【5†MS MARCO: Benchmarking Ranking Models in the Large- ...; publish_date: +L25: none†dl.acm.org】 Jul 11, 2021 — This paper uses the MS MARCO and TREC Deep +L26: Learning Track as our case study, comparing it to the case of TREC ad hoc L27: ranking in the 1990s. 
-L28: * 【6†ir_datasets : MSMARCO (passage); publish_date: none†ir-datasets.com】 A +L28: * 【6†ir_datasets : MSMARCO (passage); publish_date: none†ir-datasets.com】 A L29: passage ranking benchmark with a collection of 8.8 million passages and question L30: queries. Most relevance judgments are shallow. -L31: * 【7†MS MARCO; publish_date: none†sbert.net】 MS MARCO Passage Ranking is a -L32: large dataset to train models for information retrieval. It consists of about +L31: * 【7†MS MARCO; publish_date: none†sbert.net】 MS MARCO Passage Ranking is a +L32: large dataset to train models for information retrieval. It consists of about L33: 500k real search queries from Bing search engine ... -L34: * 【8†MS MARCO: A Human Generated MAchine Reading ...; publish_date: -L35: none†arxiv.org】 by P Bajaj · 2016 · Cited by 1151 — We introduce a large scale -L36: MAchine Reading COmprehension dataset, which we name MS MARCO. The dataset +L34: * 【8†MS MARCO: A Human Generated MAchine Reading ...; publish_date: +L35: none†arxiv.org】 by P Bajaj · 2016 · Cited by 1151 — We introduce a large scale +L36: MAchine Reading COmprehension dataset, which we name MS MARCO. The dataset L37: comprises of 1,010,916 anonymized ... -L38: * 【9†MS MARCO Web Search: A Large-scale Information-rich ...; publish_date: -L39: none†www.microsoft.com】 May 13, 2024 — MS MARCO Web Search offers a retrieval -L40: benchmark with three web retrieval challenge tasks that demands innovations in +L38: * 【9†MS MARCO Web Search: A Large-scale Information-rich ...; publish_date: +L39: none†www.microsoft.com】 May 13, 2024 — MS MARCO Web Search offers a retrieval +L40: benchmark with three web retrieval challenge tasks that demands innovations in L41: both machine learning and ... [13] BEIR benchmark (Search_Results/BEIR benchmark) **viewing lines [0 - 37] of 37** -L0: +L0: L1: URL: Search_Results/BEIR benchmark L2: # Search Results -L3: -L4: * 【0†详细介绍文本检索基准BEIR: A Heterogeneous Benchmark ...; publish_date: -L5: none†blog.csdn.net】 2023年1月1日 — +L3: +L4: * 【0†详细介绍文本检索基准BEIR: A Heterogeneous Benchmark ...; publish_date: +L5: none†blog.csdn.net】 2023年1月1日 — L6: BEIR旨在为所有不同的检索任务提供一个一站式的零样本评估基准。为了构建一个全面的评估基准,选择方法对于收集具有理想属性的任务和数据集至关重要。对于 ... -L7: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR is a heterogeneous -L8: benchmark containing diverse IR tasks. It also provides a common and easy +L7: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR is a heterogeneous +L8: benchmark containing diverse IR tasks. It also provides a common and easy L9: framework for evaluation of your NLP-based retrieval models ... -L10: * 【2†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; -L11: publish_date: none†arxiv.org】 作者:N Thakur · 2021 · 被引用次数:1480 — We introduce -L12: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for +L10: * 【2†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; +L11: publish_date: none†arxiv.org】 作者:N Thakur · 2021 · 被引用次数:1480 — We introduce +L12: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for L13: information retrieval. -L14: * 【3†BeIR; publish_date: none†huggingface.co】 BEIR (Benchmarking IR) consists -L15: of a homogenous benchmark for diverse sentence or passage level IR tasks. It +L14: * 【3†BeIR; publish_date: none†huggingface.co】 BEIR (Benchmarking IR) consists +L15: of a homogenous benchmark for diverse sentence or passage level IR tasks. It L16: provides a common and easy framework for the cross ... 
-L17: * 【4†论文分享:BEIR A Heterogeneous Benchmark for Zero-shot ...; publish_date: -L18: none†zhuanlan.zhihu.com】 2022年10月3日 — 分享论文,夹带个人理解的分享,建议结合原论文看。 1 研究背景. +L17: * 【4†论文分享:BEIR A Heterogeneous Benchmark for Zero-shot ...; publish_date: +L18: none†zhuanlan.zhihu.com】 2022年10月3日 — 分享论文,夹带个人理解的分享,建议结合原论文看。 1 研究背景. L19: 本论文主要关注的领域是query-document检索(下文简称qd检索),即根据query去文档库里 ... -L20: * 【5†Benchmarking IR Information Retrieval (BEIR); publish_date: +L20: * 【5†Benchmarking IR Information Retrieval (BEIR); publish_date: L21: none†zilliz.com】 BEIR is a benchmark designed for evaluating the versatility and -L22: robustness of information retrieval models. It features 18 diverse datasets +L22: robustness of information retrieval models. It features 18 diverse datasets L23: from domains like ... L24: * 【6†BEIR (Benchmarking IR) - OpenDataLab; publish_date: none†opendatalab.com】 -L25: 简介-Introduction. BEIR(Benchmarking +L25: 简介-Introduction. BEIR(Benchmarking L26: IR)是包含不同信息检索(IR)任务的异构基准。通过BEIR,可以系统地研究多种神经检索方法的零样本泛化能力。 -L27: * 【7†What is the BEIR benchmark and how is it used?; publish_date: -L28: none†milvus.io】 The BEIR (Benchmarking Information Retrieval) benchmark is a -L29: standardized framework designed to evaluate the effectiveness of search and +L27: * 【7†What is the BEIR benchmark and how is it used?; publish_date: +L28: none†milvus.io】 The BEIR (Benchmarking Information Retrieval) benchmark is a +L29: standardized framework designed to evaluate the effectiveness of search and L30: retrieval algorithms. -L31: * 【8†BEIR Benchmark数据集卡片; publish_date: none†www.atyun.com】 BEIR +L31: * 【8†BEIR Benchmark数据集卡片; publish_date: none†www.atyun.com】 BEIR L32: Benchmark数据集卡片. 数据集简介. BEIR是一个异构评测基准,由18个多样化的数据集构建而成,代表了9个信息检索任务:. 事实查证: FEVER , L33: Climate-FEVER , SciFact ... -L34: * 【9†Evaluating search relevance part 1 - The BEIR benchmark; publish_date: -L35: none†www.elastic.co】 2024年7月16日 — Learn to evaluate your search system in the -L36: context of better understanding the BEIR benchmark, with tips & techniques to +L34: * 【9†Evaluating search relevance part 1 - The BEIR benchmark; publish_date: +L35: none†www.elastic.co】 2024年7月16日 — Learn to evaluate your search system in the +L36: context of better understanding the BEIR benchmark, with tips & techniques to L37: improve your ... [14] MIRACL benchmark (Search_Results/MIRACL benchmark) **viewing lines [0 - 41] of 41** -L0: +L0: L1: URL: Search_Results/MIRACL benchmark L2: # Search Results -L3: -L4: * 【0†MIRACL | Multilingual Information Retrieval Across a ...; publish_date: +L3: +L4: * 【0†MIRACL | Multilingual Information Retrieval Across a ...; publish_date: L5: none†project-miracl.github.io】 MIRACL (Multilingual Information Retrieval Across -L6: a Continuum of Languages) is an WSDM 2023 Cup challenge that focuses on search +L6: a Continuum of Languages) is an WSDM 2023 Cup challenge that focuses on search L7: across 18 different ... -L8: * 【1†project-miracl/miracl: A large-scale multilingual dataset for ...; -L9: publish_date: none†github.com】 A large-scale multilingual dataset for +L8: * 【1†project-miracl/miracl: A large-scale multilingual dataset for ...; +L9: publish_date: none†github.com】 A large-scale multilingual dataset for L10: Information Retrieval. Thorough human-annotations across 18 diverse languages. 
L11: * 【2†A Large, multilingual, visual document retrieval benchmark; publish_date: -L12: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a -L13: challenging, representative, multilingual evaluation benchmark for visual +L12: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a +L13: challenging, representative, multilingual evaluation benchmark for visual L14: retrieval pipelines and will help the community build robust ... -L15: * 【3†miracl/miracl · Datasets at Hugging Face; publish_date: -L16: none†huggingface.co】 MIRACL (Multilingual Information Retrieval Across a -L17: Continuum of Languages) is a multilingual retrieval dataset that focuses on +L15: * 【3†miracl/miracl · Datasets at Hugging Face; publish_date: +L16: none†huggingface.co】 MIRACL (Multilingual Information Retrieval Across a +L17: Continuum of Languages) is a multilingual retrieval dataset that focuses on L18: search across 18 different ... -L19: * 【4†MIRACL: A Multilingual Retrieval Dataset Covering 18 ...; publish_date: +L19: * 【4†MIRACL: A Multilingual Retrieval Dataset Covering 18 ...; publish_date: L20: none†direct.mit.edu】 by X Zhang · 2023 · Cited by 131 — MIRACL is a multilingual -L21: dataset for ad hoc retrieval across 18 languages that collectively encompass +L21: dataset for ad hoc retrieval across 18 languages that collectively encompass L22: over three billion native speakers around the world. -L23: * 【5†(PDF) MIRACL-VISION: A Large, multilingual, visual ...; publish_date: +L23: * 【5†(PDF) MIRACL-VISION: A Large, multilingual, visual ...; publish_date: L24: none†www.researchgate.net】 May 23, 2025 — MIRACL-VISION covers 18 languages, and L25: is an extension of the MIRACL dataset, a popular benchmark to evaluate text- L26: based multilingual retrieval ... L27: * 【6†A Large, multilingual, visual document retrieval benchmark; publish_date: -L28: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a -L29: challenging, representative, multilingual evaluation benchmark for visual +L28: none†arxiv.org】 by R Osmulski · 2025 · Cited by 2 — MIRACL-VISION is a +L29: challenging, representative, multilingual evaluation benchmark for visual L30: retrieval pipelines and will help the community ... -L31: * 【7†ir_datasets : MIRACL; publish_date: none†ir-datasets.com】 -L32: "miracl/ar/test-a". The held-out test set (version a) for Arabic. -L33: queriesdocsCitationMetadata. 936 queries. Language: ar. Query type: +L31: * 【7†ir_datasets : MIRACL; publish_date: none†ir-datasets.com】 +L32: "miracl/ar/test-a". The held-out test set (version a) for Arabic. +L33: queriesdocsCitationMetadata. 936 queries. Language: ar. Query type: L34: GenericQuery: (namedtuple). L35: * 【8†Evaluate on MIRACL — BGE documentation; publish_date: none†bge-model.com】 -L36: MIRACL (Multilingual Information Retrieval Across a Continuum of Languages) is +L36: MIRACL (Multilingual Information Retrieval Across a Continuum of Languages) is L37: an WSDM 2023 Cup challenge that focuses on search across 18 different languages. -L38: * 【9†MIRACL - Alpha's Tech Garden; publish_date: -L39: none†techgarden.alphasmanifesto.com】 MIRACL (Multilingual Information Retrieval +L38: * 【9†MIRACL - Alpha's Tech Garden; publish_date: +L39: none†techgarden.alphasmanifesto.com】 MIRACL (Multilingual Information Retrieval L40: Across a Continuum of Languages) is a multilingual dataset we have built for the L41: WSDM 2023 Cup ... 
[15] Zero-shot IR benchmark (Search_Results/Zero-shot IR benchmark) **viewing lines [0 - 40] of 40** -L0: +L0: L1: URL: Search_Results/Zero-shot IR benchmark L2: # Search Results -L3: -L4: * 【0†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; -L5: publish_date: none†arxiv.org】 by N Thakur · 2021 · Cited by 1480 — We introduce -L6: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for +L3: +L4: * 【0†BEIR: A Heterogenous Benchmark for Zero-shot Evaluation ...; +L5: publish_date: none†arxiv.org】 by N Thakur · 2021 · Cited by 1480 — We introduce +L6: Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for L7: information retrieval.See more -L8: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR: A Heterogenous -L9: Benchmark for Zero-shot Evaluation of Information Retrieval Models (NeurIPS +L8: * 【1†beir-cellar/beir; publish_date: none†github.com】 BEIR: A Heterogenous +L9: Benchmark for Zero-shot Evaluation of Information Retrieval Models (NeurIPS L10: 2021, Datasets and Benchmarks Track); Resources for Brewing ...See more -L11: * 【2†Benchmarking IR Information Retrieval (BEIR); publish_date: -L12: none†zilliz.com】 BEIR is a tool to evaluate how well Information Retrieval -L13: systems perform across many tasks and types of information, and is a standard +L11: * 【2†Benchmarking IR Information Retrieval (BEIR); publish_date: +L12: none†zilliz.com】 BEIR is a tool to evaluate how well Information Retrieval +L13: systems perform across many tasks and types of information, and is a standard L14: benchmark. -L15: * 【3†BEIR: A Heterogeneous Benchmark for Zero-shot ...; publish_date: -L16: none†datasets-benchmarks-proceedings.neurips.cc】 by N Thakur · Cited by 1480 — -L17: BEIR is a robust, heterogeneous benchmark for information retrieval, using 18 +L15: * 【3†BEIR: A Heterogeneous Benchmark for Zero-shot ...; publish_date: +L16: none†datasets-benchmarks-proceedings.neurips.cc】 by N Thakur · Cited by 1480 — +L17: BEIR is a robust, heterogeneous benchmark for information retrieval, using 18 L18: datasets and 9 tasks to evaluate model generalization. -L19: * 【4†BEIR; publish_date: none†eval.ai】 BEIR is a heterogeneous zero-shot +L19: * 【4†BEIR; publish_date: none†eval.ai】 BEIR is a heterogeneous zero-shot L20: retrieval benchmark containing 18 datasets from diverse text retrieval tasks and L21: domains.See more -L22: * 【5†[2409.15763] IRSC: A Zero-shot Evaluation Benchmark for ...; -L23: publish_date: none†arxiv.org】 by H Lin · 2024 · Cited by 2 — This paper +L22: * 【5†[2409.15763] IRSC: A Zero-shot Evaluation Benchmark for ...; +L23: publish_date: none†arxiv.org】 by H Lin · 2024 · Cited by 2 — This paper L24: introduces the IRSC benchmark for evaluating the performance of embedding models L25: in multilingual RAG tasks.See more -L26: * 【6†FactIR: A Real-World Zero-shot Open-Domain Retrieval ...; publish_date: +L26: * 【6†FactIR: A Real-World Zero-shot Open-Domain Retrieval ...; publish_date: L27: none†dl.acm.org】 May 23, 2025 — In this paper, we present a real-world retrieval -L28: benchmark FactIR, derived from Factiverse production logs, enhanced with human +L28: benchmark FactIR, derived from Factiverse production logs, enhanced with human L29: annotations. 
We ...See more -L30: * 【7†UniIR: Training and Benchmarking Universal Multimodal ...; publish_date: -L31: none†tiger-ai-lab.github.io】 At test time, we evaluated the zero-shot -L32: performance of all fine-tuned models, as well as SoTA pre-trained retrievers on +L30: * 【7†UniIR: Training and Benchmarking Universal Multimodal ...; publish_date: +L31: none†tiger-ai-lab.github.io】 At test time, we evaluated the zero-shot +L32: performance of all fine-tuned models, as well as SoTA pre-trained retrievers on L33: the three held-out datasets. UniIR ...See more -L34: * 【8†Zero-Shot BEIR Tasks; publish_date: none†www.emergentmind.com】 Aug 26, -L35: 2025 — Zero-Shot BEIR Tasks are evaluation methodologies that assess IR models' +L34: * 【8†Zero-Shot BEIR Tasks; publish_date: none†www.emergentmind.com】 Aug 26, +L35: 2025 — Zero-Shot BEIR Tasks are evaluation methodologies that assess IR models' L36: ability to generalize to unseen query domains without task-specific ...See more L37: * 【9†BEIR-PL: Zero Shot Information Retrieval Benchmark for ...; publish_date: -L38: none†aclanthology.org】 by K Wojtasik · 2024 · Cited by 12 — BEIR-PL is a new -L39: benchmark with 13 datasets for Polish Information Retrieval, created to advance +L38: none†aclanthology.org】 by K Wojtasik · 2024 · Cited by 12 — BEIR-PL is a new +L39: benchmark with 13 datasets for Polish Information Retrieval, created to advance L40: research in this area. [16] WebGPT benchmark (Search_Results/WebGPT benchmark) **viewing lines [0 - 38] of 38** -L0: +L0: L1: URL: Search_Results/WebGPT benchmark L2: # Search Results -L3: -L4: * 【0†WebGPT: Improving the factual accuracy of language ...; publish_date: -L5: none†openai.com】 Dec 16, 2021 — Our models outperform GPT‑3 on TruthfulQA and +L3: +L4: * 【0†WebGPT: Improving the factual accuracy of language ...; publish_date: +L5: none†openai.com】 Dec 16, 2021 — Our models outperform GPT‑3 on TruthfulQA and L6: exhibit more favourable scaling properties. However, our models lag behind human L7: performance, ... -L8: * 【1†A Simple Yet Challenging Benchmark for Browsing Agents; publish_date: -L9: none†arxiv.org】 by J Wei · 2025 · Cited by 124 — Abstract. We present -L10: BrowseComp, a simple yet challenging benchmark for measuring the ability for +L8: * 【1†A Simple Yet Challenging Benchmark for Browsing Agents; publish_date: +L9: none†arxiv.org】 by J Wei · 2025 · Cited by 124 — Abstract. We present +L10: BrowseComp, a simple yet challenging benchmark for measuring the ability for L11: agents to browse the web. -L12: * 【2†openai/webgpt_comparisons · Datasets at Hugging Face; publish_date: -L13: none†huggingface.co】 This is the dataset of all comparisons that were marked as -L14: suitable for reward modeling by the end of the WebGPT project. There are 19,578 +L12: * 【2†openai/webgpt_comparisons · Datasets at Hugging Face; publish_date: +L13: none†huggingface.co】 This is the dataset of all comparisons that were marked as +L14: suitable for reward modeling by the end of the WebGPT project. There are 19,578 L15: comparisons in total. 
-L16: * 【3†Evaluation & Limitations of WebGPT, WebVoyager & Agent-E; publish_date: -L17: none†deepsense.ai】 Oct 14, 2024 — WebArena benchmark features 812 tasks -L18: evaluated using metrics such as Exact Match, Must Include, and Fuzzy Match, +L16: * 【3†Evaluation & Limitations of WebGPT, WebVoyager & Agent-E; publish_date: +L17: none†deepsense.ai】 Oct 14, 2024 — WebArena benchmark features 812 tasks +L18: evaluated using metrics such as Exact Match, Must Include, and Fuzzy Match, L19: focusing on outcomes rather ... -L20: * 【4†OpenAI Announces Question-Answering AI WebGPT; publish_date: -L21: none†www.infoq.com】 Jan 25, 2022 — On the TriviaQA benchmark, WebGPT -L22: outperformed GPT-3, producing answers that were true 75% of the time, and "both +L20: * 【4†OpenAI Announces Question-Answering AI WebGPT; publish_date: +L21: none†www.infoq.com】 Jan 25, 2022 — On the TriviaQA benchmark, WebGPT +L22: outperformed GPT-3, producing answers that were true 75% of the time, and "both L23: true and informative" 54% of ... -L24: * 【5†WebGPT: Improving the factual accuracy of language models ...; -L25: publish_date: none†kargarisaac.medium.com】 The top-performing model generated +L24: * 【5†WebGPT: Improving the factual accuracy of language models ...; +L25: publish_date: none†kargarisaac.medium.com】 The top-performing model generated L26: answers that were preferred over 56% of the time compared to answers produced by L27: human demonstrators, with ... -L28: * 【6†Browser-assisted question-answering with human feedback; publish_date: -L29: none†www.alphaxiv.org】 WebGPT represents a significant advancement in long-form -L30: question answering by combining the language generation capabilities of GPT-3 +L28: * 【6†Browser-assisted question-answering with human feedback; publish_date: +L29: none†www.alphaxiv.org】 WebGPT represents a significant advancement in long-form +L30: question answering by combining the language generation capabilities of GPT-3 L31: with real-time web ... -L32: * 【7†Benchmarking Open-Source Large Language Models, GPT-4 ...; publish_date: +L32: * 【7†Benchmarking Open-Source Large Language Models, GPT-4 ...; publish_date: L33: none†ai.nejm.org】 by S Wu · 2024 · Cited by 69 — We show that the current widely -L34: used open-source LLMs have poor zero-shot reasoning ability in nephrology +L34: used open-source LLMs have poor zero-shot reasoning ability in nephrology L35: compared with GPT-4 and Claude 2. -L36: * 【8†0hq/WebGPT: Run GPT model on ...; publish_date: none†github.com】 WebGPT -L37: is a vanilla JS and HTML implementation of a transformer model, intended as a +L36: * 【8†0hq/WebGPT: Run GPT model on ...; publish_date: none†github.com】 WebGPT +L37: is a vanilla JS and HTML implementation of a transformer model, intended as a L38: proof-of-concept as well as educational resource. [17] WebShop benchmark (Search_Results/WebShop benchmark) **viewing lines [0 - 41] of 41** -L0: +L0: L1: URL: Search_Results/WebShop benchmark L2: # Search Results -L3: -L4: * 【0†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: -L5: none†arxiv.org】 by S Yao · 2022 · Cited by 710 — To bridge this gap, we develop +L3: +L4: * 【0†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: +L5: none†arxiv.org】 by S Yao · 2022 · Cited by 710 — To bridge this gap, we develop L6: WebShop -- a simulated e-commerce website environment with 1.18 million real- L7: world products and 12,087 crowd- ... 
-L8: * 【1†WebShop; publish_date: none†webshop-pnlp.github.io】 To bridge this gap, -L9: we develop WebShop – a simulated e-commerce website environment with 1.18 +L8: * 【1†WebShop; publish_date: none†webshop-pnlp.github.io】 To bridge this gap, +L9: we develop WebShop – a simulated e-commerce website environment with 1.18 L10: million real-world products and 12,087 crowd-sourced text ... -L11: * 【2†princeton-nlp/WebShop; publish_date: none†github.com】 WebShop is a -L12: simulated e-commerce website environment with 1.18 million real-world products +L11: * 【2†princeton-nlp/WebShop; publish_date: none†github.com】 WebShop is a +L12: simulated e-commerce website environment with 1.18 million real-world products L13: and 12,087 crowd-sourced text instructions. In this environment, an ... -L14: * 【3†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: -L15: none†papers.nips.cc】 by S Yao · 2022 · Cited by 710 — We collect over 1,600 -L16: human trajectories to first validate the benchmark, then train and evaluate a +L14: * 【3†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: +L15: none†papers.nips.cc】 by S Yao · 2022 · Cited by 710 — We collect over 1,600 +L16: human trajectories to first validate the benchmark, then train and evaluate a L17: diverse range of agents using reinforcement learning, ... -L18: * 【4†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: -L19: none†proceedings.neurips.cc】 by S Yao · 2022 · Cited by 709 — We have developed -L20: WebShop, a new web-based benchmark for sequential decision making and language +L18: * 【4†WebShop: Towards Scalable Real-World Web Interaction ...; publish_date: +L19: none†proceedings.neurips.cc】 by S Yao · 2022 · Cited by 709 — We have developed +L20: WebShop, a new web-based benchmark for sequential decision making and language L21: grounding, modeled on interaction with an e-commerce website. -L22: * 【5†Webshop & Benchmark Analysis | Documentation Infinity; publish_date: -L23: none†docs.fact-finder.com】 Aug 15, 2025 — Evaluation of your shop based on -L24: different categories in comparison, to your competitors/industry. Recommended +L22: * 【5†Webshop & Benchmark Analysis | Documentation Infinity; publish_date: +L23: none†docs.fact-finder.com】 Aug 15, 2025 — Evaluation of your shop based on +L24: different categories in comparison, to your competitors/industry. Recommended L25: when doing a shop relaunch. -L26: * 【6†A Multi-Shop Benchmark for Evaluating Web Agents; publish_date: -L27: none†arxiv.org】 by R Peeters · 2025 · Cited by 2 — Compared to existing -L28: e-commerce benchmarks, such as WebShop or ShoppingBench, WebMall introduces +L26: * 【6†A Multi-Shop Benchmark for Evaluating Web Agents; publish_date: +L27: none†arxiv.org】 by R Peeters · 2025 · Cited by 2 — Compared to existing +L28: e-commerce benchmarks, such as WebShop or ShoppingBench, WebMall introduces L29: comparison-shopping tasks across multiple shops ... -L30: * 【7†WebShop: towards scalable real-world web interaction with ...; -L31: publish_date: none†dl.acm.org】 by S Yao · 2022 · Cited by 710 — To bridge this -L32: gap, we develop WebShop - a simulated e-commerce website environment with 1.18 +L30: * 【7†WebShop: towards scalable real-world web interaction with ...; +L31: publish_date: none†dl.acm.org】 by S Yao · 2022 · Cited by 710 — To bridge this +L32: gap, we develop WebShop - a simulated e-commerce website environment with 1.18 L33: million real-world products and 12, 087 crowd- ... 
-L34: * 【8†[PDF] WebShop: Towards Scalable Real-World Web ...; publish_date: +L34: * 【8†[PDF] WebShop: Towards Scalable Real-World Web ...; publish_date: L35: none†www.semanticscholar.org】 It is shown that agents trained on WebShop exhibit -L36: non-trivial sim-to-real transfer when evaluated on amazon.com and ebay.com, +L36: non-trivial sim-to-real transfer when evaluated on amazon.com and ebay.com, L37: indicating the potential ... -L38: * 【9†X-WebAgentBench: A Multilingual Interactive Web ...; publish_date: -L39: none†aclanthology.org】 by P Wang · 2025 · Cited by 3 — (2023) based on the +L38: * 【9†X-WebAgentBench: A Multilingual Interactive Web ...; publish_date: +L39: none†aclanthology.org】 by P Wang · 2025 · Cited by 3 — (2023) based on the L40: English WebShop benchmark (Yao et al., 2022), while the multilingual task scores L41: are ob- tained through evaluation on our own benchmark. [18] ALFWorld benchmark (Search_Results/ALFWorld benchmark) **viewing lines [0 - 31] of 31** -L0: +L0: L1: URL: Search_Results/ALFWorld benchmark L2: # Search Results -L3: -L4: * 【0†ALFWorld; publish_date: none†alfworld.github.io】 ALFWorld contains -L5: interactive TextWorld environments (Côté et. al) that parallel embodied worlds +L3: +L4: * 【0†ALFWorld; publish_date: none†alfworld.github.io】 ALFWorld contains +L5: interactive TextWorld environments (Côté et. al) that parallel embodied worlds L6: in the ALFRED dataset (Shridhar et. al). -L7: * 【1†ALFWorld: Aligning Text and Embodied Environments for ...; publish_date: -L8: none†arxiv.org】 by M Shridhar · 2020 · Cited by 674 — ALFWorld enables the -L9: creation of a new BUTLER agent whose abstract knowledge, learned in TextWorld, +L7: * 【1†ALFWorld: Aligning Text and Embodied Environments for ...; publish_date: +L8: none†arxiv.org】 by M Shridhar · 2020 · Cited by 674 — ALFWorld enables the +L9: creation of a new BUTLER agent whose abstract knowledge, learned in TextWorld, L10: corresponds directly to concrete, visually grounded actions. -L11: * 【2†ALFWorld: Aligning Text and Embodied Environments ...; publish_date: -L12: none†github.com】 ALFWorld contains interactive TextWorld environments (Côté et. +L11: * 【2†ALFWorld: Aligning Text and Embodied Environments ...; publish_date: +L12: none†github.com】 ALFWorld contains interactive TextWorld environments (Côté et. L13: al) that parallel embodied worlds in the ALFRED dataset (Shridhar et. al). L14: * 【3†alfworld - benchmark's activity; publish_date: none†huggingface.co】 MM- L15: IQ: Benchmarking Human-Like Abstraction and Reasoning in Multimodal Models Paper L16: • 2502.00698 • Published Feb 1 • 24 -L17: * 【4†Tackling AlfWorld with Action Attention and Common ...; publish_date: -L18: none†neurips.cc】 On the Alfworld benchmark for indoor instruction following, we -L19: achieve a significantly higher success rate (50% over the baseline) with our +L17: * 【4†Tackling AlfWorld with Action Attention and Common ...; publish_date: +L18: none†neurips.cc】 On the Alfworld benchmark for indoor instruction following, we +L19: achieve a significantly higher success rate (50% over the baseline) with our L20: novel object ... 
-L21: * 【5†ALFWORLD: ALIGNING TEXT AND EMBODIED ...; publish_date: +L21: * 【5†ALFWORLD: ALIGNING TEXT AND EMBODIED ...; publish_date: L22: none†openreview.net】 by M Shridhar · Cited by 674 — The ALFRED dataset (Shridhar -L23: et al., 2020), set in the THOR simulator (Kolve et al., 2017), is a benchmark +L23: et al., 2020), set in the THOR simulator (Kolve et al., 2017), is a benchmark L24: for learning to com- plete embodied household tasks ... -L25: * 【6†AlfWorld; publish_date: none†primo.ai】 Mar 23, 2024 — A simulator that +L25: * 【6†AlfWorld; publish_date: none†primo.ai】 Mar 23, 2024 — A simulator that L26: enables agents to learn abstract, text based policies in TextWorld (Côté et al., L27: 2018) and then execute goals from the ALFRED benchmark. -L28: * 【7†AlfWorld performance across 134 tasks showing cumulative...; -L29: publish_date: none†www.researchgate.net】 In the AlfWorld benchmark, we defined -L30: hallucination as the occurrence of two or more consecutive identical actions in +L28: * 【7†AlfWorld performance across 134 tasks showing cumulative...; +L29: publish_date: none†www.researchgate.net】 In the AlfWorld benchmark, we defined +L30: hallucination as the occurrence of two or more consecutive identical actions in L31: which the environment responded with ... [19] Mind2Web benchmark (Search_Results/Mind2Web benchmark) **viewing lines [0 - 40] of 40** -L0: +L0: L1: URL: Search_Results/Mind2Web benchmark L2: # Search Results -L3: +L3: L4: * 【0†Mind2Web: Towards a Generalist Agent for the Web; publish_date: none†osu- -L5: nlp-group.github.io】 Mind2Web is a dataset for developing and evaluating -L6: generalist agents for the web that can follow language instructions to complete +L5: nlp-group.github.io】 Mind2Web is a dataset for developing and evaluating +L6: generalist agents for the web that can follow language instructions to complete L7: complex tasks on any ... L8: * 【1†Online-Mind2Web Leaderboard; publish_date: none†huggingface.co】 Online- -L9: Mind2Web is a benchmark designed to evaluate the real-world performance of web +L9: Mind2Web is a benchmark designed to evaluate the real-world performance of web L10: agents on live websites, featuring 300 tasks across 136 popular sites ... -L11: * 【2†Mind2Web: Towards a Generalist Agent for the Web; publish_date: -L12: none†github.com】 Mind2Web is the first dataset for developing and evaluating -L13: generalist agents for the web that can follow language instructions to complete +L11: * 【2†Mind2Web: Towards a Generalist Agent for the Web; publish_date: +L12: none†github.com】 Mind2Web is the first dataset for developing and evaluating +L13: generalist agents for the web that can follow language instructions to complete L14: complex tasks on any ... -L15: * 【3†HAL: Online Mind2Web Leaderboard; publish_date: +L15: * 【3†HAL: Online Mind2Web Leaderboard; publish_date: L16: none†hal.cs.princeton.edu】 Online Mind2Web leaderboard for evaluating AI agents' L17: ability to complete tasks on real, changing webpages. -L18: * 【4†[2506.21506] Mind2Web 2: Evaluating Agentic Search with ...; -L19: publish_date: none†arxiv.org】 by B Gou · 2025 · Cited by 11 — In this paper, we +L18: * 【4†[2506.21506] Mind2Web 2: Evaluating Agentic Search with ...; +L19: publish_date: none†arxiv.org】 by B Gou · 2025 · Cited by 11 — In this paper, we L20: introduce Mind2Web 2, a benchmark of 130 realistic, high-quality, and long- L21: horizon tasks that require real-time web browsing and extensive ... 
-L22: * 【5†Mind2Web 2: Evaluating Agentic Search with Agent-as-a-Judge; +L22: * 【5†Mind2Web 2: Evaluating Agentic Search with Agent-as-a-Judge; L23: publish_date: none†osu-nlp-group.github.io】 We introduce Mind2Web 2, a benchmark -L24: of 130 realistic, high-quality, long-horizon tasks that require real-time web +L24: of 130 realistic, high-quality, long-horizon tasks that require real-time web L25: browsing and extensive information ... -L26: * 【6†Mind2Web: The Benchmark for AI Agent Evaluation and ...; publish_date: -L27: none†www.enhans.ai】 Sep 26, 2025 — Mind2Web is a globally recognized web-based -L28: AI Agent evaluation benchmark introduced by the NLP group at Ohio State +L26: * 【6†Mind2Web: The Benchmark for AI Agent Evaluation and ...; publish_date: +L27: none†www.enhans.ai】 Sep 26, 2025 — Mind2Web is a globally recognized web-based +L28: AI Agent evaluation benchmark introduced by the NLP group at Ohio State L29: University at NeurIPS 2023. -L30: * 【7†Evaluating AI Web Agents: Insights from the WebCanvas ...; publish_date: -L31: none†medium.com】 Thanks to the comprehensive WebCanvas Benchmark, which +L30: * 【7†Evaluating AI Web Agents: Insights from the WebCanvas ...; publish_date: +L31: none†medium.com】 Thanks to the comprehensive WebCanvas Benchmark, which L32: incorporates a robust Mind2Web-Live data set of 542 live web tasks and 2,439 ... -L33: * 【8†Mind2Web: Towards a Generalist Agent for the Web; publish_date: -L34: none†proceedings.neurips.cc】 by X Deng · 2023 · Cited by 760 — We introduce -L35: Mind2Web, the first dataset for developing and evaluating generalist agents for +L33: * 【8†Mind2Web: Towards a Generalist Agent for the Web; publish_date: +L34: none†proceedings.neurips.cc】 by X Deng · 2023 · Cited by 760 — We introduce +L35: Mind2Web, the first dataset for developing and evaluating generalist agents for L36: the web that can follow language instructions to complete complex ... -L37: * 【9†Mind2Web: Towards a Generalist Agent for the Web; publish_date: +L37: * 【9†Mind2Web: Towards a Generalist Agent for the Web; publish_date: L38: none†openreview.net】 by X Deng · Cited by 760 — We introduce Mind2Web, the first -L39: dataset for developing and evaluating generalist agents for the web that can +L39: dataset for developing and evaluating generalist agents for the web that can L40: follow language instructions to complete complex ... [20] VisualWebArena benchmark (Search_Results/VisualWebArena benchmark) **viewing lines [0 - 38] of 38** -L0: +L0: L1: URL: Search_Results/VisualWebArena benchmark L2: # Search Results -L3: -L4: * 【0†VisualWebArena is a benchmark for multimodal agents.; publish_date: -L5: none†github.com】 VisualWebArena is a realistic and diverse benchmark for +L3: +L4: * 【0†VisualWebArena is a benchmark for multimodal agents.; publish_date: +L5: none†github.com】 VisualWebArena is a realistic and diverse benchmark for L6: evaluating multimodal autonomous language agents. L7: * 【1†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: -L8: none†arxiv.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we -L9: introduce VisualWebArena, a benchmark designed to assess the performance of +L8: none†arxiv.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we +L9: introduce VisualWebArena, a benchmark designed to assess the performance of L10: multimodal web agents on realistic \textit{ ... 
L11: * 【2†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: -L12: none†jykoh.com】 To bridge this gap, we introduce VisualWebArena, a benchmark -L13: designed to assess the performance of multimodal web agents on realistic +L12: none†jykoh.com】 To bridge this gap, we introduce VisualWebArena, a benchmark +L13: designed to assess the performance of multimodal web agents on realistic L14: visually grounded tasks. -L15: * 【3†VisualWebArena: Evaluating Multimodal Agents on ...; publish_date: -L16: none†arxiv.org】 VisualWebArena is a research benchmark to measure and evaluate +L15: * 【3†VisualWebArena: Evaluating Multimodal Agents on ...; publish_date: +L16: none†arxiv.org】 VisualWebArena is a research benchmark to measure and evaluate L17: the progress of multimodal agents. It is primarily meant to act as a self- L18: contained sandbox ... L19: * 【4†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: L20: none†aclanthology.org】 by JY Koh · 2024 · Cited by 363 — To bridge this gap, we -L21: introduce VisualWebArena, a benchmark designed to assess the performance of +L21: introduce VisualWebArena, a benchmark designed to assess the performance of L22: multimodal web agents on *realistic visually grounded tasks*. L23: * 【5†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: -L24: none†www.semanticscholar.org】 VisualWebArena: Evaluating Multimodal Agents on -L25: Realistic Visual Web Tasks ... MMInA, a multihop and multimodal benchmark to +L24: none†www.semanticscholar.org】 VisualWebArena: Evaluating Multimodal Agents on +L25: Realistic Visual Web Tasks ... MMInA, a multihop and multimodal benchmark to L26: evaluate the embodied agents ... -L27: * 【6†CMU Researchers Introduce VisualWebArena: An AI ...; publish_date: -L28: none†www.marktechpost.com】 Feb 9, 2024 — VisualWebArena, a benchmark designed -L29: and developed to evaluate the performance of multimodal web agents on realistic +L27: * 【6†CMU Researchers Introduce VisualWebArena: An AI ...; publish_date: +L28: none†www.marktechpost.com】 Feb 9, 2024 — VisualWebArena, a benchmark designed +L29: and developed to evaluate the performance of multimodal web agents on realistic L30: and visually stimulating challenges. L31: * 【7†Evaluating Multimodal Agents on Realistic Visual Web Tasks; publish_date: -L32: none†www.themoonlight.io】 The paper "VisualWebArena: Evaluating Multimodal -L33: Agents on Realistic Visually Grounded Web Tasks" introduces a new benchmark, +L32: none†www.themoonlight.io】 The paper "VisualWebArena: Evaluating Multimodal +L33: Agents on Realistic Visually Grounded Web Tasks" introduces a new benchmark, L34: **VisualWebArena**, ... -L35: * 【8†WebArena: A Realistic Web Environment for Building ...; publish_date: -L36: none†webarena.dev】 Our benchmark is implemented in our fully interactable -L37: highly-realistic WebArena environment. It features diverse tasks human may +L35: * 【8†WebArena: A Realistic Web Environment for Building ...; publish_date: +L36: none†webarena.dev】 Our benchmark is implemented in our fully interactable +L37: highly-realistic WebArena environment. It features diverse tasks human may L38: encounter in their daily ... 
[21] SearchBench benchmark (Search_Results/SearchBench benchmark) **viewing lines [0 - 40] of 40** -L0: +L0: L1: URL: Search_Results/SearchBench benchmark L2: # Search Results -L3: -L4: * 【0†Talc-AI/search-bench; publish_date: none†github.com】 A practical -L5: benchmark that focuses on every day helpfulness of LLM products, not just the +L3: +L4: * 【0†Talc-AI/search-bench; publish_date: none†github.com】 A practical +L5: benchmark that focuses on every day helpfulness of LLM products, not just the L6: underlying models. Searchbench is a benchmark that addresses these ... -L7: * 【1†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: -L8: none†arxiv.org】 These capabilities are essential for robust reasoning, making -L9: SearchBench a valuable benchmark for evaluating LLMs' reasoning capabilities as +L7: * 【1†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: +L8: none†arxiv.org】 These capabilities are essential for robust reasoning, making +L9: SearchBench a valuable benchmark for evaluating LLMs' reasoning capabilities as L10: they continue to ... -L11: * 【2†NasimBrz/SearchBench · Datasets at Hugging Face; publish_date: -L12: none†huggingface.co】 Dataset Summary. SearchBench is a benchmark designed to -L13: evaluate Language Models' (LLMs) ability to solve state-based problems that +L11: * 【2†NasimBrz/SearchBench · Datasets at Hugging Face; publish_date: +L12: none†huggingface.co】 Dataset Summary. SearchBench is a benchmark designed to +L13: evaluate Language Models' (LLMs) ability to solve state-based problems that L14: require combinatorial search ... -L15: * 【3†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: -L16: none†openreview.net】 2025年10月22日 — To further investigate this, we introduce a +L15: * 【3†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: +L16: none†openreview.net】 2025年10月22日 — To further investigate this, we introduce a L17: new benchmark, SearchBench, which contains 11 unique search problems inspired by L18: intuitive puzzles. -L19: * 【4†Navigating the Labyrinth: Evaluating and Enhancing LLMs' ...; -L20: publish_date: none†hub.baai.ac.cn】 2024年6月17日 — -L21: 论文提出了一个新的基准测试SearchBench,包含11种独特的搜索问题类型,并自动化生成任意数量的实例和分析解决方案的可行性、正确性和最优性。论文使用A* +L19: * 【4†Navigating the Labyrinth: Evaluating and Enhancing LLMs' ...; +L20: publish_date: none†hub.baai.ac.cn】 2024年6月17日 — +L21: 论文提出了一个新的基准测试SearchBench,包含11种独特的搜索问题类型,并自动化生成任意数量的实例和分析解决方案的可行性、正确性和最优性。论文使用A* L22: ... -L23: * 【5†Towards Unified Text-based Person Retrieval: A Large- ...; publish_date: -L24: none†blog.csdn.net】 2023年10月17日 — ... Search +L23: * 【5†Towards Unified Text-based Person Retrieval: A Large- ...; publish_date: +L24: none†blog.csdn.net】 2023年10月17日 — ... Search L25: Benchmark(面向统一的基于文本的人物检索:一个大规模的多属性和语言搜索基准); 研究背景. 相关工作; BENCHMARK. 论文方法分析. 网络框架; L26: 1、APTM ... -L27: * 【6†Desearch-ai/ai-search-benchmark; publish_date: none†github.com】 The -L28: SearchBench repository addresses common issues with traditional benchmarks by +L27: * 【6†Desearch-ai/ai-search-benchmark; publish_date: none†github.com】 The +L28: SearchBench repository addresses common issues with traditional benchmarks by L29: focusing on practical, everyday use cases rather than theoretical limits. It ... -L30: * 【7†o1 results for 3 benchmarks: PlanBench, SearchBench, ...; publish_date: -L31: none†www.reddit.com】 o1 results for 3 benchmarks: PlanBench, SearchBench, and -L32: Summary of a Haystack. AI. 
PlanBench: Paper "LLMs Still Can't Plan; Can LRMs? A +L30: * 【7†o1 results for 3 benchmarks: PlanBench, SearchBench, ...; publish_date: +L31: none†www.reddit.com】 o1 results for 3 benchmarks: PlanBench, SearchBench, and +L32: Summary of a Haystack. AI. PlanBench: Paper "LLMs Still Can't Plan; Can LRMs? A L33: ... -L34: * 【8†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: -L35: none†ui.adsabs.harvard.edu】 To further investigate this, we introduce a new -L36: benchmark, SearchBench, which contains 11 unique search problems inspired by +L34: * 【8†Evaluating LLMs' Ability to Reason About Search Problems; publish_date: +L35: none†ui.adsabs.harvard.edu】 To further investigate this, we introduce a new +L36: benchmark, SearchBench, which contains 11 unique search problems inspired by L37: intuitive puzzles. Each SearchBench ... -L38: * 【9†Introducing SearchBench; publish_date: none†www.tag1consulting.com】 -L39: Toward this goal, over the weekend I launched a new project called SearchBench, +L38: * 【9†Introducing SearchBench; publish_date: none†www.tag1consulting.com】 +L39: Toward this goal, over the weekend I launched a new project called SearchBench, L40: a Drupal module for benchmarking Drupal's search performance. As the module ... [22] WebVLN benchmark (Search_Results/WebVLN benchmark) **viewing lines [0 - 42] of 42** -L0: +L0: L1: URL: Search_Results/WebVLN benchmark L2: # Search Results -L3: -L4: * 【0†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L3: +L4: * 【0†WebVLN: Vision-and-Language Navigation on Websites; publish_date: L5: none†ojs.aaai.org】 by Q Chen · 2024 · Cited by 35 — the WebVLN-v1 dataset, where -L6: the performance is far from saturation, highlighting the utility of our +L6: the performance is far from saturation, highlighting the utility of our L7: WebVLN-v1 as a benchmark to assess progress in this field. -L8: * 【1†[2312.15820] WebVLN: Vision-and-Language Navigation on Websites; +L8: * 【1†[2312.15820] WebVLN: Vision-and-Language Navigation on Websites; L9: publish_date: none†ar5iv.labs.arxiv.org】 Experimental results show that WebVLN- -L10: Net outperforms current VLN and web-related navigation methods. ... Code is +L10: Net outperforms current VLN and web-related navigation methods. ... Code is L11: available at: https://github.com/WebVLN/WebVLN. -L12: * 【2†WebVLN: Vision-and-Language Navigation on Websites; publish_date: -L13: none†github.com】 Experimental results show that WebVLN-Net outperforms current -L14: VLN and web-related navigation methods. We believe that the introduction of the +L12: * 【2†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L13: none†github.com】 Experimental results show that WebVLN-Net outperforms current +L14: VLN and web-related navigation methods. We believe that the introduction of the L15: new WebVLN task ... -L16: * 【3†Vision-and-Language Navigation in the Real-World; publish_date: -L17: none†digital.library.adelaide.edu.au】 By leveraging our proposed WebVLN-v1 -L18: dataset, experimental results showcase the superior performance of WebVLN-Net +L16: * 【3†Vision-and-Language Navigation in the Real-World; publish_date: +L17: none†digital.library.adelaide.edu.au】 By leveraging our proposed WebVLN-v1 +L18: dataset, experimental results showcase the superior performance of WebVLN-Net L19: compared to existing VLN and web-related ... 
-L20: * 【4†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L20: * 【4†WebVLN: Vision-and-Language Navigation on Websites; publish_date: L21: none†www.researchgate.net】 Experimental results show that WebVLN-Net outperforms -L22: current VLN and web-related navigation methods. We believe that the +L22: current VLN and web-related navigation methods. We believe that the L23: introduction of the newWebVLN task and ... -L24: * 【5†[PDF] WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L24: * 【5†[PDF] WebVLN: Vision-and-Language Navigation on Websites; publish_date: L25: none†www.semanticscholar.org】 A new task named Vision-and-Language Navigation on -L26: Websites (WebVLN), where question-based instructions are used to train an +L26: Websites (WebVLN), where question-based instructions are used to train an L27: agent, emulating how users ... -L28: * 【6†WebVLN: Vision-and-Language Navigation on Websites; publish_date: -L29: none†arxiv.org】 by Q Chen · 2023 · Cited by 35 — Experimental results show that -L30: WebVLN-Net outperforms current VLN and web-related navigation methods. We +L28: * 【6†WebVLN: Vision-and-Language Navigation on Websites; publish_date: +L29: none†arxiv.org】 by Q Chen · 2023 · Cited by 35 — Experimental results show that +L30: WebVLN-Net outperforms current VLN and web-related navigation methods. We L31: believe that the introduction of the ... -L32: * 【7†Human-Aware Vision-and-Language Navigation; publish_date: -L33: none†proceedings.neurips.cc】 by H Li · 2024 · Cited by 19 — Vision-and-Language -L34: Navigation (VLN) [2, 7, 9, 40] has emerged as a key benchmark for evaluating. +L32: * 【7†Human-Aware Vision-and-Language Navigation; publish_date: +L33: none†proceedings.neurips.cc】 by H Li · 2024 · Cited by 19 — Vision-and-Language +L34: Navigation (VLN) [2, 7, 9, 40] has emerged as a key benchmark for evaluating. L35: Sim2Real transfer [23], showing impressive performance in ... -L36: * 【8†LiveBench; publish_date: none†livebench.ai】 Introducing LiveBench: a +L36: * 【8†LiveBench; publish_date: none†livebench.ai】 Introducing LiveBench: a L37: benchmark for LLMs designed with test set contamination and objective evaluation L38: in mind. -L39: * 【9†MG-VLN: Benchmarking Multi-Goal and Long-Horizon ...; publish_date: -L40: none†ieeexplore.ieee.org】 by J Zhang · 2024 — This task aims to provide a +L39: * 【9†MG-VLN: Benchmarking Multi-Goal and Long-Horizon ...; publish_date: +L40: none†ieeexplore.ieee.org】 by J Zhang · 2024 — This task aims to provide a L41: simulation benchmark to guide the design of lifelong and long-horizon navigation L42: robots. [23] WebNav benchmark (Search_Results/WebNav benchmark) **viewing lines [0 - 36] of 36** -L0: +L0: L1: URL: Search_Results/WebNav benchmark L2: # Search Results -L3: -L4: * 【0†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: -L5: none†github.com】 WebNav is a benchmark task for evaluating an agent with -L6: abilities to understand natural language and plan on partially observed +L3: +L4: * 【0†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: +L5: none†github.com】 WebNav is a benchmark task for evaluating an agent with +L6: abilities to understand natural language and plan on partially observed L7: environments. 
-L8: * 【1†[1602.02261] End-to-End Goal-Driven Web Navigation; publish_date: -L9: none†arxiv.org】 by R Nogueira · 2016 · Cited by 39 — We propose a goal-driven -L10: web navigation as a benchmark task for evaluating an agent with abilities to +L8: * 【1†[1602.02261] End-to-End Goal-Driven Web Navigation; publish_date: +L9: none†arxiv.org】 by R Nogueira · 2016 · Cited by 39 — We propose a goal-driven +L10: web navigation as a benchmark task for evaluating an agent with abilities to L11: understand natural language and plan on partially ... L12: * 【2†nyu-dl/dl4ir-webnav; publish_date: none†github.com】 WebNav is a benchmark -L13: task for evaluating an agent with abilities to understand natural language and +L13: task for evaluating an agent with abilities to understand natural language and L14: plan on partially observed environments. -L15: * 【3†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: -L16: none†www.researchgate.net】 We propose a goal-driven web navigation as a -L17: benchmark task for evaluating an agent with abilities to understand natural +L15: * 【3†WebNav: A New Large-Scale Task for Natural Language ...; publish_date: +L16: none†www.researchgate.net】 We propose a goal-driven web navigation as a +L17: benchmark task for evaluating an agent with abilities to understand natural L18: language and plan on partially ... -L19: * 【4†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: -L20: none†arxiv.org】 In this paper, we introduce WebNav, an innovative multi-modal +L19: * 【4†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: +L20: none†arxiv.org】 In this paper, we introduce WebNav, an innovative multi-modal L21: agent designed to automate web tasks based on natural language. -L22: * 【5†WebCanvas: Benchmarking Web Agents in Online ...; publish_date: -L23: none†openreview.net】 by Y Pan · Cited by 78 — TL;DR: We introduce WebCanvas, an -L24: online evaluation framework for web agents designed to address the dynamic +L22: * 【5†WebCanvas: Benchmarking Web Agents in Online ...; publish_date: +L23: none†openreview.net】 by Y Pan · Cited by 78 — TL;DR: We introduce WebCanvas, an +L24: online evaluation framework for web agents designed to address the dynamic L25: nature of web interactions. -L26: * 【6†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: -L27: none†huggingface.co】 Mar 17, 2025 — Preliminary evaluations show that WebNav -L28: outperforms traditional screen readers in response time and task completion +L26: * 【6†WebNav: An Intelligent Agent for Voice-Controlled Web ...; publish_date: +L27: none†huggingface.co】 Mar 17, 2025 — Preliminary evaluations show that WebNav +L28: outperforms traditional screen readers in response time and task completion L29: accuracy for the visually ... -L30: * 【7†Web Bench - A new way to compare AI Browser Agents; publish_date: -L31: none†www.skyvern.com】 May 29, 2025 — TL;DR: Web Bench is a new dataset to -L32: evaluate web browsing agents that consists of 5,750 tasks on 452 different +L30: * 【7†Web Bench - A new way to compare AI Browser Agents; publish_date: +L31: none†www.skyvern.com】 May 29, 2025 — TL;DR: Web Bench is a new dataset to +L32: evaluate web browsing agents that consists of 5,750 tasks on 452 different L33: websites, with 2,454 tasks ... 
-L34: * 【8†Web Framework Benchmarks; publish_date: none†www.techempower.com】 In the -L35: following tests, we have measured the performance of several web application +L34: * 【8†Web Framework Benchmarks; publish_date: none†www.techempower.com】 In the +L35: following tests, we have measured the performance of several web application L36: platforms, full-stack frameworks, and micro-frameworks. @@ -1509,13 +1509,13 @@ L36: platforms, full-stack frameworks, and micro-frameworks. [24] (https://ar5iv.org/abs/2401.13919) **viewing lines [0 - 129] of 825** -L0: +L0: L1: URL: https://ar5iv.org/abs/2401.13919 L2: # WebVoyager : Building an End-to-End Web Agent with L3: Large Multimodal Models -L4: +L4: L5: Hongliang He1,3 , Wenlin Yao2, Kaixin Ma2, Wenhao Yu2, Yong Dai2, -L6: +L6: L7: Hongming Zhang2, L8: Zhenzhong Lan3, L9: Dong Yu2 @@ -1524,562 +1524,562 @@ L11: 2Tencent AI Lab, L12: 3Westlake University L13: hehongliang@westlake.edu.cn, wenlinyao@global.tencent.com L14: Work done during the internship at Tencent AI Lab. -L15: +L15: L16: ###### Abstract -L17: +L17: L18: The advancement of large language models (LLMs) leads to a new era marked by the -L19: development of autonomous applications in the real world, which drives -L20: innovation in the creation of advanced web-based agents. Existing web agents -L21: typically only handle one input modality and are evaluated only in simplified -L22: web simulators or static web snapshots, greatly limiting their applicability in +L19: development of autonomous applications in the real world, which drives +L20: innovation in the creation of advanced web-based agents. Existing web agents +L21: typically only handle one input modality and are evaluated only in simplified +L22: web simulators or static web snapshots, greatly limiting their applicability in L23: real-world scenarios. To bridge this gap, we introduce WebVoyager, an innovative -L24: Large Multimodal Model (LMM) powered web agent that can complete user -L25: instructions end-to-end by interacting with real-world websites. Moreover, we -L26: propose a new evaluation protocol for web agents to address the challenges of -L27: automatic evaluation of open-ended web agent tasks, leveraging the robust -L28: multimodal comprehension capabilities of GPT-4V. We create a new benchmark by -L29: gathering real-world tasks from 15 widely used websites to evaluate our agents. -L30: We show that WebVoyager achieves a 55.7% task success rate, significantly +L24: Large Multimodal Model (LMM) powered web agent that can complete user +L25: instructions end-to-end by interacting with real-world websites. Moreover, we +L26: propose a new evaluation protocol for web agents to address the challenges of +L27: automatic evaluation of open-ended web agent tasks, leveraging the robust +L28: multimodal comprehension capabilities of GPT-4V. We create a new benchmark by +L29: gathering real-world tasks from 15 widely used websites to evaluate our agents. +L30: We show that WebVoyager achieves a 55.7% task success rate, significantly L31: surpassing the performance of both GPT-4 (All Tools) and the WebVoyager (text- L32: only) setups, underscoring the exceptional capability of WebVoyager in practical -L33: applications. We found that our proposed automatic evaluation achieves 85.3% -L34: agreement with human judgment, paving the way for further development of web -L35: agents in a real-world setting.111Our code and data will be released at +L33: applications. 
We found that our proposed automatic evaluation achieves 85.3% +L34: agreement with human judgment, paving the way for further development of web +L35: agents in a real-world setting.111Our code and data will be released at L36: https://github.com/MinorJerry/WebVoyager -L37: +L37: L38: ## 1 Introduction -L39: -L40: The recent advancement of large language models (LLMs), such as ChatGPT and -L41: GPT-4 (OpenAI, 2023), have sparked significant interest in developing LLM-based -L42: autonomous agents (AutoGPT, 2022) for complex task execution (Qin et al., 2023; +L39: +L40: The recent advancement of large language models (LLMs), such as ChatGPT and +L41: GPT-4 (OpenAI, 2023), have sparked significant interest in developing LLM-based +L42: autonomous agents (AutoGPT, 2022) for complex task execution (Qin et al., 2023; L43: Schick et al., 2023). Recent studies have explored the construction of text- -L44: based web browsing environments and how to instruct large language model agents -L45: to perform web navigation (Nakano et al., 2021; Gur et al., 2023; Zhou et al., -L46: 2023; Lu et al., 2023). The primary challenge in these works lies in managing -L47: complex and verbose HTML texts, and solutions include simplifying and +L44: based web browsing environments and how to instruct large language model agents +L45: to perform web navigation (Nakano et al., 2021; Gur et al., 2023; Zhou et al., +L46: 2023; Lu et al., 2023). The primary challenge in these works lies in managing +L47: complex and verbose HTML texts, and solutions include simplifying and L48: structuring HTML (Nakano et al., 2021; Zhou et al., 2023; Gur et al., 2023; Deng L49: et al., 2023). -L50: -L51: However, existing approaches overlook a critical functionality of browsing: -L52: rendering HTML into visual webpages. Particularly, vision capability is crucial -L53: for utilizing tools like web browsers, as rendered web pages are inherently -L54: designed with user experience (UX), emphasizing intuitive information and -L55: structured presentation. This design principle of rendering makes visual -L56: analysis more effective than mere HTML representation. At present, large -L57: multimodal models (LMMs), particularly GPT-4V(ision) (OpenAI, 2023) and Gemini -L58: (Team et al., 2023), demonstrate a remarkable ability to integrate intricate -L59: visual cues with textual information. Existing studies such as Pix2Struct (Lee +L50: +L51: However, existing approaches overlook a critical functionality of browsing: +L52: rendering HTML into visual webpages. Particularly, vision capability is crucial +L53: for utilizing tools like web browsers, as rendered web pages are inherently +L54: designed with user experience (UX), emphasizing intuitive information and +L55: structured presentation. This design principle of rendering makes visual +L56: analysis more effective than mere HTML representation. At present, large +L57: multimodal models (LMMs), particularly GPT-4V(ision) (OpenAI, 2023) and Gemini +L58: (Team et al., 2023), demonstrate a remarkable ability to integrate intricate +L59: visual cues with textual information. Existing studies such as Pix2Struct (Lee L60: et al., 2023) and WebArena (Zhou et al., 2023), have initiated explorations into -L61: using screenshots as inputs for decision-making in web navigation, yet these -L62: are preliminary and do not represent a deep exploration. 
Therefore, building -L63: multimodal web agents to leverage the environment rendered by browsers through -L64: screenshots, thus mimicking human web browsing behavior, is now a viable +L61: using screenshots as inputs for decision-making in web navigation, yet these +L62: are preliminary and do not represent a deep exploration. Therefore, building +L63: multimodal web agents to leverage the environment rendered by browsers through +L64: screenshots, thus mimicking human web browsing behavior, is now a viable L65: approach to enhance web navigation efficiency. -L66: -L67: We introduce WebVoyager, a multimodal web agent designed to handle web tasks +L66: +L67: We introduce WebVoyager, a multimodal web agent designed to handle web tasks L68: online in an end-to-end manner, which denotes managing the process from start to -L69: finish autonomously without intermediate human intervention. We construct an -L70: online environment using Selenium for WebVoyager, feeding it with screenshots -L71: and textual content in interactive web elements. Inspired by Set-of-Mark -L72: Prompting (Yang et al., 2023a), we mark interactive web elements on screenshots -L73: (see Figure 2) to facilitate decision-making for WebVoyager. As a pioneer in -L74: combining vision and text information during web navigation, we advocate that -L75: autonomous end-to-end task completion, multimodal capabilities and online -L76: navigation constitute the essential trajectory toward the genuine intelligence +L69: finish autonomously without intermediate human intervention. We construct an +L70: online environment using Selenium for WebVoyager, feeding it with screenshots +L71: and textual content in interactive web elements. Inspired by Set-of-Mark +L72: Prompting (Yang et al., 2023a), we mark interactive web elements on screenshots +L73: (see Figure 2) to facilitate decision-making for WebVoyager. As a pioneer in +L74: combining vision and text information during web navigation, we advocate that +L75: autonomous end-to-end task completion, multimodal capabilities and online +L76: navigation constitute the essential trajectory toward the genuine intelligence L77: of web agents. -L78: -L79: Another challenge arises when it comes to evaluating an end-to-end web agent -L80: with online navigation. Existing benchmarks, such as Mind2Web (Deng et al., -L81: 2023), primarily focus on stepwise and offline evaluation, where agents follow +L78: +L79: Another challenge arises when it comes to evaluating an end-to-end web agent +L80: with online navigation. Existing benchmarks, such as Mind2Web (Deng et al., +L81: 2023), primarily focus on stepwise and offline evaluation, where agents follow L82: predefined “golden” trajectory for action selection. This approach, however, may L83: not fully account for the variety of viable strategies to accomplish a task, as -L84: it only reflects one possible plan. This limitation could lead to a biased -L85: evaluation and difficulties in fairly comparing different methods. To more -L86: accurately gauge the capabilities of web agents in end-to-end task completion, -L87: we save screenshots throughout the online navigation process, and then use +L84: it only reflects one possible plan. This limitation could lead to a biased +L85: evaluation and difficulties in fairly comparing different methods. 
To more +L86: accurately gauge the capabilities of web agents in end-to-end task completion, +L87: we save screenshots throughout the online navigation process, and then use L88: GPT-4V to evaluate these trajectories and the final results automatically. Human -L89: evaluations are also conducted to verify the results and confirm the +L89: evaluations are also conducted to verify the results and confirm the L90: reliability of GPT-4V as the evaluator. -L91: -L92: We conduct evaluations on a collected dataset, which is semi-automatically -L93: generated using a self-instruct (Wang et al., 2022) method, comprising 300 web +L91: +L92: We conduct evaluations on a collected dataset, which is semi-automatically +L93: generated using a self-instruct (Wang et al., 2022) method, comprising 300 web L94: tasks from 15 commonly accessed websites. Additionally, we extract 90 web- -L95: related tasks of level 1 and level 2 from the GAIA (Mialon et al., 2023) to -L96: enrich our evaluation. We compare our WebVoyager with 1) GPT-4 (All -L97: Tools)222GPT-4 (All Tools) is an integrated tool-based agent released by OpenAI -L98: in Oct. 2023. See https://chat.openai.com/, and 2) WebVoyager in a text-only -L99: setting, employing the accessibility tree proposed in WebArena (Zhou et al., -L100: 2023) to describe web pages. The results show that WebVoyager achieves a Task +L95: related tasks of level 1 and level 2 from the GAIA (Mialon et al., 2023) to +L96: enrich our evaluation. We compare our WebVoyager with 1) GPT-4 (All +L97: Tools)222GPT-4 (All Tools) is an integrated tool-based agent released by OpenAI +L98: in Oct. 2023. See https://chat.openai.com/, and 2) WebVoyager in a text-only +L99: setting, employing the accessibility tree proposed in WebArena (Zhou et al., +L100: 2023) to describe web pages. The results show that WebVoyager achieves a Task L101: Success Rate of 55.7%, significantly outperforming GPT-4 (All Tools) with a rate -L102: of 32.7% and the text-only setting with a rate of 39.0%, demonstrating the -L103: effectiveness of our method. Furthermore, we report the consistency between +L102: of 32.7% and the text-only setting with a rate of 39.0%, demonstrating the +L103: effectiveness of our method. Furthermore, we report the consistency between L104: human-human and human-GPT4V to ensure credibility. Our main contributions are as L105: follows: -L106: +L106: L107: - • -L108: We employ a multimodal web agent that integrates textual and visual information +L108: We employ a multimodal web agent that integrates textual and visual information L109: to address web tasks end-to-end and introduce a generalist planning approach for L110: navigation. -L111: +L111: L112: - • -L113: We build an online web browsing environment, offering a variety of tasks -L114: centered on widely used websites and introducing a method for expanding these +L113: We build an online web browsing environment, offering a variety of tasks +L114: centered on widely used websites and introducing a method for expanding these L115: tasks. -L116: +L116: L117: - • -L118: We conduct manual evaluations of navigation trajectories and propose an -L119: automated evaluation protocol using GPT-4V. We present a comprehensive analysis +L118: We conduct manual evaluations of navigation trajectories and propose an +L119: automated evaluation protocol using GPT-4V. We present a comprehensive analysis L120: of the evaluation results and show that GPT-4V can serve as a reliable evaluator L121: for online agents. 
-L122: +L122: L123: ## 2 Related Work -L124: +L124: L125: ### 2.1 Web Agents -L126: -L127: Autonomous web navigation (Shi et al., 2017; Yang et al., 2023b) requires an -L128: agent to follow instructions, construct plans, comprehend complex web +L126: +L127: Autonomous web navigation (Shi et al., 2017; Yang et al., 2023b) requires an +L128: agent to follow instructions, construct plans, comprehend complex web L129: structures, and decompose tasks into step-by-step decisions (Weng, 2023). To [25] (https://ar5iv.org/abs/2307.13854) **viewing lines [0 - 124] of 1092** -L0: +L0: L1: URL: https://ar5iv.org/abs/2307.13854 L2: # WebArena: A Realistic Web Environment for Building Autonomous Agents -L3: +L3: L4: Shuyan Zhou Frank F. Xu11footnotemark: 1 Hao Zhu Xuhui Zhou22footnotemark: 2 -L5: +L5: L6: Robert Lo22footnotemark: 2 Abishek Sridhar22footnotemark: 2 Xianyi Cheng Tianyue L7: Ou L8: Yonatan Bisk Daniel Fried Uri Alon Graham Neubig L9: Carnegie Mellon University L10: {shuyanzh, fangzhex, gneubig}@cs.cmu.edu L11: Lead contributors.Equal contribution. -L12: +L12: L13: ###### Abstract -L14: -L15: With advances in generative AI, there is now potential for autonomous agents to -L16: manage daily tasks via natural language commands. However, current agents are -L17: primarily created and tested in simplified synthetic environments, leading to a +L14: +L15: With advances in generative AI, there is now potential for autonomous agents to +L16: manage daily tasks via natural language commands. However, current agents are +L17: primarily created and tested in simplified synthetic environments, leading to a L18: disconnect with real-world scenarios. In this paper, we build an environment for L19: language-guided agents that is highly realistic and reproducible. Specifically, -L20: we focus on agents that perform tasks on the web, and create an environment -L21: with fully functional websites from four common domains: e-commerce, social -L22: forum discussions, collaborative software development, and content management. -L23: Our environment is enriched with tools (e.g., a map) and external knowledge -L24: bases (e.g., user manuals) to encourage human-like task-solving. Building upon -L25: our environment, we release a set of benchmark tasks focusing on evaluating the -L26: functional correctness of task completions. The tasks in our benchmark are -L27: diverse, long-horizon, and designed to emulate tasks that humans routinely +L20: we focus on agents that perform tasks on the web, and create an environment +L21: with fully functional websites from four common domains: e-commerce, social +L22: forum discussions, collaborative software development, and content management. +L23: Our environment is enriched with tools (e.g., a map) and external knowledge +L24: bases (e.g., user manuals) to encourage human-like task-solving. Building upon +L25: our environment, we release a set of benchmark tasks focusing on evaluating the +L26: functional correctness of task completions. The tasks in our benchmark are +L27: diverse, long-horizon, and designed to emulate tasks that humans routinely L28: perform on the internet. We experiment with several baseline agents, integrating L29: recent techniques such as reasoning before acting. The results demonstrate that -L30: solving complex tasks is challenging: our best GPT-4-based agent only achieves -L31: an end-to-end task success rate of 14.41%, significantly lower than the human -L32: performance of 78.24%. 
These results highlight the need for further development -L33: of robust agents, that current state-of-the-art large language models are far +L30: solving complex tasks is challenging: our best GPT-4-based agent only achieves +L31: an end-to-end task success rate of 14.41%, significantly lower than the human +L32: performance of 78.24%. These results highlight the need for further development +L33: of robust agents, that current state-of-the-art large language models are far L34: from perfect performance in these real-life tasks, and that WebArena can be used L35: to measure such progress. -L36: +L36: L37: Our code, data, environment reproduction resources, and video demonstrations are L38: publicly available at https://webarena.dev/. -L39: +L39: L40: ## 1 Introduction -L41: -L42: Autonomous agents that perform everyday tasks via human natural language +L41: +L42: Autonomous agents that perform everyday tasks via human natural language L43: commands could significantly augment human capabilities, improve efficiency, and -L44: increase accessibility. Nonetheless, to fully leverage the power of autonomous +L44: increase accessibility. Nonetheless, to fully leverage the power of autonomous L45: agents, it is crucial to understand their behavior within an environment that is -L46: both authentic and reproducible. This will allow measurement of the ability of +L46: both authentic and reproducible. This will allow measurement of the ability of L47: agents on tasks that human users care about in a fair and consistent manner. -L48: -L49: Current environments for evaluate agents tend to over-simplify real-world -L50: situations. As a result, the functionality of many environments is a limited -L51: version of their real-world counterparts, leading to a lack of task diversity -L52: (Shi et al., 2017; Anderson et al., 2018; Gordon et al., 2018; Misra et al., -L53: 2016; Shridhar et al., 2020; 2021; Yao et al., 2022a). In addition, these -L54: simplifications often lower the complexity of tasks as compared to their -L55: execution in the real world (Puig et al., 2018; Shridhar et al., 2020; Yao et -L56: al., 2022a). Finally, some environments are presented as a static resource (Shi -L57: et al., 2017; Deng et al., 2023) where agents are confined to accessing only -L58: those states that were previously cached during data collection, thus limiting -L59: the breadth and diversity of exploration. Dor evaluation, many environments -L60: focus on comparing the textual surface form of the predicted action sequences -L61: with reference action sequences, disregarding the functional correctness of the -L62: executions and possible alternative solutions (Puig et al., 2018; Jernite et -L63: al., 2019; Xu et al., 2021; Li et al., 2020; Deng et al., 2023). These +L48: +L49: Current environments for evaluate agents tend to over-simplify real-world +L50: situations. As a result, the functionality of many environments is a limited +L51: version of their real-world counterparts, leading to a lack of task diversity +L52: (Shi et al., 2017; Anderson et al., 2018; Gordon et al., 2018; Misra et al., +L53: 2016; Shridhar et al., 2020; 2021; Yao et al., 2022a). In addition, these +L54: simplifications often lower the complexity of tasks as compared to their +L55: execution in the real world (Puig et al., 2018; Shridhar et al., 2020; Yao et +L56: al., 2022a). 
Finally, some environments are presented as a static resource (Shi +L57: et al., 2017; Deng et al., 2023) where agents are confined to accessing only +L58: those states that were previously cached during data collection, thus limiting +L59: the breadth and diversity of exploration. Dor evaluation, many environments +L60: focus on comparing the textual surface form of the predicted action sequences +L61: with reference action sequences, disregarding the functional correctness of the +L62: executions and possible alternative solutions (Puig et al., 2018; Jernite et +L63: al., 2019; Xu et al., 2021; Li et al., 2020; Deng et al., 2023). These L64: limitations often result in a discrepancy between simulated environments and the -L65: real world, and can potentially impact the generalizability of AI agents to -L66: successfully understand, adapt, and operate within complex real-world +L65: real world, and can potentially impact the generalizability of AI agents to +L66: successfully understand, adapt, and operate within complex real-world L67: situations. -L68: -L69: We introduce WebArena, a realistic and reproducible web environment designed to +L68: +L69: We introduce WebArena, a realistic and reproducible web environment designed to L70: facilitate the development of autonomous agents capable of executing tasks (§2). -L71: An overview of WebArena is in Figure 1. Our environment comprises four fully -L72: operational, self-hosted web applications, each representing a distinct domain -L73: prevalent on the internet: online shopping, discussion forums, collaborative +L71: An overview of WebArena is in Figure 1. Our environment comprises four fully +L72: operational, self-hosted web applications, each representing a distinct domain +L73: prevalent on the internet: online shopping, discussion forums, collaborative L74: development, and business content management. Furthermore, WebArena incorporates L75: several utility tools, such as map, calculator, and scratchpad, to best support -L76: possible human-like task executions. Lastly, WebArena is complemented by an +L76: possible human-like task executions. Lastly, WebArena is complemented by an L77: extensive collection of documentation and knowledge bases that vary from general -L78: resources like English Wikipedia to more domain-specific references, such as -L79: manuals for using the integrated development tool (Fan et al., 2022). The -L80: content populating these websites is extracted from their real-world -L81: counterparts, preserving the authenticity of the content served on each -L82: platform. We deliver the hosting services using Docker containers with gym-APIs -L83: (Brockman et al., 2016), ensuring both the usability and the reproducibility of +L78: resources like English Wikipedia to more domain-specific references, such as +L79: manuals for using the integrated development tool (Fan et al., 2022). The +L80: content populating these websites is extracted from their real-world +L81: counterparts, preserving the authenticity of the content served on each +L82: platform. We deliver the hosting services using Docker containers with gym-APIs +L83: (Brockman et al., 2016), ensuring both the usability and the reproducibility of L84: WebArena. -L85: -L86: Along with WebArena, we release a ready-to-use benchmark with 812 long-horizon -L87: web-based tasks (§3). Each task is described as a high-level natural language -L88: intent, emulating the abstract language usage patterns typically employed by -L89: humans (Bisk et al., 2019). 
Two example intents are shown in the upper left of -L90: Figure 1. We focus on evaluating the functional correctness of these tasks, +L85: +L86: Along with WebArena, we release a ready-to-use benchmark with 812 long-horizon +L87: web-based tasks (§3). Each task is described as a high-level natural language +L88: intent, emulating the abstract language usage patterns typically employed by +L89: humans (Bisk et al., 2019). Two example intents are shown in the upper left of +L90: Figure 1. We focus on evaluating the functional correctness of these tasks, L91: i.e., does the result of the execution actually achieve the desired goal (§3.2). -L92: For instance, to evaluate the example in Figure 2, our evaluation method -L93: verifies the concrete contents in the designated repository. This evaluation is -L94: not only more reliable (Zhong et al., 2017; Chen et al., 2021; Wang et al., -L95: 2022) than comparing the textual surface-form action sequences (Puig et al., -L96: 2018; Deng et al., 2023) but also accommodate a range of potential valid paths -L97: to achieve the same goal, which is a ubiquitous phenomenon in sufficiently +L92: For instance, to evaluate the example in Figure 2, our evaluation method +L93: verifies the concrete contents in the designated repository. This evaluation is +L94: not only more reliable (Zhong et al., 2017; Chen et al., 2021; Wang et al., +L95: 2022) than comparing the textual surface-form action sequences (Puig et al., +L96: 2018; Deng et al., 2023) but also accommodate a range of potential valid paths +L97: to achieve the same goal, which is a ubiquitous phenomenon in sufficiently L98: complex tasks. -L99: -L100: We use this benchmark to evaluate several agents that can follow NL command and +L99: +L100: We use this benchmark to evaluate several agents that can follow NL command and L101: perform web-based tasks (§4). These agents are implemented in a few-shot in- -L102: context learning fashion with powerful large language models (LLMs) such as -L103: GPT-4 and PALM-2. Experiment results show that the best GPT-4 agent performance -L104: is somewhat limited, with an end-to-end task success rate of only 14.41%, while -L105: the human performance is 78.24%. We hypothesize that the limited performance of -L106: current LLMs stems from a lack of crucial capabilities such as active -L107: exploration and failure recovery to successfully perform complex tasks (§5.2). -L108: These outcomes underscore the necessity for further development towards robust +L102: context learning fashion with powerful large language models (LLMs) such as +L103: GPT-4 and PALM-2. Experiment results show that the best GPT-4 agent performance +L104: is somewhat limited, with an end-to-end task success rate of only 14.41%, while +L105: the human performance is 78.24%. We hypothesize that the limited performance of +L106: current LLMs stems from a lack of crucial capabilities such as active +L107: exploration and failure recovery to successfully perform complex tasks (§5.2). +L108: These outcomes underscore the necessity for further development towards robust L109: and effective agents (LeCun, 2022) in WebArena. -L110: +L110: L111: ## 2 WebArena: Websites as an Environment for Autonomous Agents -L112: -L113: Our goal is to create a realistic and reproducible web environment. We achieve -L114: reproducibility by making the environment standalone, without relying on live -L115: websites. 
This circumvents technical challenges such as bots being subject to -L116: CAPTCHAs, unpredictable content modifications, and configuration changes, which -L117: obstruct a fair comparison across different systems over time. We achieve -L118: realism by using open-source libraries that underlie many in-use sites from -L119: several popular categories and importing data to our environment from their +L112: +L113: Our goal is to create a realistic and reproducible web environment. We achieve +L114: reproducibility by making the environment standalone, without relying on live +L115: websites. This circumvents technical challenges such as bots being subject to +L116: CAPTCHAs, unpredictable content modifications, and configuration changes, which +L117: obstruct a fair comparison across different systems over time. We achieve +L118: realism by using open-source libraries that underlie many in-use sites from +L119: several popular categories and importing data to our environment from their L120: real-world counterparts. -L121: +L121: L122: ### 2.1 Controlling Agents through High-level Natural Language -L123: +L123: L124: The WebArena environment is denoted asℰ\mathcal{E} with state space [26] (https://ar5iv.org/abs/2311.12983) **viewing lines [0 - 118] of 1207** -L0: +L0: L1: URL: https://ar5iv.org/abs/2311.12983 L2: 1]FAIR, Meta 2]HuggingFace 3]AutoGPT 4]GenAI, Meta -L3: +L3: L4: # GAIA: A Benchmark for General AI Assistants -L5: -L6: Grégoire Mialon Clémentine Fourrier Craig Swift Thomas Wolf Yann LeCun Thomas +L5: +L6: Grégoire Mialon Clémentine Fourrier Craig Swift Thomas Wolf Yann LeCun Thomas L7: Scialom [ [ [ [ {gmialon,tscialom}@meta.com clementine@huggingface.co -L8: +L8: L9: ###### Abstract -L10: -L11: We introduce GAIA, a benchmark for General AI Assistants that, if solved, would -L12: represent a milestone in AI research. GAIA proposes real-world questions that -L13: require a set of fundamental abilities such as reasoning, multi-modality -L14: handling, web browsing, and generally tool-use proficiency. GAIA questions are -L15: conceptually simple for humans yet challenging for most advanced AIs: we show -L16: that human respondents obtain 92% vs. 15% for GPT-4 equipped with plugins. This -L17: notable performance disparity contrasts with the recent trend of LLMs -L18: outperforming humans on tasks requiring professional skills in e.g. law or -L19: chemistry. GAIA’s philosophy departs from the current trend in AI benchmarks -L20: suggesting to target tasks that are ever more difficult for humans. We posit -L21: that the advent of Artificial General Intelligence (AGI) hinges on a system’s -L22: capability to exhibit similar robustness as the average human does on such -L23: questions. Using GAIA’s methodology, we devise 466 questions and their answer. -L24: We release our questions while retaining answers to 300 of them to power a +L10: +L11: We introduce GAIA, a benchmark for General AI Assistants that, if solved, would +L12: represent a milestone in AI research. GAIA proposes real-world questions that +L13: require a set of fundamental abilities such as reasoning, multi-modality +L14: handling, web browsing, and generally tool-use proficiency. GAIA questions are +L15: conceptually simple for humans yet challenging for most advanced AIs: we show +L16: that human respondents obtain 92% vs. 15% for GPT-4 equipped with plugins. This +L17: notable performance disparity contrasts with the recent trend of LLMs +L18: outperforming humans on tasks requiring professional skills in e.g. 
law or +L19: chemistry. GAIA’s philosophy departs from the current trend in AI benchmarks +L20: suggesting to target tasks that are ever more difficult for humans. We posit +L21: that the advent of Artificial General Intelligence (AGI) hinges on a system’s +L22: capability to exhibit similar robustness as the average human does on such +L23: questions. Using GAIA’s methodology, we devise 466 questions and their answer. +L24: We release our questions while retaining answers to 300 of them to power a L25: leader-board hereby accessible. -L26: +L26: L27: \correspondence -L28: +L28: L29: ## 1 Introduction -L30: -L31: Large Language Models (LLMs) arguably open the way to general purpose systems. +L30: +L31: Large Language Models (LLMs) arguably open the way to general purpose systems. L32: Indeed, the latest among them (OpenAI, 2023; Anthropic, 2023; Anil et al., 2023; -L33: Touvron et al., 2023) are fluent, knowledgeable, aligned to some extent with -L34: human preferences (Ouyang et al., 2022), and can be augmented (Mialon et al., +L33: Touvron et al., 2023) are fluent, knowledgeable, aligned to some extent with +L34: human preferences (Ouyang et al., 2022), and can be augmented (Mialon et al., L35: 2023) with tools such as web browsers or code interpreters in a zero or few-shot -L36: setting (Brown et al., 2020). However, evaluating these systems is an open -L37: problem: given their emerging new capabilities, LLMs are regularly breaking AI +L36: setting (Brown et al., 2020). However, evaluating these systems is an open +L37: problem: given their emerging new capabilities, LLMs are regularly breaking AI L38: benchmarks, at an ever-increasing rate (Kiela et al., 2023). -L39: -L40: In search for more challenging benchmarks, current trend suggests to seek tasks -L41: that are ever more difficult for humans, and challenge LLMs with more intricate -L42: educational assessments, for example in STEM and Law, or target more complex +L39: +L40: In search for more challenging benchmarks, current trend suggests to seek tasks +L41: that are ever more difficult for humans, and challenge LLMs with more intricate +L42: educational assessments, for example in STEM and Law, or target more complex L43: realisations, such as writing a coherent book. But, tasks that are difficult for -L44: humans are not necessarily difficult for recent systems: the challenging MMLU +L44: humans are not necessarily difficult for recent systems: the challenging MMLU L45: or GSM8k benchmarks for example (Hendrycks et al., 2021; Cobbe et al., 2021) are -L46: already close to be solved,111GPT4 does 86.4% on MMLU. Human non-specialist -L47: accuracy on the benchmark is only 34.5% Expert-level human performance is -L48: estimated at 89.8%. due to rapid LLM improvement possibly combined with data -L49: contamination.222See for example the case of Hellaswag. Furthermore, open-ended -L50: generation generally requires human or model-based evaluation (Zheng et al., -L51: 2023). Human evaluation will become less and less feasible when increasing the -L52: task complexity, e.g. in terms of output length or required skills: how to -L53: evaluate a book generated by an AI, or solutions to maths problems that few -L54: people in the world can solve? Model-based evaluations on the other hand are by +L46: already close to be solved,111GPT4 does 86.4% on MMLU. Human non-specialist +L47: accuracy on the benchmark is only 34.5% Expert-level human performance is +L48: estimated at 89.8%. 
due to rapid LLM improvement possibly combined with data +L49: contamination.222See for example the case of Hellaswag. Furthermore, open-ended +L50: generation generally requires human or model-based evaluation (Zheng et al., +L51: 2023). Human evaluation will become less and less feasible when increasing the +L52: task complexity, e.g. in terms of output length or required skills: how to +L53: evaluate a book generated by an AI, or solutions to maths problems that few +L54: people in the world can solve? Model-based evaluations on the other hand are by L55: construction dependent of stronger models hence cannot evaluate new state-of- -L56: the-art models, without mentioning potential subtle biases such as preferring -L57: the first choice presented (Zheng et al., 2023). Overall, evaluating new AI +L56: the-art models, without mentioning potential subtle biases such as preferring +L57: the first choice presented (Zheng et al., 2023). Overall, evaluating new AI L58: systems requires to rethink benchmarks (Chollet, 2019). -L59: -L60: Alternatively to tasks that are harder for humans, AI systems could be asked to -L61: solve conceptually simple tasks yet that require accurate execution of complex -L62: sequences of actions, with large combinatorial spaces. The output could only be -L63: obtained upon successful completion of the task and be easy to validate, -L64: analogous to the Proof of Work algorithm (Jakobsson and Juels, 1999; Dwork and +L59: +L60: Alternatively to tasks that are harder for humans, AI systems could be asked to +L61: solve conceptually simple tasks yet that require accurate execution of complex +L62: sequences of actions, with large combinatorial spaces. The output could only be +L63: obtained upon successful completion of the task and be easy to validate, +L64: analogous to the Proof of Work algorithm (Jakobsson and Juels, 1999; Dwork and L65: Naor, 1993), where a computer is asked to solve a complex problem whose solution -L66: is easy to verify. Tasks for AI assistants, given their need for access to a -L67: diverse and uncertain world, meet this criterion while being inherently rooted +L66: is easy to verify. Tasks for AI assistants, given their need for access to a +L67: diverse and uncertain world, meet this criterion while being inherently rooted L68: in practical use cases. -L69: -L70: We move in that direction by proposing GAIA, a benchmark for General AI -L71: Assistants featuring 466 carefully crafted questions and their answer, along -L72: with the associated design methodology. Our questions are easy to create, -L73: challenging for AI systems—for LLMs, most require complex generations—, yet -L74: admit a unique, factual answer, allowing a simple and robust automatic +L69: +L70: We move in that direction by proposing GAIA, a benchmark for General AI +L71: Assistants featuring 466 carefully crafted questions and their answer, along +L72: with the associated design methodology. Our questions are easy to create, +L73: challenging for AI systems—for LLMs, most require complex generations—, yet +L74: admit a unique, factual answer, allowing a simple and robust automatic L75: evaluation. -L76: +L76: L77: GAIA attempts to avoid current pitfalls of LLMs evaluation by targeting: -L78: -L79: Real-world and challenging questions. For example, a LLM will typically need to +L78: +L79: Real-world and challenging questions. 
For example, a LLM will typically need to L80: browse the open and changing web, handle multi-modality, or reason over multiple -L81: steps to answer our questions. Conversely, many LLM benchmarks are quite +L81: steps to answer our questions. Conversely, many LLM benchmarks are quite L82: specific and/or restricted to closed and synthetic environments. -L83: -L84: Easy interpretability through conceptually simple tasks—non experts annotators -L85: exhibit a near perfect score—, associated reasoning trace, and few but highly -L86: curated questions. This is in contrast with aggregated benchmarks that can lack +L83: +L84: Easy interpretability through conceptually simple tasks—non experts annotators +L85: exhibit a near perfect score—, associated reasoning trace, and few but highly +L86: curated questions. This is in contrast with aggregated benchmarks that can lack L87: efficiency and reliability (Perlitz et al., 2023). -L88: -L89: Non-gameability. Answering the questions requires successful completion of some +L88: +L89: Non-gameability. Answering the questions requires successful completion of some L90: number of steps, which cannot easily be brute forced due to their diversity. The L91: possibility to check the reasoning trace, the accuracy required in the answers, -L92: their absence in plain text from the internet prevent a possible data -L93: contamination. In contrast, multiple choice answers (e.g., MMLU) make -L94: contamination assessment more difficult since a wrong reasoning trace can more +L92: their absence in plain text from the internet prevent a possible data +L93: contamination. In contrast, multiple choice answers (e.g., MMLU) make +L94: contamination assessment more difficult since a wrong reasoning trace can more L95: easily get to the correct choice. -L96: -L97: Simplicity of use. Crucially, the answers to our questions are factoid, concise +L96: +L97: Simplicity of use. Crucially, the answers to our questions are factoid, concise L98: and unambiguous. These properties allow simple, fast and factual evaluation. Our -L99: questions are meant to be answered in zero shot, limiting the influence of the -L100: evaluation setup. By opposition, many LLM benchmarks require evaluations that +L99: questions are meant to be answered in zero shot, limiting the influence of the +L100: evaluation setup. By opposition, many LLM benchmarks require evaluations that L101: are sensitive to the experimental setup such as the number and nature of prompts -L102: (Liang et al., 2022b) (Section 8.2), or the benchmark +L102: (Liang et al., 2022b) (Section 8.2), or the benchmark L103: implementation.333https://huggingface.co/blog/evaluating-mmlu-leaderboard -L104: -L105: In spite of being successful at tasks that are difficult for humans, the most +L104: +L105: In spite of being successful at tasks that are difficult for humans, the most L106: capable LLMs do poorly on GAIA. Even equipped with tools, GPT4 does not exceed a -L107: 30% success rate for the easiest of our tasks, and 0% for the hardest. In the +L107: 30% success rate for the easiest of our tasks, and 0% for the hardest. In the L108: meantime, the average success rate for human respondents is 92%. 
Consequently, a -L109: system capable of solving GAIA can be assessed in the context of t-AGI,444As +L109: system capable of solving GAIA can be assessed in the context of t-AGI,444As L110: defined in https://www.alignmentforum.org/posts/BoA3agdkAzL6HQtQP/clarifying- -L111: and-predicting-agi, a t-AGI beats, on most tasks, most human experts who are -L112: given time t to perform the task noting that humans typically take between 6 +L111: and-predicting-agi, a t-AGI beats, on most tasks, most human experts who are +L112: given time t to perform the task noting that humans typically take between 6 L113: minutes for the simplest questions to 17 minutes for the most complex ones. From -L114: a related perspective, such system would arguably be a competent General AI -L115: within the framework recently proposed in Morris et al. (2023), which also -L116: appear to be the next milestone in AI research since ChatGPT (OpenAI, 2023) is -L117: one level below. This paper covers the composition of GAIA, its design choices, +L114: a related perspective, such system would arguably be a competent General AI +L115: within the framework recently proposed in Morris et al. (2023), which also +L116: appear to be the next milestone in AI research since ChatGPT (OpenAI, 2023) is +L117: one level below. This paper covers the composition of GAIA, its design choices, L118: and explain how to craft questions and the associated challenges so that the Error fetching URL `https://github.com/sambanova/toolbench` Error fetching URL `https://hotpotqa.github.io/` [27] (https://aclanthology.org/N18-1074.pdf) **viewing lines [0 - 134] of 809** -L0: +L0: L1: URL: https://aclanthology.org/N18-1074.pdf L2: ===== Page 1 ===== -L3: +L3: L4: # FEVER: a large-scale dataset for Fact Extraction and VERification -L5: -L6: James Thorne\(^1\), Andreas Vlachos\(^1\), Christos Christodoulopoulos\(^2\), +L5: +L6: James Thorne\(^1\), Andreas Vlachos\(^1\), Christos Christodoulopoulos\(^2\), L7: and Arpit Mittal\(^2\) -L8: -L9: \(^1\)Department of Computer Science, University of Sheffield -L10: \(^2\)Amazon Research Cambridge -L11: {j.thorne, a.vlachos}@sheffield.ac.uk -L12: {chrchrs, mitarpit}@amazon.co.uk -L13: +L8: +L9: \(^1\)Department of Computer Science, University of Sheffield +L10: \(^2\)Amazon Research Cambridge +L11: {j.thorne, a.vlachos}@sheffield.ac.uk +L12: {chrchrs, mitarpit}@amazon.co.uk +L13: L14: ## Abstract -L15: -L16: In this paper we introduce a new publicly available dataset for verification +L15: +L16: In this paper we introduce a new publicly available dataset for verification L17: against textual sources, FEVER: Fact Extraction and VERification. It consists of -L18: 185,445 claims generated by altering sentences extracted from Wikipedia and -L19: subsequently verified without knowledge of the sentence they were derived from. -L20: The claims are classified as Supported, Refuted or NotEnoughInfo by annotators +L18: 185,445 claims generated by altering sentences extracted from Wikipedia and +L19: subsequently verified without knowledge of the sentence they were derived from. +L20: The claims are classified as Supported, Refuted or NotEnoughInfo by annotators L21: achieving 0.6841 in Fleiss \(\kappa\). For the first two classes, the annotators -L22: also recorded the sentence(s) forming the necessary evidence for their -L23: judgment. To characterize the challenge of the dataset presented, we develop a +L22: also recorded the sentence(s) forming the necessary evidence for their +L23: judgment. 
To characterize the challenge of the dataset presented, we develop a L24: pipeline approach and compare it to suitably designed oracles. The best accuracy -L25: we achieve on labeling a claim accompanied by the correct evidence is 31.87%, +L25: we achieve on labeling a claim accompanied by the correct evidence is 31.87%, L26: while if we ignore the evidence we achieve 50.91%. Thus we believe that FEVER is -L27: a challenging testbed that will help stimulate progress on claim verification +L27: a challenging testbed that will help stimulate progress on claim verification L28: against textual sources. -L29: +L29: L30: ## 1 Introduction -L31: -L32: The ever-increasing amounts of textual information available combined with the -L33: ease in sharing it through the web has increased the demand for verification, -L34: also referred to as fact checking. While it has received a lot of attention in -L35: the context of journalism, verification is important for other domains, e.g. +L31: +L32: The ever-increasing amounts of textual information available combined with the +L33: ease in sharing it through the web has increased the demand for verification, +L34: also referred to as fact checking. While it has received a lot of attention in +L35: the context of journalism, verification is important for other domains, e.g. L36: information in scientific publications, product reviews, etc. -L37: -L38: In this paper we focus on verification of textual claims against textual -L39: sources. When compared to textual entailment (TE)/natural language inference -L40: (Dagan et al., 2009; Bowman et al., 2015), the key difference is that in these -L41: tasks the passage to verify each claim is given, and in recent years it -L42: typically consists a single sentence, while in verification systems it is -L43: retrieved from a large set of documents in order to form the evidence. Another +L37: +L38: In this paper we focus on verification of textual claims against textual +L39: sources. When compared to textual entailment (TE)/natural language inference +L40: (Dagan et al., 2009; Bowman et al., 2015), the key difference is that in these +L41: tasks the passage to verify each claim is given, and in recent years it +L42: typically consists a single sentence, while in verification systems it is +L43: retrieved from a large set of documents in order to form the evidence. Another L44: related task is question answering (QA), for which approaches have recently been L45: extended to handle large-scale resources such as Wikipedia (Chen et al., 2017). -L46: However, questions typically provide the information needed to identify the -L47: answer, while information missing from a claim can often be crucial in -L48: retrieving refuting evidence. For example, a claim stating "Fiji's largest -L49: island is Kauai." can be refuted by retrieving "Kauai is the oldest Hawaiian +L46: However, questions typically provide the information needed to identify the +L47: answer, while information missing from a claim can often be crucial in +L48: retrieving refuting evidence. For example, a claim stating "Fiji's largest +L49: island is Kauai." can be refuted by retrieving "Kauai is the oldest Hawaiian L50: Island." as evidence. -L51: -L52: Progress on the aforementioned tasks has benefited from the availability of -L53: large-scale datasets (Bowman et al., 2015; Rajpurkar et al., 2016). 
However, +L51: +L52: Progress on the aforementioned tasks has benefited from the availability of +L53: large-scale datasets (Bowman et al., 2015; Rajpurkar et al., 2016). However, L54: despite the rising interest in verification and fact checking among researchers, -L55: the datasets currently used for this task are limited to a few hundred claims. -L56: Indicatively, the recently conducted Fake News Challenge (Pomerleau and Rao, -L57: 2017) with 50 participating teams used a dataset consisting of 300 claims -L58: verified against 2,595 associated news articles which is orders of magnitude +L55: the datasets currently used for this task are limited to a few hundred claims. +L56: Indicatively, the recently conducted Fake News Challenge (Pomerleau and Rao, +L57: 2017) with 50 participating teams used a dataset consisting of 300 claims +L58: verified against 2,595 associated news articles which is orders of magnitude L59: smaller than those used for TE and QA. -L60: -L61: In this paper we present a new dataset for claim verification, FEVER: Fact -L62: Extraction and VERification. It consists of 185,445 claims manually verified -L63: against the introductory sections of Wikipedia pages and classified as -L64: Supported, Refuted or NotEnoughInfo. For the first two classes, systems and -L65: annotators need to also return the combination of sentences forming the -L66: necessary evidence supporting or refuting the claim (see Figure 1). The claims +L60: +L61: In this paper we present a new dataset for claim verification, FEVER: Fact +L62: Extraction and VERification. It consists of 185,445 claims manually verified +L63: against the introductory sections of Wikipedia pages and classified as +L64: Supported, Refuted or NotEnoughInfo. For the first two classes, systems and +L65: annotators need to also return the combination of sentences forming the +L66: necessary evidence supporting or refuting the claim (see Figure 1). The claims L67: were generated by human annotators extracting claims from Wikipedia and mutating -L68: them in a variety of ways, some of which were meaning-altering. The +L68: them in a variety of ways, some of which were meaning-altering. The L69: verification of each -L70: +L70: L71: 809 -L72: +L72: L73: Proceedings of NAACL-HLT 2018, pages 809–819 -L74: -L75: New Orleans, Louisiana, June 1 - 6, 2018. ©2018 Association for Computational +L74: +L75: New Orleans, Louisiana, June 1 - 6, 2018. ©2018 Association for Computational L76: Linguistics -L77: +L77: L78: ===== Page 2 ===== -L79: -L80: claim was conducted in a separate annotation process by annotators who were -L81: aware of the page but not the sentence from which original claim was extracted -L82: and thus in 31.75% of the claims more than one sentence was considered -L83: appropriate evidence. Claims require composition of evidence from multiple -L84: sentences in 16.82% of cases. Furthermore, in 12.15% of the claims, this +L79: +L80: claim was conducted in a separate annotation process by annotators who were +L81: aware of the page but not the sentence from which original claim was extracted +L82: and thus in 31.75% of the claims more than one sentence was considered +L83: appropriate evidence. Claims require composition of evidence from multiple +L84: sentences in 16.82% of cases. Furthermore, in 12.15% of the claims, this L85: evidence was taken from multiple pages. 
-L86: -L87: To ensure annotation consistency, we developed suitable guidelines and user -L88: interfaces, resulting in inter-annotator agreement of 0.6841 in Fleiss (Fleiss, -L89: 1971) in claim verification classification, and 95.42% precision and 72.36% +L86: +L87: To ensure annotation consistency, we developed suitable guidelines and user +L88: interfaces, resulting in inter-annotator agreement of 0.6841 in Fleiss (Fleiss, +L89: 1971) in claim verification classification, and 95.42% precision and 72.36% L90: recall in evidence retrieval. -L91: -L92: To characterize the challenges posed by FEVER we develop a pipeline approach -L93: which, given a claim, first identifies relevant documents, then selects -L94: sentences forming the evidence from the documents and finally classifies the -L95: claim w.r.t. evidence. The best performing version achieves 31.87% accuracy in -L96: verification when requiring correct evidence to be retrieved for claims -L97: Supported or Refuted, and 50.91% if the correctness of the evidence is ignored, -L98: both indicating the difficulty but also the feasibility of the task. We also -L99: conducted oracle experiments in which components of the pipeline were replaced +L91: +L92: To characterize the challenges posed by FEVER we develop a pipeline approach +L93: which, given a claim, first identifies relevant documents, then selects +L94: sentences forming the evidence from the documents and finally classifies the +L95: claim w.r.t. evidence. The best performing version achieves 31.87% accuracy in +L96: verification when requiring correct evidence to be retrieved for claims +L97: Supported or Refuted, and 50.91% if the correctness of the evidence is ignored, +L98: both indicating the difficulty but also the feasibility of the task. We also +L99: conducted oracle experiments in which components of the pipeline were replaced L100: by the gold standard annotations, and observed that the most challenging part of -L101: the task is selecting the sentences containing the evidence. In addition to +L101: the task is selecting the sentences containing the evidence. In addition to L102: publishing the data via our website1, we also publish the annotation interfaces2 L103: and the baseline system3 to stimulate further research on verification. -L104: +L104: L105: Footnote 1: http://fever.ai -L106: +L106: L107: Footnote 2: https://github.com/awslabs/fever -L108: +L108: L109: Footnote 3: https://github.com/sheffieldnlp/fever-baselines -L110: +L110: L111: ## 2 Related Works -L112: -L113: Vlachos and Riedel (2014) constructed a dataset for claim verification -L114: consisting of 106 claims, selecting data from fact-checking websites such as +L112: +L113: Vlachos and Riedel (2014) constructed a dataset for claim verification +L114: consisting of 106 claims, selecting data from fact-checking websites such as L115: PolitiFact, taking advantage of the labelled claims available there. However, in -L116: order to develop claim verification components we typically require the -L117: justification for each verdict, including the sources used. While this -L118: information is usually available in justifications provided by the journalists, +L116: order to develop claim verification components we typically require the +L117: justification for each verdict, including the sources used. While this +L118: information is usually available in justifications provided by the journalists, L119: they are not in a machine-readable form. 
Thus, also considering the small number L120: of claims, the task defined by the dataset proposed remains too challenging for -L121: the ML/NLP methods currently available. Wang (2017) extended this approach by -L122: including all 12.8K claims available by Politifact via its API, however the +L121: the ML/NLP methods currently available. Wang (2017) extended this approach by +L122: including all 12.8K claims available by Politifact via its API, however the L123: justification and the evidence contained in it was ignored in the experiments as -L124: it was not machine-readable. Instead, the claims were classified considering -L125: only the text and the metadata related to the person making the claim. While +L124: it was not machine-readable. Instead, the claims were classified considering +L125: only the text and the metadata related to the person making the claim. While L126: this rendered the task amenable to current NLP/ML methods, it does not allow for -L127: verification against any sources and no evidence needs to be returned to +L127: verification against any sources and no evidence needs to be returned to L128: justify the verdicts. -L129: -L130: The Fake News challenge (Pomerleau and Rao, 2017) modelled verification as +L129: +L130: The Fake News challenge (Pomerleau and Rao, 2017) modelled verification as L131: stance classification: given a claim and an article, predict whether the article -L132: supports, refutes, observes (neutrally states the claim) or is irrelevant to +L132: supports, refutes, observes (neutrally states the claim) or is irrelevant to L133: the claim. It consists of 50K labelled claim-article pairs, combining 300 claims L134: with 2,582 articles. The claims and the articles were curated and labeled by [28] (https://nlp.cs.washington.edu/triviaqa/) **viewing lines [0 - 48] of 48** -L0: +L0: L1: URL: https://nlp.cs.washington.edu/triviaqa/ -L2: ## TriviaQA: A Large Scale Dataset for Reading Comprehension and Question +L2: ## TriviaQA: A Large Scale Dataset for Reading Comprehension and Question L3: Answering -L4: +L4: L5: TriviaQA is a reading comprehension dataset containing over 650K question- L6: answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by -L7: trivia enthusiasts and independently gathered evidence documents, six per +L7: trivia enthusiasts and independently gathered evidence documents, six per L8: question on average, that provide high quality distant supervision for answering -L9: the questions. The details can be found in our ACL 17 paper TriviaQA: A Large +L9: the questions. The details can be found in our ACL 17 paper TriviaQA: A Large L10: Scale Distantly Supervised Challenge Dataset for Reading Comprehension -L11: -L12: Mandar Joshi, Eunsol Choi, Daniel Weld, Luke Zettlemoyer. TriviaQA: A Large +L11: +L12: Mandar Joshi, Eunsol Choi, Daniel Weld, Luke Zettlemoyer. TriviaQA: A Large L13: Scale Distantly Supervised Challenge Dataset for Reading Comprehension -L14: +L14: L15: In Association for Computational Linguistics (ACL) 2017, Vancouver, Canada. L16: [bib] -L17: +L17: L18: ## News -L19: +L19: L20: ##### Jul 2017 -L21: -L22: The TriviaQA leaderboard is now live on Codalab. Submit your predictions for +L21: +L22: The TriviaQA leaderboard is now live on Codalab. Submit your predictions for L23: evaluation on the test set! 
-L24: +L24: L25: ## Data -L26: -L27: If you are interested in the reading comprehension task motivated in the paper, +L26: +L27: If you are interested in the reading comprehension task motivated in the paper, L28: click on the link below to download the data. -L29: +L29: L30: If you are interested in open domain QA, click on the link below to download the -L31: data. It contains the unfiltered dataset with 110K question-answer pairs. The -L32: Wikipedia and top 10 search documents can be obtained from the RC version. The -L33: main difference between the RC version above and the unfiltered dataset is that -L34: not all documents (in the unfiltered set) for a given question contain the +L31: data. It contains the unfiltered dataset with 110K question-answer pairs. The +L32: Wikipedia and top 10 search documents can be obtained from the RC version. The +L33: main difference between the RC version above and the unfiltered dataset is that +L34: not all documents (in the unfiltered set) for a given question contain the L35: answer string(s). This makes the unfiltered dataset more appropriate for IR- L36: style QA. -L37: -L38: The University of Washington does not own the copyright of the questions and +L37: +L38: The University of Washington does not own the copyright of the questions and L39: documents included in TriviaQA. -L40: +L40: L41: ## Code -L42: +L42: L43: Check out our Github repository. -L44: +L44: L45: ## Contact -L46: -L47: For any questions about the code or data, please contact Mandar Joshi -- {first +L46: +L47: For any questions about the code or data, please contact Mandar Joshi -- {first L48: name of the first author}90[at]cs[dot]washington[dot]edu Error fetching URL `https://research.google/pubs/natural-questions-a-benchmark-for-question-answering-research/` Error fetching URL `https://microsoft.github.io/msmarco/` @@ -2088,128 +2088,128 @@ L48: name of the first author}90[at]cs[dot]washington[dot]edu [29] (https://ar5iv.org/abs/2207.01206) **viewing lines [0 - 122] of 1392** -L0: +L0: L1: URL: https://ar5iv.org/abs/2207.01206 -L2: # WebShop: Towards Scalable Real-World Web Interaction with Grounded Language +L2: # WebShop: Towards Scalable Real-World Web Interaction with Grounded Language L3: Agents -L4: -L5: Shunyu Yao111Equal contribution. Project site with code, data, and demos: -L6: https://webshop-pnlp.github.io. Howard Chen111Equal contribution. Project site -L7: with code, data, and demos: https://webshop-pnlp.github.io. John Yang Karthik +L4: +L5: Shunyu Yao111Equal contribution. Project site with code, data, and demos: +L6: https://webshop-pnlp.github.io. Howard Chen111Equal contribution. Project site +L7: with code, data, and demos: https://webshop-pnlp.github.io. John Yang Karthik L8: Narasimhan -L9: +L9: L10: Department of Computer Science, Princeton University L11: {shunyuy, howardchen, jy1682, karthikn}@princeton.edu -L12: +L12: L13: ###### Abstract -L14: -L15: Existing benchmarks for grounding language in interactive environments either -L16: lack real-world linguistic elements, or prove difficult to scale up due to -L17: substantial human involvement in the collection of data or feedback signals. To +L14: +L15: Existing benchmarks for grounding language in interactive environments either +L16: lack real-world linguistic elements, or prove difficult to scale up due to +L17: substantial human involvement in the collection of data or feedback signals. 
To L18: bridge this gap, we develop WebShop – a simulated e-commerce website environment -L19: with million real-world products and 1.181.18 crowd-sourced text instructions. -L20: Given a text instruction specifying a product requirement, an agent needs to -L21: navigate multiple types of webpages and issue diverse actions to find, -L22: customize, and purchase an item. WebShop provides several challenges for -L23: language grounding including understanding compositional instructions, query -L24: (re-)formulation, comprehending and acting on noisy text in webpages, and -L25: performing strategic exploration. We collect over 12,08712,087 human -L26: demonstrations for the task, and train and evaluate a diverse range of agents -L27: using reinforcement learning, imitation learning, and pre-trained image and -L28: language models. Our best model achieves a task success rate of 1,6001,600, -L29: which outperforms rule-based heuristics (29%29\%) but is far lower than human +L19: with million real-world products and 1.181.18 crowd-sourced text instructions. +L20: Given a text instruction specifying a product requirement, an agent needs to +L21: navigate multiple types of webpages and issue diverse actions to find, +L22: customize, and purchase an item. WebShop provides several challenges for +L23: language grounding including understanding compositional instructions, query +L24: (re-)formulation, comprehending and acting on noisy text in webpages, and +L25: performing strategic exploration. We collect over 12,08712,087 human +L26: demonstrations for the task, and train and evaluate a diverse range of agents +L27: using reinforcement learning, imitation learning, and pre-trained image and +L28: language models. Our best model achieves a task success rate of 1,6001,600, +L29: which outperforms rule-based heuristics (29%29\%) but is far lower than human L30: expert performance (9.6%9.6\%). We also analyze agent and human trajectories and -L31: ablate various model components to provide insights for developing future -L32: agents with stronger language understanding and decision making abilities. -L33: Finally, we show that agents trained on WebShop exhibit non-trivial sim-to-real -L34: transfer when evaluated on amazon.com and ebay.com , indicating the potential -L35: value of WebShop in developing practical web-based agents that can operate in +L31: ablate various model components to provide insights for developing future +L32: agents with stronger language understanding and decision making abilities. +L33: Finally, we show that agents trained on WebShop exhibit non-trivial sim-to-real +L34: transfer when evaluated on amazon.com and ebay.com , indicating the potential +L35: value of WebShop in developing practical web-based agents that can operate in L36: the wild.59%59\% -L37: +L37: L38: ## 1 Introduction -L39: -L40: Recent advances in natural language processing (NLP) and reinforcement learning +L39: +L40: Recent advances in natural language processing (NLP) and reinforcement learning L41: (RL) have brought about several exciting developments in agents that can perform L42: sequential decision making while making use of linguistic context [30, 50, 58]. L43: On the other hand, large-scale language models like GPT-3 [6] and BERT [11] are -L44: excelling at traditional NLP benchmarks such as text classification, +L44: excelling at traditional NLP benchmarks such as text classification, L45: information extraction and question answering. 
While the former set of tasks are -L46: limited in their set of linguistic concepts and prove difficult to scale up, -L47: the latter tasks usually contain static, non-interactive datasets that lack -L48: adequate grounding to extra-linguistic concepts [4]. In order to make further -L49: progress in building grounded language models, we believe there is a need for -L50: scalable interactive environments that contain: (1) language elements that -L51: reflect rich, real-world usage and are collectible at scale, and (2) task -L52: feedback that is well-defined and automatically computable to facilitate -L53: interactive learning, without the constant need for expensive feedback from +L46: limited in their set of linguistic concepts and prove difficult to scale up, +L47: the latter tasks usually contain static, non-interactive datasets that lack +L48: adequate grounding to extra-linguistic concepts [4]. In order to make further +L49: progress in building grounded language models, we believe there is a need for +L50: scalable interactive environments that contain: (1) language elements that +L51: reflect rich, real-world usage and are collectible at scale, and (2) task +L52: feedback that is well-defined and automatically computable to facilitate +L53: interactive learning, without the constant need for expensive feedback from L54: humans. -L55: -L56: The world wide web (WWW) is a massive open-domain interactive environment that -L57: inherently satisfies the first aforementioned requirement through its -L58: interconnected set of pages with natural text, images and interactive elements. -L59: By being simultaneously scalable, semantic, interactive, dynamic and realistic, -L60: the web is uniquely different from existing environments for autonomous agents -L61: like games or 3D navigation. Moreover, the web also provides a practical +L55: +L56: The world wide web (WWW) is a massive open-domain interactive environment that +L57: inherently satisfies the first aforementioned requirement through its +L58: interconnected set of pages with natural text, images and interactive elements. +L59: By being simultaneously scalable, semantic, interactive, dynamic and realistic, +L60: the web is uniquely different from existing environments for autonomous agents +L61: like games or 3D navigation. Moreover, the web also provides a practical L62: environment to deploy trained agents, with great potential for alleviating human -L63: efforts in tedious tasks (e.g. buying products, booking appointments). While +L63: efforts in tedious tasks (e.g. buying products, booking appointments). While L64: there has been prior work on building web-based tasks, they either lack depth in -L65: the transition and action spaces, or prove difficult to scale up. Some -L66: benchmarks only contain either a single classification task [39, 46, 31] or -L67: interactions containing only a handful of different pages in each episode [43]. -L68: Others propose tasks with longer horizons but are either limited to following -L69: hyperlinks for web navigation [36] or require human-in-the-loop feedback due to +L65: the transition and action spaces, or prove difficult to scale up. Some +L66: benchmarks only contain either a single classification task [39, 46, 31] or +L67: interactions containing only a handful of different pages in each episode [43]. 
+L68: Others propose tasks with longer horizons but are either limited to following +L69: hyperlinks for web navigation [36] or require human-in-the-loop feedback due to L70: the lack of an automated reward function [33]. -L71: +L71: L72: In this paper, we introduce WebShop (Figure 1) – a large-scale interactive web- -L73: based environment for language understanding and decision making – and train -L74: autonomous agents to complete tasks on this benchmark. With the goals of being +L73: based environment for language understanding and decision making – and train +L74: autonomous agents to complete tasks on this benchmark. With the goals of being L75: scalable and containing realistic language and visual elements, WebShop emulates L76: the task of online shopping on an e-commerce website, where the agent’s goal is L77: to understand a human-provided text instruction and purchase a product to match -L78: the specifications. To do so, the agent needs to query the website’s search -L79: engine, choose items to explore from search results, open and read their -L80: description and details, and select the necessary options (e.g. 32 oz., red -L81: color) before clicking the ‘Buy’ button. In order to pick the optimal product -L82: that matches user requirements, the agent may need to view and compare various -L83: products (including backtracking between pages), and potentially perform -L84: multiple searches. WebShop contains over one million products scraped from -L85: amazon.com, over thousand crowdsourced instructions, and a diverse semantic +L78: the specifications. To do so, the agent needs to query the website’s search +L79: engine, choose items to explore from search results, open and read their +L80: description and details, and select the necessary options (e.g. 32 oz., red +L81: color) before clicking the ‘Buy’ button. In order to pick the optimal product +L82: that matches user requirements, the agent may need to view and compare various +L83: products (including backtracking between pages), and potentially perform +L84: multiple searches. WebShop contains over one million products scraped from +L85: amazon.com, over thousand crowdsourced instructions, and a diverse semantic L86: action space of searching text queries and choosing text buttons. It is packaged -L87: into a convenient OpenAI Gym [5] environment and can be rendered in two modes -L88: (HTML or simple) with parallel observation spaces that are easy for human and -L89: model respectively. Rewards are automatically computed using a combination of -L90: programmatic matching functions that consider the attributes, type, options and -L91: price of the chosen product, alleviating the need for human evaluation and +L87: into a convenient OpenAI Gym [5] environment and can be rendered in two modes +L88: (HTML or simple) with parallel observation spaces that are easy for human and +L89: model respectively. Rewards are automatically computed using a combination of +L90: programmatic matching functions that consider the attributes, type, options and +L91: price of the chosen product, alleviating the need for human evaluation and L92: providing a path to scaling up interactive learning.1212 -L93: -L94: We develop several agents to perform this task, using both reinforcement +L93: +L94: We develop several agents to perform this task, using both reinforcement L95: learning (RL) and imitation learning (IL). We also leverage the latest pre- -L96: trained language models [26, 11] for representing and generating text. 
Our -L97: modular architecture includes a factorized processing of state observations and -L98: action choices using ResNets (visual) and Transformers (text), followed by an -L99: attention fusion layer that helps the agent contextually score each action. Our -L100: best agent achieves an average score of (out of 62.462.4) and successfully -L101: completes the task 100100 of the time, significantly higher than a heuristic -L102: baseline that achieves 28.7%28.7\% and 45.645.6, respectively. While this -L103: demonstrates the potential for IL and RL, the agents are still much lower than -L104: human experts, who can achieve 9.6%9.6\% and 82.182.1 on this task.*** In our -L105: analysis (§5.3), we observe that the task requires patience and consistency, +L96: trained language models [26, 11] for representing and generating text. Our +L97: modular architecture includes a factorized processing of state observations and +L98: action choices using ResNets (visual) and Transformers (text), followed by an +L99: attention fusion layer that helps the agent contextually score each action. Our +L100: best agent achieves an average score of (out of 62.462.4) and successfully +L101: completes the task 100100 of the time, significantly higher than a heuristic +L102: baseline that achieves 28.7%28.7\% and 45.645.6, respectively. While this +L103: demonstrates the potential for IL and RL, the agents are still much lower than +L104: human experts, who can achieve 9.6%9.6\% and 82.182.1 on this task.*** In our +L105: analysis (§5.3), we observe that the task requires patience and consistency, L106: which is lacking in some crowdsource workers, leading to lower scores. Even with -L107: this caveat, the gap between human performance and the model remains -L108: significant. We perform several analyses and ablation studies to identify the -L109: cause of this gap and find several avenues for agent improvement in the future -L110: including more robust search generation, explicit memory modules, and better +L107: this caveat, the gap between human performance and the model remains +L108: significant. We perform several analyses and ablation studies to identify the +L109: cause of this gap and find several avenues for agent improvement in the future +L110: including more robust search generation, explicit memory modules, and better L111: handling of noisy web text. Finally, we also demonstrate an instance of sim-to- -L112: real transfer by deploying agents trained with WebShop to operate on amazon.com +L112: real transfer by deploying agents trained with WebShop to operate on amazon.com L113: and ebay.com, and find that they can achieve similar performances despite search -L114: engine and product differences, and consistently outperform the rule baseline -L115: of using the first result returned by the commercial search engines when -L116: directly searching the instruction texts. This demonstrates the practical +L114: engine and product differences, and consistently outperform the rule baseline +L115: of using the first result returned by the commercial search engines when +L116: directly searching the instruction texts. This demonstrates the practical L117: potential of our work towards developing agents that can operate autonomously on L118: the world wide web (WWW).59.6%59.6\% -L119: +L119: L120: ## 2 Related Work -L121: +L121: L122: Reinforcement learning on the web. 
Nogueira and Cho [36] introduced WikiNav as a Error fetching URL `http://alfworld.github.io/` Error fetching URL `https://osu-nlp-group.github.io/Mind2Web/` @@ -2217,13 +2217,13 @@ L122: Reinforcement learning on the web. Nogueira and Cho [36] introduced WikiNa [30] (https://ar5iv.org/pdf/2406.12172) **viewing lines [0 - 127] of 1478** -L0: +L0: L1: URL: https://ar5iv.org/pdf/2406.12172 -L2: # Navigating the Labyrinth: Evaluating and Enhancing LLMs’ Ability to Reason +L2: # Navigating the Labyrinth: Evaluating and Enhancing LLMs’ Ability to Reason L3: About Search Problems -L4: +L4: L5: Nasim Borazjanizadeh -L6: +L6: L7: Berkeley AI Research, UC Berkeley L8: \AndRoei Herzig L9: Berkeley AI Research, UC Berkeley @@ -2233,117 +2233,117 @@ L12: \AndRogerio Feris L13: MIT-IBM Watson AI Lab L14: \AndLeonid Karlinsky L15: MIT-IBM Watson AI Lab -L16: +L16: L17: ###### Abstract -L18: -L19: Recently, Large Language Models (LLMs) attained impressive performance in math +L18: +L19: Recently, Large Language Models (LLMs) attained impressive performance in math L20: and reasoning benchmarks. However, they still often struggle with logic problems -L21: and puzzles that are relatively easy for humans. To further investigate this, +L21: and puzzles that are relatively easy for humans. To further investigate this, L22: we introduce a new benchmark, SearchBench, containing 11 unique search problems, -L23: each equipped with automated pipelines to generate an arbitrary number of +L23: each equipped with automated pipelines to generate an arbitrary number of L24: instances and analyze the feasibility, correctness, and optimality of LLM- -L25: generated solutions. We show that even the most advanced LLMs fail to solve -L26: these problems end-to-end in text, e.g., GPT4 solves only 1.4%. SearchBench -L27: problems require considering multiple pathways to the solution as well as -L28: backtracking, posing a significant challenge to auto-regressive models. -L29: Instructing LLMs to generate code that solves the problem helps, but only -L30: slightly, e.g., GPT4’s performance rises to 11.7%. In this work, we show that -L31: in-context learning with A* algorithm implementations enhances performance. The -L32: full potential of this promoting approach emerges when combined with our -L33: proposed Multi-Stage-Multi-Try method, which breaks down the algorithm -L34: implementation into two stages and verifies the first stage against unit tests, +L25: generated solutions. We show that even the most advanced LLMs fail to solve +L26: these problems end-to-end in text, e.g., GPT4 solves only 1.4%. SearchBench +L27: problems require considering multiple pathways to the solution as well as +L28: backtracking, posing a significant challenge to auto-regressive models. +L29: Instructing LLMs to generate code that solves the problem helps, but only +L30: slightly, e.g., GPT4’s performance rises to 11.7%. In this work, we show that +L31: in-context learning with A* algorithm implementations enhances performance. The +L32: full potential of this promoting approach emerges when combined with our +L33: proposed Multi-Stage-Multi-Try method, which breaks down the algorithm +L34: implementation into two stages and verifies the first stage against unit tests, L35: raising GPT-4’s performance above 57%. 
-L36: +L36: L37: \doparttoc\faketableofcontents -L38: +L38: L39: ### 1 Introduction -L40: -L41: The advent of Large Language Models (LLMs) has revolutionized the field of -L42: natural language processing, with models like Gemini[18], GPT-4[26] -L43: demonstrating unprecedented performance on reasoning tasks such as GSM8k[8]. -L44: However, these models still exhibit surprising failures on some intuitive -L45: tasks[2, 30, 22] and struggle with multi-step compositional reasoning, +L40: +L41: The advent of Large Language Models (LLMs) has revolutionized the field of +L42: natural language processing, with models like Gemini[18], GPT-4[26] +L43: demonstrating unprecedented performance on reasoning tasks such as GSM8k[8]. +L44: However, these models still exhibit surprising failures on some intuitive +L45: tasks[2, 30, 22] and struggle with multi-step compositional reasoning, L46: combinatorial problems, and planning [9, 40, 44]. Inspired by these observations -L47: and to further investigate LLMs’ reasoning abilities, we offer a new benchmark -L48: of search problems, SearchBench. The problems in SearchBench are combinatorial, -L49: defined as tasks that involve finding an optimal object from a finite set of -L50: objects, where the set of feasible solutions is either discrete or can be -L51: reduced to a discrete set [43]. These problems are predominantly NP-hard and -L52: necessitate systematic exploration of action paths and backtracking to -L53: intermediate feasible states; thus, SearchBench implicitly investigates the +L47: and to further investigate LLMs’ reasoning abilities, we offer a new benchmark +L48: of search problems, SearchBench. The problems in SearchBench are combinatorial, +L49: defined as tasks that involve finding an optimal object from a finite set of +L50: objects, where the set of feasible solutions is either discrete or can be +L51: reduced to a discrete set [43]. These problems are predominantly NP-hard and +L52: necessitate systematic exploration of action paths and backtracking to +L53: intermediate feasible states; thus, SearchBench implicitly investigates the L54: LLM’s capacity for non-linear reasoning. -L55: +L55: L56: SearchBench has five distinct problem categories: (i) pathfinding, (ii) puzzles, -L57: (iii) subset sum, (iv) sorting, and (v) under-determined systems; further -L58: divided into 11 unique problem types. Each problem type is inspired by known -L59: puzzles and combinatorial problems but augmented with modified rules and -L60: constraints to ensure substantial differences from similar problems LLMs -L61: encountered during their training. And the solution to each problem is a -L62: sequence of actions leading from the initial state to the goal state, while -L63: optimizing a cost. We generate100 instances of varying difficulty per problem -L64: type using an automatic pipeline, resulting in 1107 problem instances total. -L65: Each problem type in SearchBench is equipped with an automatic pipeline that +L57: (iii) subset sum, (iv) sorting, and (v) under-determined systems; further +L58: divided into 11 unique problem types. Each problem type is inspired by known +L59: puzzles and combinatorial problems but augmented with modified rules and +L60: constraints to ensure substantial differences from similar problems LLMs +L61: encountered during their training. And the solution to each problem is a +L62: sequence of actions leading from the initial state to the goal state, while +L63: optimizing a cost. 
We generate100 instances of varying difficulty per problem +L64: type using an automatic pipeline, resulting in 1107 problem instances total. +L65: Each problem type in SearchBench is equipped with an automatic pipeline that L66: evaluates LLM-generated solutions on three dimensions: feasibility, correctness, -L67: and optimality. Feasibility checks whether the actions taken follow the -L68: problem’s rules; correctness verifies if a feasible solution reaches the goal +L67: and optimality. Feasibility checks whether the actions taken follow the +L68: problem’s rules; correctness verifies if a feasible solution reaches the goal L69: state; and optimality checks if the least cost solution was found.∼\sim -L70: -L71: SearchBench is challenging to LLMs due to several factors. Firstly, natural -L72: language is less suited for describing or updating accurate representations of -L73: complex intermediate states. Secondly, our experiments show LLMs struggle with -L74: exploring a combinatorial exponentially exploding state-space. Despite the fact -L75: that some methods were developed for long-context reasoning [4, 13, 50], -L76: SearchBench problems cannot be easily summarized [4], reasoned about [13], or -L77: processed in parallel due to their size [50, 45]. Our findings show that even -L78: the strongest LLMs [26] almost completely fail to solve SearchBench problems in +L70: +L71: SearchBench is challenging to LLMs due to several factors. Firstly, natural +L72: language is less suited for describing or updating accurate representations of +L73: complex intermediate states. Secondly, our experiments show LLMs struggle with +L74: exploring a combinatorial exponentially exploding state-space. Despite the fact +L75: that some methods were developed for long-context reasoning [4, 13, 50], +L76: SearchBench problems cannot be easily summarized [4], reasoned about [13], or +L77: processed in parallel due to their size [50, 45]. Our findings show that even +L78: the strongest LLMs [26] almost completely fail to solve SearchBench problems in L79: text-only mode. -L80: -L81: To provide further insights, we show that LLMs’ performance on SearchBench -L82: improves by prompting the models to solve the problems using the A* search -L83: algorithm [11]. A* is a heuristic-based graph traversal algorithm known for its -L84: time efficiency and provable optimality guarantees, making it the most suitable -L85: search algorithm for solving the problems in our benchmark. This method +L80: +L81: To provide further insights, we show that LLMs’ performance on SearchBench +L82: improves by prompting the models to solve the problems using the A* search +L83: algorithm [11]. A* is a heuristic-based graph traversal algorithm known for its +L84: time efficiency and provable optimality guarantees, making it the most suitable +L85: search algorithm for solving the problems in our benchmark. This method L86: leverages A*’s correctness and optimality, while offloading some of the non- -L87: linear computations involved in searching the state-space to code execution. -L88: Additionally, to improve the quality of generated A* codes, motivated that +L87: linear computations involved in searching the state-space to code execution. +L88: Additionally, to improve the quality of generated A* codes, motivated that L89: ensembling helps generation quality[41, 47, 21], we introduce the Multi-Stage- -L90: Multi-Try (MSMT) inference strategy. 
In the "Multi-Try" aspect of MSMT, before -L91: evaluating the solution returned by the code, we first verify whether the code +L90: Multi-Try (MSMT) inference strategy. In the "Multi-Try" aspect of MSMT, before +L91: evaluating the solution returned by the code, we first verify whether the code L92: generated by the model satisfies a set of unit tests: (i) it is executable; (ii) -L93: it returns a list as output; and (iii) data type of list elements is correct. -L94: If the code fails any of the tests, MSMT re-runs the LLM until a valid code is -L95: generated or allowed number of attempts is exhausted. The "Multi-Stage" aspect -L96: of MSMT generates the code in two steps: (i) ‘A* Implementation’ - the -L97: implementation of an instance-agnostic A* algorithm for the problem type; and -L98: (ii) Initialization - the instantiation of initial conditions and state -L99: variables of the problem instance. In MSMT ’Initialization’ is generated -L100: conditioned on the ‘A* Implementation’ (which is generated separately first and -L101: provided in ‘Intitialization’ prompt). We demonstrate that our MSMT A* method -L102: (Fig. 2) significantly enhances the LLMs’ ability to solve search problems, -L103: outperforming all other prompting strategies we used to evaluate models on -L104: SearchBench, including 0-shot text, 4-shot Chain-of-Thought (CoT)[42] text, -L105: 0-shot code generation, and 4-shot A* prompting with the naive greedy decoding +L93: it returns a list as output; and (iii) data type of list elements is correct. +L94: If the code fails any of the tests, MSMT re-runs the LLM until a valid code is +L95: generated or allowed number of attempts is exhausted. The "Multi-Stage" aspect +L96: of MSMT generates the code in two steps: (i) ‘A* Implementation’ - the +L97: implementation of an instance-agnostic A* algorithm for the problem type; and +L98: (ii) Initialization - the instantiation of initial conditions and state +L99: variables of the problem instance. In MSMT ’Initialization’ is generated +L100: conditioned on the ‘A* Implementation’ (which is generated separately first and +L101: provided in ‘Intitialization’ prompt). We demonstrate that our MSMT A* method +L102: (Fig. 2) significantly enhances the LLMs’ ability to solve search problems, +L103: outperforming all other prompting strategies we used to evaluate models on +L104: SearchBench, including 0-shot text, 4-shot Chain-of-Thought (CoT)[42] text, +L105: 0-shot code generation, and 4-shot A* prompting with the naive greedy decoding L106: strategy. 
-L107:
-L108: To summarize, our main contributions are as follows: (i) We contribute the
-L109: SearchBench benchmark designed to assess the capability of LLMs in solving
-L110: state-based problems requiring combinatorial search; (ii) We introduce the A*
-L111: Multi-Stage-Multi-Try (MSMT) framework that leverages the flexibility and
-L112: natural language comprehension capabilities of LLMs, reliability and structured
-L113: nature of the A* algorithm, along with unitests to verify LLMs’ code generation
+L107:
+L108: To summarize, our main contributions are as follows: (i) We contribute the
+L109: SearchBench benchmark designed to assess the capability of LLMs in solving
+L110: state-based problems requiring combinatorial search; (ii) We introduce the A*
+L111: Multi-Stage-Multi-Try (MSMT) framework that leverages the flexibility and
+L112: natural language comprehension capabilities of LLMs, reliability and structured
+L113: nature of the A* algorithm, along with unitests to verify LLMs’ code generation
 L114: in order to address search problems. (iii) We show a strong advantage of MSMT A*
-L115: prompting for solving search problems robustly with LLMs, suggesting
-L116: interesting future work directions and yet leaving significant room for future
+L115: prompting for solving search problems robustly with LLMs, suggesting
+L116: interesting future work directions and yet leaving significant room for future
 L117: improvement on SearchBench.
-L118:
+L118:
 L119: ### 2 SearchBench Benchmark
-L120:
-L121: SearchBench includes five categories of problems: puzzles, subset sum, sorting,
-L122: pathfinding, and under-determined systems, further subdivided into 11 problem
-L123: types and instantiated as 1107 natural language problem instances. Each problem
-L124: type requires a different algorithmic approach to solve. Our problems are
-L125: inspired by well-known (mostly NP-hard) puzzles and search problems, though
-L126: their rules were modified to ensure a solved instance of a problem similar to
+L120:
+L121: SearchBench includes five categories of problems: puzzles, subset sum, sorting,
+L122: pathfinding, and under-determined systems, further subdivided into 11 problem
+L123: types and instantiated as 1107 natural language problem instances. Each problem
+L124: type requires a different algorithmic approach to solve. Our problems are
+L125: inspired by well-known (mostly NP-hard) puzzles and search problems, though
+L126: their rules were modified to ensure a solved instance of a problem similar to
 L127: any SearchBench problem was not observed during LLMs’ massive internet-scale

diff --git a/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt b/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt
index 9a014a7947..bc99497a43 100644
--- a/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt
+++ b/lib/llm/tests/data/deepseek-v3.2/test_output_search_wo_date.txt
@@ -57,44 +57,44 @@ Search for "16,201,670 vaccine doses" "7,583,134 administered".
[0] "16,201,670" vaccine doses (Search_Results/"16,201,670" vaccine doses) **viewing lines [0 - 38] of 38** -L0: +L0: L1: URL: Search_Results/"16,201,670" vaccine doses L2: # Search Results -L3: -L4: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: -L5: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had -L6: received a total of 16,201,670 vaccines, with 7,583,134 administered. While +L3: +L4: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: +L5: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had +L6: received a total of 16,201,670 vaccines, with 7,583,134 administered. While L7: vaccine acceptance is ... -L8: * 【1†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec -L9: 1, 2021 — Total Doses Received 16,201,670. Table 10 gives the total vaccines +L8: * 【1†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec +L9: 1, 2021 — Total Doses Received 16,201,670. Table 10 gives the total vaccines L10: received since the start of Covid -19 vaccination exercise in the country. -L11: * 【2†Output Result Page; publish_date: none†open.unicef.org】 ... 16,201,670 +L11: * 【2†Output Result Page; publish_date: none†open.unicef.org】 ... 16,201,670 L12: doses of multiple vaccines nationwide and full vaccination of 15.5 per cent with L13: two doses of COVID-19 vaccine as of 31 December 2021. -L14: * 【3†rebased GDP; publish_date: none†documents1.worldbank.org】 Dec 7, 2021 — -L15: As of December 5, 2021,. Kenya had received a total of 16,201,670 vaccines, +L14: * 【3†rebased GDP; publish_date: none†documents1.worldbank.org】 Dec 7, 2021 — +L15: As of December 5, 2021,. Kenya had received a total of 16,201,670 vaccines, L16: with. 7,583,134 administered. Vaccine acceptance is reportedly high. L17: * 【4†Integrated Annual Report; publish_date: none†www.co-opbank.co.ke】 May 27, -L18: 2022 — ... doses of Covid-19 vaccines and administered close to 17 million ... +L18: 2022 — ... doses of Covid-19 vaccines and administered close to 17 million ... L19: 16,201,670, huku 7,583,134 zikiwa tayari zimedungwa watu. Bado kuna ... -L20: * 【5†World Bank lifts Kenya's growth prospect to 5% in 2021; publish_date: +L20: * 【5†World Bank lifts Kenya's growth prospect to 5% in 2021; publish_date: L21: none†www.africa-press.net】 ... 16,201,670 vaccines, with 7,583,134 administered. -L22: While vaccine acceptance is reportedly high there is still a long way to go +L22: While vaccine acceptance is reportedly high there is still a long way to go L23: towards the government's ... -L24: * 【6†2020 Annual Report United States; publish_date: none†www.hi-us.org】 -L25: including medicine, vaccines, Covid-19 prevention kits, and kits of ... +L24: * 【6†2020 Annual Report United States; publish_date: none†www.hi-us.org】 +L25: including medicine, vaccines, Covid-19 prevention kits, and kits of ... L26: 16,201,670. 9,630,584. Prepaid expenses. 56,707. 22,601. Total current assets. -L27: * 【7†HOSPITAL CENTRAL DE LA DEFENSA; publish_date: +L27: * 【7†HOSPITAL CENTRAL DE LA DEFENSA; publish_date: L28: none†contrataciondelestado.es】 16201670 X. URPA-URPA. 0. HCD-000393/00. KONTRON. L29: 7000 ... LASER SHOT LBP-1120. CNEUR-C. NEUROFISIOLOGÍA. 0 ... VICTOREEN CT HEAD L30: DOSE PHANTON. 43650-014. -L31: * 【8†body metal concentrations: Topics by ...; publish_date: -L32: none†www.science.gov】 PMID:16201670. Design Characteristics and Tobacco Metal -L33: Concentrations in Filtered Cigars. PubMed. 
Caruso, Rosalie V; O'Connor, Richard +L31: * 【8†body metal concentrations: Topics by ...; publish_date: +L32: none†www.science.gov】 PMID:16201670. Design Characteristics and Tobacco Metal +L33: Concentrations in Filtered Cigars. PubMed. Caruso, Rosalie V; O'Connor, Richard L34: J; Travers, Mark J ... -L35: * 【9†Public Offices, Personal Demands: Capability in ...; publish_date: -L36: none†dokumen.pub】 ... (16201670).9 The Institutiones Politicae consists of two -L37: books. Book one deals with the origin, nature and goal of the commonwealth on +L35: * 【9†Public Offices, Personal Demands: Capability in ...; publish_date: +L36: none†dokumen.pub】 ... (16201670).9 The Institutiones Politicae consists of two +L37: books. Book one deals with the origin, nature and goal of the commonwealth on L38: the one hand, and the ... @@ -118,47 +118,47 @@ Search for "nearly 2.2 million members and supporters contributed over $49 milli [1] "nearly 2.2 million members" "contributed over $49 million" (Search_Results/"nearly 2.2 million members" "contributed over $49 million") **viewing lines [0 - 41] of 41** -L0: +L0: L1: URL: Search_Results/"nearly 2.2 million members" "contributed over $49 million" L2: # Search Results -L3: -L4: * 【0†The Global Good Fund, Microsoft and Goodwill join forces ...; -L5: publish_date: none†globalgoodfund.org】 Sep 28, 2022 — ... more than 2.2 million -L6: people. They additionally contribute close to $49 billion in local, state and +L3: +L4: * 【0†The Global Good Fund, Microsoft and Goodwill join forces ...; +L5: publish_date: none†globalgoodfund.org】 Sep 28, 2022 — ... more than 2.2 million +L6: people. They additionally contribute close to $49 billion in local, state and L7: federal tax revenues. When COVID hit ... -L8: * 【1†Almost 22 billion American tax dollars spent to wipe out a ...; +L8: * 【1†Almost 22 billion American tax dollars spent to wipe out a ...; L9: publish_date: none†www.facebook.com】 US military funding for Israel's war crimes -L10: in Lebanon and Gaza has now cost US taxpayers over $22 billion. When millions +L10: in Lebanon and Gaza has now cost US taxpayers over $22 billion. When millions L11: struggle to afford the ... -L12: * 【2†Corporate America has largely abandoned its post-January ...; +L12: * 【2†Corporate America has largely abandoned its post-January ...; L13: publish_date: none†www.citizensforethics.org】 Jul 29, 2025 — Since the January 6 L14: insurrection, over 2,000 corporate and industry group PACs have given over $174 L15: million to members of the Sedition ... -L16: * 【3†Audit shows millions in questionable taxpayer spending at ...; -L17: publish_date: none†www.aol.com】 18 hours ago — ... nearly doubled from 1.3 -L18: million to about 2.2 million. That is more than one in four Washington state +L16: * 【3†Audit shows millions in questionable taxpayer spending at ...; +L17: publish_date: none†www.aol.com】 18 hours ago — ... nearly doubled from 1.3 +L18: million to about 2.2 million. That is more than one in four Washington state L19: residents receiving Medicaid, and the ... 
-L20: * 【4†Incarceration and Poverty in the United States - AAF; publish_date: -L21: none†www.americanactionforum.org】 Jun 30, 2020 — The United States currently -L22: incarcerates 2.2 million people, nearly half of whom are non-violent drug +L20: * 【4†Incarceration and Poverty in the United States - AAF; publish_date: +L21: none†www.americanactionforum.org】 Jun 30, 2020 — The United States currently +L22: incarcerates 2.2 million people, nearly half of whom are non-violent drug L23: offenders, accused people held pre ... -L24: * 【5†How the NRA Spends Revenue (2023); publish_date: none†paddockpost.com】 +L24: * 【5†How the NRA Spends Revenue (2023); publish_date: none†paddockpost.com】 L25: Apr 25, 2025 — In 2023, the NRA reported total revenue of $176 million (compared L26: to $211 million in 2022, $227 million in 2021, $282 million in 2020, $292 ... -L27: * 【6†NCA Newsroom; publish_date: none†www.ncausa.org】 ... 2.2 million U.S. -L28: jobs—operating in every U.S. state and territory and contributing nearly $350 +L27: * 【6†NCA Newsroom; publish_date: none†www.ncausa.org】 ... 2.2 million U.S. +L28: jobs—operating in every U.S. state and territory and contributing nearly $350 L29: billion to the U.S. economy every year. For media inquiries ... -L30: * 【7†The Early Childhood Educator Workforce; publish_date: -L31: none†cscce.berkeley.edu】 About 2.2 million adults are paid to care for and +L30: * 【7†The Early Childhood Educator Workforce; publish_date: +L31: none†cscce.berkeley.edu】 About 2.2 million adults are paid to care for and L32: educate more than 9.7 million children between birth and age five in center- L33: based and home-based settings. L34: * 【8†Bringing Out the Worst in Congress: CRA By the Numbers ...; publish_date: L35: none†progressivereform.org】 Jul 14, 2025 — In fact, the top ten recipients (out -L36: of 25) account for almost $7 million, showing how these contributions are +L36: of 25) account for almost $7 million, showing how these contributions are L37: targeted and strategic. Real ... L38: * 【9†Club 100: Over 100 Americans gave more than $100000 to ...; publish_date: L39: none†publicintegrity.org】 Jan 7, 2022 — Through Internet investments during the -L40: 1990s, he attracted 2.2 million online accounts valued at $174 million. Schwab +L40: 1990s, he attracted 2.2 million online accounts valued at $174 million. Schwab L41: has been a vocal ... @@ -176,7 +176,7 @@ L41: has been a vocal ... [2] "2.2 million members" "contributed over $49" (Search_Results/"2.2 million members" "contributed over $49") **viewing lines [0 - 2] of 2** -L0: +L0: L1: URL: Search_Results/"2.2 million members" "contributed over $49" L2: # Search Results @@ -195,42 +195,42 @@ L2: # Search Results [3] "2.2 million members" "annual report" $49 million (Search_Results/"2.2 million members" "annual report" $49 million) **viewing lines [0 - 36] of 36** -L0: +L0: L1: URL: Search_Results/"2.2 million members" "annual report" $49 million L2: # Search Results -L3: -L4: * 【0†20-F; publish_date: none†www.sec.gov】 ANNUAL REPORT PURSUANT TO SECTION -L5: ... Our membership grew from 2.1 million members as at December 31, 2023 to 2.2 +L3: +L4: * 【0†20-F; publish_date: none†www.sec.gov】 ANNUAL REPORT PURSUANT TO SECTION +L5: ... Our membership grew from 2.1 million members as at December 31, 2023 to 2.2 L6: million members as at December 31, 2024. 
-L7: * 【1†Oportun Reports Fourth Quarter and Full Year 2023 ...; publish_date: +L7: * 【1†Oportun Reports Fourth Quarter and Full Year 2023 ...; publish_date: L8: none†investor.oportun.com】 Mar 12, 2024 — Oportun (Nasdaq: OPRT) is a mission- -L9: driven fintech that puts its 2.2 million members' financial goals within reach. +L9: driven fintech that puts its 2.2 million members' financial goals within reach. L10: ... annual report on ... -L11: * 【2†2 0 21; publish_date: none†www.annualreports.com】 ANNUAL REPORT. 2. 0. -L12: 21. 2. 0. 21. Page 2. 2. DEFENDERS OF WILDLIFE. 2. 0. 21. 2. 0. 21 ... In 2021, +L11: * 【2†2 0 21; publish_date: none†www.annualreports.com】 ANNUAL REPORT. 2. 0. +L12: 21. 2. 0. 21. Page 2. 2. DEFENDERS OF WILDLIFE. 2. 0. 21. 2. 0. 21 ... In 2021, L13: Defenders of Wildlife's nearly 2.2 million members and. -L14: * 【3†Annual report and accounts 2020; publish_date: none†www.3i.com】 -L15: Disclaimer. The Annual report and accounts have been prepared solely to provide -L16: information to shareholders. ... 2.2 million members. In December 2019, we sold +L14: * 【3†Annual report and accounts 2020; publish_date: none†www.3i.com】 +L15: Disclaimer. The Annual report and accounts have been prepared solely to provide +L16: information to shareholders. ... 2.2 million members. In December 2019, we sold L17: ... -L18: * 【4†united states securities and exchange commission; publish_date: +L18: * 【4†united states securities and exchange commission; publish_date: L19: none†s21.q4cdn.com】 Dec 14, 2018 — ☒ ANNUAL REPORT ... approximately 5.7 million -L20: Members, including approximately 2.2 million Members from the acquisition of +L20: Members, including approximately 2.2 million Members from the acquisition of L21: Best Doctors. -L22: * 【5†securities and exchange commission; publish_date: none†www.sec.gov】 ... -L23: Annual Report on Form 10-K filed with the SEC on March 10, 2020. The ... 2.2 +L22: * 【5†securities and exchange commission; publish_date: none†www.sec.gov】 ... +L23: Annual Report on Form 10-K filed with the SEC on March 10, 2020. The ... 2.2 L24: million members at June 30, 2020. The following table highlights the ... -L25: * 【6†2022-23 Annual Comprehensive Financial Report - CalPERS; publish_date: -L26: none†www.calpers.ca.gov】 Nov 17, 2023 — ... annual report is consistent with -L27: these accounting principles ... 2.2 million members and health benefit services +L25: * 【6†2022-23 Annual Comprehensive Financial Report - CalPERS; publish_date: +L26: none†www.calpers.ca.gov】 Nov 17, 2023 — ... annual report is consistent with +L27: these accounting principles ... 2.2 million members and health benefit services L28: to over 1.5 million ... -L29: * 【7†Medical Centers Report; publish_date: -L30: none†regents.universityofcalifornia.edu】 Oct 8, 2019 — which will allow its 2.2 -L31: million members to receive advanced and ... annual report of the University of +L29: * 【7†Medical Centers Report; publish_date: +L30: none†regents.universityofcalifornia.edu】 Oct 8, 2019 — which will allow its 2.2 +L31: million members to receive advanced and ... annual report of the University of L32: California. For the Medical ... -L33: * 【8†Working TogeTher to make HealtH care better; publish_date: -L34: none†www.unitedhealthgroup.com】 Feb 8, 2012 — ANNUAL REPORT PURSUANT TO SECTION -L35: 13 OR 15(d) OF THE ... 
approximately 2.2 million members enrolled in its +L33: * 【8†Working TogeTher to make HealtH care better; publish_date: +L34: none†www.unitedhealthgroup.com】 Feb 8, 2012 — ANNUAL REPORT PURSUANT TO SECTION +L35: 13 OR 15(d) OF THE ... approximately 2.2 million members enrolled in its L36: Medicare. Advantage ... @@ -247,59 +247,59 @@ L36: Medicare. Advantage ... [4] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf) **viewing lines [0 - 53] of 261** -L0: +L0: L1: URL: https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders- L2: wildlife_2021.pdf L3: ===== Page 1 ===== 2021 ANNUAL REPORT ===== Page 2 ===== # DEFENDERS OF WILDLIFE -L4: made important progress for imperiled species and vital landscapes across the +L4: made important progress for imperiled species and vital landscapes across the L5: United States in 2021. \--- **LAWYERS** immediately **challenged** the premature -L6: and reckless decision to strip **gray wolves** of federal **Endangered Species -L7: Act (ESA)** protections. For many decades, Defenders has led the effort to -L8: protect and restore the gray wolf, and we will continue to fight the -L9: unscientific and hostile anti-wolf policies that impede conservation progress +L6: and reckless decision to strip **gray wolves** of federal **Endangered Species +L7: Act (ESA)** protections. For many decades, Defenders has led the effort to +L8: protect and restore the gray wolf, and we will continue to fight the +L9: unscientific and hostile anti-wolf policies that impede conservation progress L10: and will carry on our unrelenting battle to restore federal protections for this -L11: iconic keystone species. \--- **LOBBYISTS** worked around the clock to keep -L12: wildlife and climate priorities in the **Infrastructure Investment and Jobs -L13: Act**. We also continue fighting to keep important wildlife and habitat funding -L14: in relevant **appropriations bills**. \--- 2 DEFENDERS OF WILDLIFE ===== Page 3 -L15: ===== POLICY EXPERTS pushed forward on the urgent need for a National -L16: Biodiversity Strategy (NBS), an all-of-government approach to address the -L17: unprecedented loss of wildlife and habitat we are experiencing. We have coupled -L18: this with our new campaign to expand the National Wildlife Refuge System to -L19: preserve our nation’s only lands set aside for wildlife. By defending, funding -L20: and expanding our national wildlife refuges, we will directly address -L21: biodiversity loss and climate change while promoting increased equitable access -L22: to nature. FIELD TEAMS were on the ground helping to recover imperiled species. -L23: From panthers and sea turtles in Florida to wolves, bison and black-footed -L24: ferrets in Montana, Defenders’ conservation experts were in the field saving -L25: wildlife all over the country. CONSERVATION INNOVATION EXPERTS provided -L26: comprehensive analyses to guide policy and inform conservation strategies to -L27: reach the goal of protecting 30% of our terrestrial and marine systems by 2030 +L11: iconic keystone species. \--- **LOBBYISTS** worked around the clock to keep +L12: wildlife and climate priorities in the **Infrastructure Investment and Jobs +L13: Act**. We also continue fighting to keep important wildlife and habitat funding +L14: in relevant **appropriations bills**. 
\--- 2 DEFENDERS OF WILDLIFE ===== Page 3 +L15: ===== POLICY EXPERTS pushed forward on the urgent need for a National +L16: Biodiversity Strategy (NBS), an all-of-government approach to address the +L17: unprecedented loss of wildlife and habitat we are experiencing. We have coupled +L18: this with our new campaign to expand the National Wildlife Refuge System to +L19: preserve our nation’s only lands set aside for wildlife. By defending, funding +L20: and expanding our national wildlife refuges, we will directly address +L21: biodiversity loss and climate change while promoting increased equitable access +L22: to nature. FIELD TEAMS were on the ground helping to recover imperiled species. +L23: From panthers and sea turtles in Florida to wolves, bison and black-footed +L24: ferrets in Montana, Defenders’ conservation experts were in the field saving +L25: wildlife all over the country. CONSERVATION INNOVATION EXPERTS provided +L26: comprehensive analyses to guide policy and inform conservation strategies to +L27: reach the goal of protecting 30% of our terrestrial and marine systems by 2030 L28: (“30x30”). Defenders’ Center for Conservation Innovation (CCI) produced a report -L29: which details actions we need to take to achieve 30x30 while protecting +L29: which details actions we need to take to achieve 30x30 while protecting L30: biodiversity and addressing the climate crisis. DEFENDERS.ORG ===== Page 4 ===== -L31: WE HAVE ACCOMPLISHED MUCH THIS YEAR WORKING WITH AN ADMINISTRATION THAT VALUES -L32: SCIENCE AND CONSERVATION. Our many successes include the return of protections -L33: to the Tongass National Forest in Alaska and the suspension of oil and gas +L31: WE HAVE ACCOMPLISHED MUCH THIS YEAR WORKING WITH AN ADMINISTRATION THAT VALUES +L32: SCIENCE AND CONSERVATION. Our many successes include the return of protections +L33: to the Tongass National Forest in Alaska and the suspension of oil and gas L34: leasing permits for the Arctic National Wildlife Refuge. Defenders also played a -L35: lead role in successfully defending the Migratory Bird Protection Act from -L36: attack and securing critical protections for migratory birds. Throughout 2021 -L37: Defenders made important progress for the wildlife and wild places we all love, -L38: yet our nation’s wildlife still face unprecedented challenges. More and more +L35: lead role in successfully defending the Migratory Bird Protection Act from +L36: attack and securing critical protections for migratory birds. Throughout 2021 +L37: Defenders made important progress for the wildlife and wild places we all love, +L38: yet our nation’s wildlife still face unprecedented challenges. More and more L39: species, including manatees, Mexican gray wolves, polar bears and North Atlantic -L40: right whales, face the very real threat of extinction because of climate -L41: change, habitat loss, pollution and inadequate protections. In our work we +L40: right whales, face the very real threat of extinction because of climate +L41: change, habitat loss, pollution and inadequate protections. In our work we L42: continue to demonstrate success by following the science, building partnerships, -L43: leading with innovative solutions and focusing on collaborative and practical -L44: approaches in defense of nature. Together we can make a lasting difference for -L45: wildlife and habitats. Your support is essential to our work and we couldn’t do -L46: it without you. 
Thank you for being an important part of our efforts to protect -L47: imperiled wildlife across our nation. Jamie Rappaport Clark President and CEO -L48: John Dayton Board Chair OUR NEW TELEVISION SERIES, WILDLIFE NATION WITH JEFF -L49: CORWIN, premiered in October and airs every weekend on ABC stations nationwide. -L50: This groundbreaking series takes Jeff across the country to meet Defenders’ -L51: experts along with our science, tribal and community partners to educate and -L52: inspire viewers to join with Defenders to protect wildlife. While highlighting +L43: leading with innovative solutions and focusing on collaborative and practical +L44: approaches in defense of nature. Together we can make a lasting difference for +L45: wildlife and habitats. Your support is essential to our work and we couldn’t do +L46: it without you. Thank you for being an important part of our efforts to protect +L47: imperiled wildlife across our nation. Jamie Rappaport Clark President and CEO +L48: John Dayton Board Chair OUR NEW TELEVISION SERIES, WILDLIFE NATION WITH JEFF +L49: CORWIN, premiered in October and airs every weekend on ABC stations nationwide. +L50: This groundbreaking series takes Jeff across the country to meet Defenders’ +L51: experts along with our science, tribal and community partners to educate and +L52: inspire viewers to join with Defenders to protect wildlife. While highlighting L53: some of our important conservation work and raising awareness of important @@ -317,9 +317,9 @@ L53: some of our important conservation work and raising awareness of important **viewing lines [0 - 4] of 4** L0: # 【0†match at L187】 -L1: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters +L1: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters L2: contributed more than $49 million for wildlife and wildlife habitat. This figure -L3: includes all those who generously donated their time and expertise. The +L3: includes all those who generously donated their time and expertise. The L4: steadfast support of our donors allows Defenders to sustain our program and @@ -336,51 +336,51 @@ L4: steadfast support of our donors allows Defenders to sustain our program and [6] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2021.pdf) **viewing lines [170 - 215] of 261** -L170: into the wild. **10 DEFENDERS OF WILDLIFE** ===== Page 11 ===== AFTER ANOTHER -L171: SUCCESSFUL DEFENDERS LAWSUIT ON BEHALF OF THE CRITICALLY ENDANGERED RED WOLF, -L172: FWS reversed its decision to limit the recovery area and committed to a robust +L170: into the wild. **10 DEFENDERS OF WILDLIFE** ===== Page 11 ===== AFTER ANOTHER +L171: SUCCESSFUL DEFENDERS LAWSUIT ON BEHALF OF THE CRITICALLY ENDANGERED RED WOLF, +L172: FWS reversed its decision to limit the recovery area and committed to a robust L173: release strategy. After years of inaction, FWS released eight wolves to the wild -L174: in North Carolina and plan to release nine more wolves in the spring of 2022. -L175: This is an incredible win for this critically endangered species whose -L176: population has dwindled down to single digits in the wild because of +L174: in North Carolina and plan to release nine more wolves in the spring of 2022. +L175: This is an incredible win for this critically endangered species whose +L176: population has dwindled down to single digits in the wild because of L177: mismanagement, vehicle strikes and poaching. 
DEFENDERS CONTINUED TO LEAD EFFORTS -L178: TO PROTECT THE FLORIDA MANATEE, a beloved species that suffered the deadliest -L179: year on record in 2021, tragically surpassing 1,000 deaths because of water -L180: pollution and lack of warm water habitat. Defenders led advocacy and education -L181: aimed at restoring the natural flow of the dammed Ocklawaha River, which would -L182: provide critical warm-water habitat that manatees need to survive. Defenders’ -L183: legal team continued to fight for manatees in the courts, holding government -L184: agencies accountable for protecting critical habitat and addressing the +L178: TO PROTECT THE FLORIDA MANATEE, a beloved species that suffered the deadliest +L179: year on record in 2021, tragically surpassing 1,000 deaths because of water +L180: pollution and lack of warm water habitat. Defenders led advocacy and education +L181: aimed at restoring the natural flow of the dammed Ocklawaha River, which would +L182: provide critical warm-water habitat that manatees need to survive. Defenders’ +L183: legal team continued to fight for manatees in the courts, holding government +L184: agencies accountable for protecting critical habitat and addressing the L185: devastating water pollution that is killing the seagrass and causing manatees to -L186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In -L187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters +L186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In +L187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters L188: contributed more than $49 million for wildlife and wildlife habitat. This figure -L189: includes all those who generously donated their time and expertise. The -L190: steadfast support of our donors allows Defenders to sustain our program and -L191: public education efforts in the field, the courts and on Capitol Hill. 2021 -L192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split -L193: interests $7,692 Income from investments, annuity reserve funds and trusts -L194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total -L195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency -L196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total -L197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 -L198: Net Assets, End of the Year $45,144 Dollars are in thousands. 12 DEFENDERS OF -L199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, -L200: trusts and split interests 15% Royalties and other income 7% Contributed +L189: includes all those who generously donated their time and expertise. The +L190: steadfast support of our donors allows Defenders to sustain our program and +L191: public education efforts in the field, the courts and on Capitol Hill. 2021 +L192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split +L193: interests $7,692 Income from investments, annuity reserve funds and trusts +L194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total +L195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency +L196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total +L197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 +L198: Net Assets, End of the Year $45,144 Dollars are in thousands. 
12 DEFENDERS OF +L199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, +L200: trusts and split interests 15% Royalties and other income 7% Contributed L201: services 12% Program and support services 97% Fundraising 3% ===== Page 13 ===== -L202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl +L202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl L203: Sachs* Vice Chair Connecticut Whitfield Bailey* Secretary Tennessee Mark Caylor* -L204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. -L205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela -L206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia -L207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. -L208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* +L204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. +L205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela +L206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia +L207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. +L208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* L209: California Terry L. Root, Ph.D Florida BOARD EMERITUS Terry C. Pelster, Esq. New -L210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS -L211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, +L210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS +L211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, L212: Ph.D Arizona State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D -L213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State -L214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D +L213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State +L214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D L215: University of California-Davis David S. Wilcox, Ph.D Princeton University Rae @@ -404,42 +404,42 @@ Search for "Defenders of Wildlife 2022 annual report National Petroleum Reserve [7] Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska (Search_Results/Defenders of Wildlife 2022 annual report National Petroleum Reserve Alaska) **viewing lines [0 - 37] of 37** -L0: -L1: URL: Search_Results/Defenders of Wildlife 2022 annual report National Petroleum +L0: +L1: URL: Search_Results/Defenders of Wildlife 2022 annual report National Petroleum L2: Reserve Alaska L3: # Search Results -L4: -L5: * 【0†CELEBRATING YEARS; publish_date: none†www.annualreports.com】 With less -L6: than 340 right whales left in the wild, Defenders is fighting tirelessly to end +L4: +L5: * 【0†CELEBRATING YEARS; publish_date: none†www.annualreports.com】 With less +L6: than 340 right whales left in the wild, Defenders is fighting tirelessly to end L7: deadly lobster gear entanglements and vessel strikes that are driving ... 
-L8: * 【1†Financials; publish_date: none†defenders.org】 We invite you to explore -L9: the reports below to learn more about our activities and accomplishments, and +L8: * 【1†Financials; publish_date: none†defenders.org】 We invite you to explore +L9: the reports below to learn more about our activities and accomplishments, and L10: how we put your money to work for wildlife. -L11: * 【2†Alaska Program Looks Back on 2022; publish_date: none†defenders.org】 Feb -L12: 9, 2023 — Thanks to a lawsuit joined by Defenders, seven million acres were +L11: * 【2†Alaska Program Looks Back on 2022; publish_date: none†defenders.org】 Feb +L12: 9, 2023 — Thanks to a lawsuit joined by Defenders, seven million acres were L13: returned to protection within the National Petroleum Reserve-Alaska (NPR-A), ... -L14: * 【3†Defenders-of-Wildlife-2022-Financial-Statement. ...; publish_date: -L15: none†defenders.org】 We have audited the accompanying consolidated financial -L16: statements of Defenders of Wildlife and Affiliated Defenders of Wildlife Action +L14: * 【3†Defenders-of-Wildlife-2022-Financial-Statement. ...; publish_date: +L15: none†defenders.org】 We have audited the accompanying consolidated financial +L16: statements of Defenders of Wildlife and Affiliated Defenders of Wildlife Action L17: Fund (collectively, ... -L18: * 【4†2022 Annual Report; publish_date: none†alaskaconservation.org】 Jun 13, -L19: 2023 — In 2022, we focused on three landscapes: the Arctic. National Wildlife +L18: * 【4†2022 Annual Report; publish_date: none†alaskaconservation.org】 Jun 13, +L19: 2023 — In 2022, we focused on three landscapes: the Arctic. National Wildlife L20: Refuge, Bristol Bay, and the Tongass National Forest. In March 2022,. -L21: * 【5†Assessment of ecological and cultural values within the ...; -L22: publish_date: none†www.blm.gov】 This document was written to provide technical -L23: information regarding the ecological importance of the National Petroleum +L21: * 【5†Assessment of ecological and cultural values within the ...; +L22: publish_date: none†www.blm.gov】 This document was written to provide technical +L23: information regarding the ecological importance of the National Petroleum L24: Reserve – Alaska (NPR-A). Several ... -L25: * 【6†Accomplishments Report; publish_date: none†defenders.org】 National -L26: Petroleum Reserve-Alaska. Identified for exceptional wildlife and cultural +L25: * 【6†Accomplishments Report; publish_date: none†defenders.org】 National +L26: Petroleum Reserve-Alaska. Identified for exceptional wildlife and cultural L27: values, including critical habitat for polar bears and other species ... L28: * 【7†2022 annual report; publish_date: none†dory- -L29: plantain-s2zc.squarespace.com】 These projects are made possible through -L30: collaborations with Defenders of. Wildlife, Cook Inletkeeper, Trustees for +L29: plantain-s2zc.squarespace.com】 These projects are made possible through +L30: collaborations with Defenders of. Wildlife, Cook Inletkeeper, Trustees for L31: Alaska,. Environmental Investigation Agency, ... -L32: * 【8†23IMPACT REPORT; publish_date: none†www.annualreports.com】 Defenders of -L33: Wildlife made incredible progress protecting wildlife and wild places in 2023, +L32: * 【8†23IMPACT REPORT; publish_date: none†www.annualreports.com】 Defenders of +L33: Wildlife made incredible progress protecting wildlife and wild places in 2023, L34: helping shape a brighter future for imperiled species and vital ... 
-L35: * 【9†From Leasing to Land Protections; publish_date: none†defenders-cci.org】 +L35: * 【9†From Leasing to Land Protections; publish_date: none†defenders-cci.org】 L36: Abstract. When you hear the name, National Petroleum Reserve - Alaska (NPR-A, or L37: Reserve), you might think of a barren land filled with oil wells and ... @@ -457,59 +457,59 @@ L37: Reserve), you might think of a barren land filled with oil wells and ...[8] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf) **viewing lines [0 - 53] of 289** -L0: +L0: L1: URL: https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders- L2: wildlife_2022.pdf L3: ===== Page 1 ===== 2022 Impact Report C E L E B R A T I N G Y E A R S ===== Page -L4: 2 ===== 2022 Defenders of Wildlife made important progress for imperiled -L5: species and vital landscapes across the United States in 2022. GRAY WOLF | JIM -L6: GUMMERAL MAY STOCK PRIOR Lawyers successfully challenged the previous -L7: administration’s decision to delist the gray wolf and restored critical federal -L8: protections under the Endangered Species Act. This latest triumph in court is -L9: part of our ongoing battle to protect and restore gray wolves throughout their -L10: historical range and shield them from persecution by extremist legislators in -L11: Idaho, Montana and Wyoming. TWO MORE FATALIZED GRAY SWALLETS TO SEA TO SHARE -L12: ALLIANCE Lobbyists worked around the clock to expand funding for wildlife -L13: conservation in the FY2022 federal spending bill, which included $31 million (a -L14: 44% increase) for the Bureau of Land Management’s Threatened and Endangered -L15: Species Program, $2.5 million (an 81% increase) for the U.S. Department of -L16: Agriculture Wildlife Services’ Nonlethal Initiative to prevent human-wildlife -L17: conflicts and $21 million (a 320% increase) for North Atlantic right whale -L18: conservation. 2 DEFENDERS OF WILDLIFE ===== Page 3 ===== **Policy Experts** -L19: played a crucial role in securing international trade protections for 100 -L20: species of sharks and rays, all 158 species of glass frogs and 73 species of -L21: reptiles, including 21 species of desert horned lizards, at the Convention on -L22: International Trade in Endangered Species (CITES) in Panama. \--- **Field -L23: Teams** worked tirelessly to protect and restore imperiled species across the -L24: country. From Florida manatees and red wolves in the Southeast to belugas and -L25: grizzly bears in Alaska, Defenders’ conservation experts were on the ground -L26: saving species that need our help to survive and thrive. \--- **Conservation +L4: 2 ===== 2022 Defenders of Wildlife made important progress for imperiled +L5: species and vital landscapes across the United States in 2022. GRAY WOLF | JIM +L6: GUMMERAL MAY STOCK PRIOR Lawyers successfully challenged the previous +L7: administration’s decision to delist the gray wolf and restored critical federal +L8: protections under the Endangered Species Act. This latest triumph in court is +L9: part of our ongoing battle to protect and restore gray wolves throughout their +L10: historical range and shield them from persecution by extremist legislators in +L11: Idaho, Montana and Wyoming. 
TWO MORE FATALIZED GRAY SWALLETS TO SEA TO SHARE +L12: ALLIANCE Lobbyists worked around the clock to expand funding for wildlife +L13: conservation in the FY2022 federal spending bill, which included $31 million (a +L14: 44% increase) for the Bureau of Land Management’s Threatened and Endangered +L15: Species Program, $2.5 million (an 81% increase) for the U.S. Department of +L16: Agriculture Wildlife Services’ Nonlethal Initiative to prevent human-wildlife +L17: conflicts and $21 million (a 320% increase) for North Atlantic right whale +L18: conservation. 2 DEFENDERS OF WILDLIFE ===== Page 3 ===== **Policy Experts** +L19: played a crucial role in securing international trade protections for 100 +L20: species of sharks and rays, all 158 species of glass frogs and 73 species of +L21: reptiles, including 21 species of desert horned lizards, at the Convention on +L22: International Trade in Endangered Species (CITES) in Panama. \--- **Field +L23: Teams** worked tirelessly to protect and restore imperiled species across the +L24: country. From Florida manatees and red wolves in the Southeast to belugas and +L25: grizzly bears in Alaska, Defenders’ conservation experts were on the ground +L26: saving species that need our help to survive and thrive. \--- **Conservation L27: Innovation Experts** published more than 10 peer-reviewed studies on topics that -L28: include the Cook Inlet beluga whale, golden-cheeked warbler, global parrot -L29: biodiversity, the Endangered Species Act, the effects of mountaintop removal -L30: mining on endangered species, the ecological importance of panthers and the -L31: implementation of “30x30” – the globally recognized goal to which President -L32: Biden committed the U.S. to conserve 30% of our imperiled lands and waters by -L33: 2030. \--- **DEFENDERS.ORG** ===== Page 4 ===== THANK YOU Defenders celebrated -L34: our 75th anniversary in 2022—an exciting milestone that we attribute to our +L28: include the Cook Inlet beluga whale, golden-cheeked warbler, global parrot +L29: biodiversity, the Endangered Species Act, the effects of mountaintop removal +L30: mining on endangered species, the ecological importance of panthers and the +L31: implementation of “30x30” – the globally recognized goal to which President +L32: Biden committed the U.S. to conserve 30% of our imperiled lands and waters by +L33: 2030. \--- **DEFENDERS.ORG** ===== Page 4 ===== THANK YOU Defenders celebrated +L34: our 75th anniversary in 2022—an exciting milestone that we attribute to our L35: unwavering dedication to our wildlife conservation mission. From helping to pass -L36: the Endangered Species Act and other bedrock conservation laws to leading the -L37: advocacy on the reintroduction of wolves to Yellowstone National Park and other -L38: parts of the West, Defenders is proud to inspire, lead and ultimately celebrate -L39: some of the most consequential conservation wins for wildlife and wild places. -L40: It is wonderfully fitting that we also mark 2022 as a year when we successfully -L41: defended, yet again, the original focal species and symbol of our organization, -L42: the gray wolf. While the future for gray wolves and many other species remains -L43: uncertain, we will never stop fighting to protect the imperiled wildlife that -L44: need us and to restore vibrant and balanced ecosystems. Our mission is more -L45: important than ever due to the escalating biodiversity and climate crises. 
-L46: Defenders is determined to build on our accomplishments and seize new -L47: opportunities to address these challenges through our multifaceted approach to -L48: conservation. We focus on developing innovative and science-based solutions, +L36: the Endangered Species Act and other bedrock conservation laws to leading the +L37: advocacy on the reintroduction of wolves to Yellowstone National Park and other +L38: parts of the West, Defenders is proud to inspire, lead and ultimately celebrate +L39: some of the most consequential conservation wins for wildlife and wild places. +L40: It is wonderfully fitting that we also mark 2022 as a year when we successfully +L41: defended, yet again, the original focal species and symbol of our organization, +L42: the gray wolf. While the future for gray wolves and many other species remains +L43: uncertain, we will never stop fighting to protect the imperiled wildlife that +L44: need us and to restore vibrant and balanced ecosystems. Our mission is more +L45: important than ever due to the escalating biodiversity and climate crises. +L46: Defenders is determined to build on our accomplishments and seize new +L47: opportunities to address these challenges through our multifaceted approach to +L48: conservation. We focus on developing innovative and science-based solutions, L49: coordinating on-the-ground species and habitat restoration, promoting human- -L50: wildlife coexistence, protecting and strengthening our bedrock environmental -L51: laws, investing in impactful collaborations and partnerships, building a more -L52: diverse and powerful conservation movement and challenging attacks on wildlife +L50: wildlife coexistence, protecting and strengthening our bedrock environmental +L51: laws, investing in impactful collaborations and partnerships, building a more +L52: diverse and powerful conservation movement and challenging attacks on wildlife L53: through advocacy, policy and litigation. Defenders’ continued success is made @@ -527,9 +527,9 @@ L53: through advocacy, policy and litigation. Defenders’ continued success is **viewing lines [0 - 4] of 4** L0: # 【0†match at L201】 -L1: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas -L2: drilling and removed protections for important wildlife habitat. Protections -L3: have now been restored for nearly half of the reserve’s pristine lands, which +L1: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas +L2: drilling and removed protections for important wildlife habitat. Protections +L3: have now been restored for nearly half of the reserve’s pristine lands, which L4: are vital habitat for shorebirds, denning polar bears and tens of thousands of @@ -546,50 +546,50 @@ L4: are vital habitat for shorebirds, denning polar bears and tens of thousands [10] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf) **viewing lines [180 - 227] of 289** -L180: the sixth successful transfer of bison to the Assiniboine and Sioux Tribes of -L181: Fort Peck since 2019. \--- **SWIFT FIX KITS | © ROSIMA PAELARINTSKIMMA MADDIAL -L182: 200 AND CONSERVATION BIOLOGY INSTITUTE** \--- **Celebrating our third year** of -L183: a collaborative program with the Aaniih and Nakoda Tribes and others to restore -L184: swift foxes to the Fort Belknap Indian Reservation in Montana, Defenders helped -L185: with the release of 28 more swift foxes. 
With over 100 foxes reintroduced -L186: through this program, monitoring efforts show that they are reproducing in the -L187: wild—a critical measure of success for a self-sustaining population. \--- -L188: **Defenders continued to lead the way** for conserving and recovering the -L189: endangered black-footed ferret, supporting the black-footed ferret survey for -L190: the Fort Belknap Indian community. Thirty-six ferrets were vaccinated against -L191: sylvatic plague and two dozen kits were released in the wild. \--- **10 -L192: DEFENDERS OF WILDLIFE** ===== Page 11 ===== Defenders helped to bring hope for -L193: recovery for the endangered military macaw, adding 11 fledglings to a growing -L194: wild population in Puerta Vallarta, Mexico, that is under pressure from habitat -L195: loss and poachers for the illegal pet trade. Accord- ing to our recent report, -L196: the 2008 parrot trade ban that Defenders fought to achieve is working. -L197: Preventing more than 30,000 parrots from being illegally trapped each year, the -L198: trade ban has resulted in a 47% decrease in the illegal trade of parrots and an -L199: 88% decrease in U.S. seizures of Mexican parrots. As a result of a Defenders -L200: lawsuit, BLM rescinded the previous administration’s plan that opened most of -L201: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas -L202: drilling and removed protections for important wildlife habitat. Protections -L203: have now been restored for nearly half of the reserve’s pristine lands, which -L204: are vital habitat for shorebirds, denning polar bears and tens of thousands of -L205: caribou in the Teshekpuk caribou herd. Our new storymap illustrates why the -L206: reserve is so important to wildlife. Defenders marked the 20th anniversary of -L207: our Sea Otter Awareness Week. In celebration of this milestone, we sponsored -L208: state legislation, signed by Gov. Gavin Newsom, that formally recognizes the -L209: event’s anniversary and acknowledges the vital role that sea otters play in -L210: California’s nearshore ecosystems. This annual celebration provides a unique -L211: opportunity to raise awareness of the important role sea otters play in -L212: maintaining ecosystem health, our need to coexist with sea otters and the -L213: efforts of Defenders and others in advancing sea otter conservation. -L214: DEFENDERS.ORG ===== Page 12 ===== FINANCIAL REPORT In 2022, Defenders of -L215: Wildlife’s nearly 2.2 million members and supporters contributed more than $41 -L216: million for wildlife and wildlife habitat. This figure includes all those who +L180: the sixth successful transfer of bison to the Assiniboine and Sioux Tribes of +L181: Fort Peck since 2019. \--- **SWIFT FIX KITS | © ROSIMA PAELARINTSKIMMA MADDIAL +L182: 200 AND CONSERVATION BIOLOGY INSTITUTE** \--- **Celebrating our third year** of +L183: a collaborative program with the Aaniih and Nakoda Tribes and others to restore +L184: swift foxes to the Fort Belknap Indian Reservation in Montana, Defenders helped +L185: with the release of 28 more swift foxes. With over 100 foxes reintroduced +L186: through this program, monitoring efforts show that they are reproducing in the +L187: wild—a critical measure of success for a self-sustaining population. \--- +L188: **Defenders continued to lead the way** for conserving and recovering the +L189: endangered black-footed ferret, supporting the black-footed ferret survey for +L190: the Fort Belknap Indian community. 
Thirty-six ferrets were vaccinated against +L191: sylvatic plague and two dozen kits were released in the wild. \--- **10 +L192: DEFENDERS OF WILDLIFE** ===== Page 11 ===== Defenders helped to bring hope for +L193: recovery for the endangered military macaw, adding 11 fledglings to a growing +L194: wild population in Puerta Vallarta, Mexico, that is under pressure from habitat +L195: loss and poachers for the illegal pet trade. Accord- ing to our recent report, +L196: the 2008 parrot trade ban that Defenders fought to achieve is working. +L197: Preventing more than 30,000 parrots from being illegally trapped each year, the +L198: trade ban has resulted in a 47% decrease in the illegal trade of parrots and an +L199: 88% decrease in U.S. seizures of Mexican parrots. As a result of a Defenders +L200: lawsuit, BLM rescinded the previous administration’s plan that opened most of +L201: the 22 million acres of the National Petroleum Reserve in Alaska to oil and gas +L202: drilling and removed protections for important wildlife habitat. Protections +L203: have now been restored for nearly half of the reserve’s pristine lands, which +L204: are vital habitat for shorebirds, denning polar bears and tens of thousands of +L205: caribou in the Teshekpuk caribou herd. Our new storymap illustrates why the +L206: reserve is so important to wildlife. Defenders marked the 20th anniversary of +L207: our Sea Otter Awareness Week. In celebration of this milestone, we sponsored +L208: state legislation, signed by Gov. Gavin Newsom, that formally recognizes the +L209: event’s anniversary and acknowledges the vital role that sea otters play in +L210: California’s nearshore ecosystems. This annual celebration provides a unique +L211: opportunity to raise awareness of the important role sea otters play in +L212: maintaining ecosystem health, our need to coexist with sea otters and the +L213: efforts of Defenders and others in advancing sea otter conservation. +L214: DEFENDERS.ORG ===== Page 12 ===== FINANCIAL REPORT In 2022, Defenders of +L215: Wildlife’s nearly 2.2 million members and supporters contributed more than $41 +L216: million for wildlife and wildlife habitat. This figure includes all those who L217: generously donated their time and expertise. The steadfast support of our donors -L218: allows Defenders to sustain our program and public education efforts in the -L219: field, the courts and on Capitol Hill. 2022 SOURCES OF FUNDS Grants and -L220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from -L221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other -L222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS -L223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising +L218: allows Defenders to sustain our program and public education efforts in the +L219: field, the courts and on Capitol Hill. 2022 SOURCES OF FUNDS Grants and +L220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from +L221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other +L222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS +L223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising L224: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets L225: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433 L226: Dollars are in thousands. 
12 DEFENDERS OF WILDLIFE Grants and contributions 59% @@ -618,15 +618,15 @@ Open the 2022 PDF near the board list. Search for "Emeritus". **viewing lines [0 - 10] of 10** L0: # 【0†match at L237】 -L1: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New -L2: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela +L1: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New +L2: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela L3: Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, Ph.D Arizona L4: State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D Universidad -L5: +L5: L6: # 【1†match at L243】 -L7: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David -L8: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic -L9: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of +L7: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David +L8: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic +L9: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of L10: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice @@ -643,53 +643,53 @@ L10: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice < [12] www.annualreports.com (https://www.annualreports.com/HostedData/AnnualReportArchive/d/defenders-wildlife_2022.pdf) **viewing lines [220 - 267] of 289** -L220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from -L221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other -L222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS -L223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising +L220: contributions $28,946 Bequests, trusts and split interests $13,928 Income from +L221: investments, annuity reserve funds and trusts (-$7,495) Royalties and other +L222: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS +L223: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising L224: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets L225: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433 L226: Dollars are in thousands. 12 DEFENDERS OF WILDLIFE Grants and contributions 59% -L227: Contributed services 10% Royalties and other income 2% Bequests, trusts and -L228: split interests 29% TOTAL REVENUE Income from investments -15% Program and -L229: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== +L227: Contributed services 10% Royalties and other income 2% Bequests, trusts and +L228: split interests 29% TOTAL REVENUE Income from investments -15% Program and +L229: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== L230: DEFENDERS.ORG 13 ===== Page 14 ===== BOARD OF DIRECTORS John Dayton, Esq.* Chair -L231: Texas Sheryl Sachs* Vice Chair Connecticut Dinah Bear, Esq.* Secretary Arizona -L232: Mark Caylor* Treasurer District of Columbia Susan Alexander Connecticut Sylvia -L233: Baca* California Whitefield Bailey Tennessee Donald Barry, Esq. Oregon Kim -L234: O’Keefe Beck, Ph.D California Gabriela Chavarria, Ph.D Washington Jamie -L235: Rappaport Clark* President & CEO Virginia John Donovan, Esq.* Montana and -L236: California Caroline D. Gabel Maryland Terry L. 
Root, Ph.D New Mexico Thomas D. -L237: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New -L238: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela +L231: Texas Sheryl Sachs* Vice Chair Connecticut Dinah Bear, Esq.* Secretary Arizona +L232: Mark Caylor* Treasurer District of Columbia Susan Alexander Connecticut Sylvia +L233: Baca* California Whitefield Bailey Tennessee Donald Barry, Esq. Oregon Kim +L234: O’Keefe Beck, Ph.D California Gabriela Chavarria, Ph.D Washington Jamie +L235: Rappaport Clark* President & CEO Virginia John Donovan, Esq.* Montana and +L236: California Caroline D. Gabel Maryland Terry L. Root, Ph.D New Mexico Thomas D. +L237: Sisk, Ph.D British Columbia, Canada BOARD EMERITUS Terry C. Pelster, Esq. New +L238: York *Executive Committee member* STAFF & ADVISORS SCIENCE ADVISORS Gabriela L239: Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, Ph.D Arizona L240: State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D Universidad -L241: Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State University Terry +L241: Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State University Terry L242: L. Root, Ph.D Stanford University Mark Schwartz, Ph.D University of California- -L243: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David -L244: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic -L245: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of -L246: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice -L247: President, Conservation Law Matt Braughler Vice President, Philanthropy Rachel -L248: Brittin Vice President, Communications Shawn Cantrell Vice President, Field -L249: Conservation Programs Robert L. Dewey Vice President, Government Relations & -L250: External Affairs Monica Goldberg Vice President, Landscape Conservation Sharon +L243: Davis Thomas D. Sisk, Ph.D Emeritus Professor Northern Arizona University David +L244: S. Wilcox, Ph.D Princeton University Rae Wynn-Grant, Ph.D National Geographic +L245: Society SENIOR STAFF Jamie Rappaport Clark President & CEO Thu Pham Chief of +L246: Staff James Stofan Senior Vice President, Operations McCrystle Adams Vice +L247: President, Conservation Law Matt Braughler Vice President, Philanthropy Rachel +L248: Brittin Vice President, Communications Shawn Cantrell Vice President, Field +L249: Conservation Programs Robert L. Dewey Vice President, Government Relations & +L250: External Affairs Monica Goldberg Vice President, Landscape Conservation Sharon L251: Morris-Smith Vice President of Human Resources Elizabeth Nichols Vice President, -L252: Finance/CFO Anthony Pegues Vice President, Diversity, Equity, Inclusion, -L253: Justice Lindsay Rosa, Ph.D Vice President, Conservation Research and Innovation -L254: Brandace Swimley Vice President, Integrated Marketing 14 DEFENDERS OF WILDLIFE -L255: ===== Page 15 ===== There are many ways to support Defenders of Wildlife and -L256: protect our imperiled wildlife. To learn more or donate, contact us at -L257: 1-800-385-9712, visit Defenders.org/donate or choose one of the following -L258: options. 
➤ Transfer appreciated assets directly to Defenders and maximize +L252: Finance/CFO Anthony Pegues Vice President, Diversity, Equity, Inclusion, +L253: Justice Lindsay Rosa, Ph.D Vice President, Conservation Research and Innovation +L254: Brandace Swimley Vice President, Integrated Marketing 14 DEFENDERS OF WILDLIFE +L255: ===== Page 15 ===== There are many ways to support Defenders of Wildlife and +L256: protect our imperiled wildlife. To learn more or donate, contact us at +L257: 1-800-385-9712, visit Defenders.org/donate or choose one of the following +L258: options. ➤ Transfer appreciated assets directly to Defenders and maximize L259: potential tax benefits. Defenders.org/ donate-stocks-help-save-wildlife ➤ Become -L260: a Leadership Council member with a single donation of $1,000 or more and enjoy -L261: more personal involvement with Defenders. Defenders.org/leadershipcouncil ➤ If -L262: you are 70.5 or older, roll over a portion of your IRA to benefit wildlife. -L263: Remember, these gifts must come directly from your IRA account. -L264: Defenders.org/planned-giving/gifts-your-retirement-plan ➤ Get started on a -L265: contribution from your donor-advised fund Defenders.org/donor-advised-funds ➤ -L266: Check with your personnel office or that of your spouse/partner about your +L260: a Leadership Council member with a single donation of $1,000 or more and enjoy +L261: more personal involvement with Defenders. Defenders.org/leadershipcouncil ➤ If +L262: you are 70.5 or older, roll over a portion of your IRA to benefit wildlife. +L263: Remember, these gifts must come directly from your IRA account. +L264: Defenders.org/planned-giving/gifts-your-retirement-plan ➤ Get started on a +L265: contribution from your donor-advised fund Defenders.org/donor-advised-funds ➤ +L266: Check with your personnel office or that of your spouse/partner about your L267: company’s Corporate Matching Gifts program or visit us at @@ -719,47 +719,47 @@ First clue: Press release about Kenya vaccine doses on Dec 5, 2021. That is like [13] As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines (Search_Results/As of December 5, 2021, Kenya had received a total of 16,201,670 vaccines) **viewing lines [0 - 41] of 41** -L0: -L1: URL: Search_Results/As of December 5, 2021, Kenya had received a total of +L0: +L1: URL: Search_Results/As of December 5, 2021, Kenya had received a total of L2: 16,201,670 vaccines L3: # Search Results -L4: -L5: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: -L6: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had -L7: received a total of 16,201,670 vaccines, with 7,583,134 administered. While +L4: +L5: * 【0†Kenya's Economy is Showing Resilience as Output Rises ...; publish_date: +L6: none†www.worldbank.org】 Dec 14, 2021 — As of December 5, 2021, Kenya had +L7: received a total of 16,201,670 vaccines, with 7,583,134 administered. While L8: vaccine acceptance is ... -L9: * 【1†Unmet need for COVID-19 vaccination coverage in Kenya - PMC; +L9: * 【1†Unmet need for COVID-19 vaccination coverage in Kenya - PMC; L10: publish_date: none†pmc.ncbi.nlm.nih.gov】 by SK Muchiri · 2022 · Cited by 42 — As -L11: of December 2021, six counties had a vaccination coverage of less than 5%. +L11: of December 2021, six counties had a vaccination coverage of less than 5%. L12: These counties include Garissa, Mandera, Marsabit, Tana River, Turkana, and ... 
-L13: * 【2†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec -L14: 1, 2021 — • Total Covid-19 Vaccines Received to date- 16,201,670 ... Table 10: +L13: * 【2†MINISTRY OF HEALTH; publish_date: none†covidhub.mediacouncil.or.ke】 Dec +L14: 1, 2021 — • Total Covid-19 Vaccines Received to date- 16,201,670 ... Table 10: L15: Vaccine Logistics Received in the Country as at 5th, December 2021. -L16: * 【3†COVID-19 vaccination refusal trends in Kenya over 2021 - PMC; -L17: publish_date: none†pmc.ncbi.nlm.nih.gov】 by RT Rego · 2023 · Cited by 21 — We -L18: assessed vaccine refusal over time in Kenya, and characterized factors +L16: * 【3†COVID-19 vaccination refusal trends in Kenya over 2021 - PMC; +L17: publish_date: none†pmc.ncbi.nlm.nih.gov】 by RT Rego · 2023 · Cited by 21 — We +L18: assessed vaccine refusal over time in Kenya, and characterized factors L19: associated with changes in vaccination refusal. L20: * 【4†Ciheb-Kenya on the Front Lines of the COVID-19 Vaccine ...; publish_date: -L21: none†www.ciheb.org】 Of the 721,509 persons vaccinated, 409,711 are aged 58 -L22: years and above, 143,050 are healthcare workers, 109,593 are teachers, and +L21: none†www.ciheb.org】 Of the 721,509 persons vaccinated, 409,711 are aged 58 +L22: years and above, 143,050 are healthcare workers, 109,593 are teachers, and L23: 59,155 are security officers. -L24: * 【5†proportion of fully vaccinated adults 23.3pc.; publish_date: -L25: none†www.facebook.com】 COVID-19: KENYA administers 150632 new vaccinations; +L24: * 【5†proportion of fully vaccinated adults 23.3pc.; publish_date: +L25: none†www.facebook.com】 COVID-19: KENYA administers 150632 new vaccinations; L26: total jabs given now 13782963; proportion of fully vaccinated adults 23.3pc. -L27: * 【6†National COVID-19 Vaccines Deployment and ...; publish_date: +L27: * 【6†National COVID-19 Vaccines Deployment and ...; publish_date: L28: none†www.parliament.go.ke】 Kenya has a well-established immunization program and L29: a robust disease surveillance and response unit (DSRU) which serves as an early L30: warning system, to. -L31: * 【7†Kenya: Vaccine Requirements Violate Rights; publish_date: -L32: none†www.hrw.org】 Dec 13, 2021 — Kenya, with an estimated adult population of +L31: * 【7†Kenya: Vaccine Requirements Violate Rights; publish_date: +L32: none†www.hrw.org】 Dec 13, 2021 — Kenya, with an estimated adult population of L33: 27.2 million and a total population of 55 million, has received approximately 23 L34: million doses as ... -L35: * 【8†Government gives 20 million a month to get vaccinated; publish_date: -L36: none†www.bbc.com】 Nov 22, 2021 — Although less than 10% of Kenyans are +L35: * 【8†Government gives 20 million a month to get vaccinated; publish_date: +L36: none†www.bbc.com】 Nov 22, 2021 — Although less than 10% of Kenyans are L37: vaccinated, the government wants to avoid a surge over Christmas. -L38: * 【9†ministry of health kenya covid-19 immunization status ...; publish_date: -L39: none†www.health.go.ke】 May 15, 2022 — By the end of 2021 Kenya had administered -L40: 10.1 million doses of 5 types of Covid-19 vaccines. During this period 4.2 +L38: * 【9†ministry of health kenya covid-19 immunization status ...; publish_date: +L39: none†www.health.go.ke】 May 15, 2022 — By the end of 2021 Kenya had administered +L40: 10.1 million doses of 5 types of Covid-19 vaccines. During this period 4.2 L41: million people were ... @@ -776,58 +776,58 @@ L41: million people were ... 
[14] www.worldbank.org (https://www.worldbank.org/en/news/press-release/2021/12/14/kenya-s-economy-is-showing-resilience-as-output-rises-above-pre-pandemic-levels-driven-by-a-rebound-in-the-services-sect) **viewing lines [0 - 52] of 61** -L0: +L0: L1: URL: https://www.worldbank.org/en/news/press-release/2021/12/14/kenya-s-economy- L2: is-showing-resilience-as-output-rises-above-pre-pandemic-levels-driven-by-a- L3: rebound-in-the-services-sect -L4: NAIROBI, December 14, 2021 — Kenya’s economy has demonstrated resilience to the +L4: NAIROBI, December 14, 2021 — Kenya’s economy has demonstrated resilience to the L5: COVID-19 shock, with output in the first half of the year rising above pre- L6: pandemic levels. In 2021 as a whole, gross domestic product (GDP) is expected to -L7: grow by 5%, one of the faster recoveries among Sub-Saharan African countries. -L8: Overall economic performance is expected to be robust at 4.9% per year in +L7: grow by 5%, one of the faster recoveries among Sub-Saharan African countries. +L8: Overall economic performance is expected to be robust at 4.9% per year in L9: 2022-23, similar to the pre-pandemic pace (5% average annual growth from 2010 to -L10: 2019). According to the 24th edition of the Kenya Economic Update, “From +L10: 2019). According to the 24th edition of the Kenya Economic Update, “From L11: Recovery to Better Jobs,” growth has been supported by rebounds in industry and, -L12: especially, services. Agricultural output, however, fell by 0.5% year on year -L13: in the first half of 2021 following a particularly strong performance in 2020, -L14: partly due to below-average rains. Demand-side recovery has been supported by a -L15: revival in private consumption, against a backdrop of improving employment -L16: conditions and household incomes. “Kenya’s economy has shown considerable -L17: resilience to the enormous shock of the pandemic, and this year is expected to -L18: post one of the stronger growth rebounds in the region thanks to diversified -L19: sources of growth and sound economic policies and management,” said Keith -L20: Hansen, World Bank Country Director for Kenya. “However, poverty has increased, -L21: and the buffers and coping mechanisms of households, firms, and the public -L22: finances have been depleted.” Economic activity in Kenya has continued to adapt +L12: especially, services. Agricultural output, however, fell by 0.5% year on year +L13: in the first half of 2021 following a particularly strong performance in 2020, +L14: partly due to below-average rains. Demand-side recovery has been supported by a +L15: revival in private consumption, against a backdrop of improving employment +L16: conditions and household incomes. “Kenya’s economy has shown considerable +L17: resilience to the enormous shock of the pandemic, and this year is expected to +L18: post one of the stronger growth rebounds in the region thanks to diversified +L19: sources of growth and sound economic policies and management,” said Keith +L20: Hansen, World Bank Country Director for Kenya. “However, poverty has increased, +L21: and the buffers and coping mechanisms of households, firms, and the public +L22: finances have been depleted.” Economic activity in Kenya has continued to adapt L23: to the pandemic and associated restrictions. 
A mix of containment measures, such -L24: as a nightly curfew, were in effect through most of 2021, while more -L25: economically disruptive measures such as lockdowns and travel restrictions were -L26: phased, limiting the impact on economic activities. The vaccine rollout, which -L27: had a slow start due to supply constraints, has picked up as new shipments of +L24: as a nightly curfew, were in effect through most of 2021, while more +L25: economically disruptive measures such as lockdowns and travel restrictions were +L26: phased, limiting the impact on economic activities. The vaccine rollout, which +L27: had a slow start due to supply constraints, has picked up as new shipments of L28: vaccines have arrived, particularly since September. This has supported economic -L29: recovery and growth through the third quarter of 2021. As of December 5, 2021, -L30: Kenya had received a total of 16,201,670 vaccines, with 7,583,134 administered. -L31: While vaccine acceptance is reportedly high there is still a long way to go -L32: towards the government’s target of fully inoculating the adult population of -L33: about 30 million by the end of 2022.As of December 6, 2021, about 10% of adults -L34: (2.9 million people) had been fully vaccinated while another 16% (4.9 million +L29: recovery and growth through the third quarter of 2021. As of December 5, 2021, +L30: Kenya had received a total of 16,201,670 vaccines, with 7,583,134 administered. +L31: While vaccine acceptance is reportedly high there is still a long way to go +L32: towards the government’s target of fully inoculating the adult population of +L33: about 30 million by the end of 2022.As of December 6, 2021, about 10% of adults +L34: (2.9 million people) had been fully vaccinated while another 16% (4.9 million L35: people) had received their first dose. The report also projects robust growth in L36: the medium-term. This outlook takes into account that some sub-sectors, such as -L37: education, have bounced back strongly, but others such as international -L38: tourism, have only partially and face a much more protracted recovery. The -L39: anticipated further recovery of hotels and restaurants, trade, transport, and -L40: other services, depends on substantial vaccination progress to help prevent new -L41: waves of infections and associated containment measures. “Recent economic -L42: performance has been strong and the outlook is positive, but in Kenya, as -L43: everywhere, the future course of the pandemic remains a major source of +L37: education, have bounced back strongly, but others such as international +L38: tourism, have only partially and face a much more protracted recovery. The +L39: anticipated further recovery of hotels and restaurants, trade, transport, and +L40: other services, depends on substantial vaccination progress to help prevent new +L41: waves of infections and associated containment measures. “Recent economic +L42: performance has been strong and the outlook is positive, but in Kenya, as +L43: everywhere, the future course of the pandemic remains a major source of L44: uncertainty, said Alex Sienaert, Senior Economist for Kenya. 
“To support further -L45: recovery, reduce debt distress risks, and rebuild space for social and +L45: recovery, reduce debt distress risks, and rebuild space for social and L46: development spending, it is critical that Kenya continues to implement medium- -L47: term fiscal consolidation plans.” In addition to pandemic-related risks to the -L48: outlook, a second key domestic risk factor stems from the drought conditions -L49: which are affecting parts of the country and already causing severe hardship. -L50: Should the drought intensify or spread, this would weigh on the near-term -L51: economic outlook. Weaker global growth, higher-than-anticipated energy prices, +L47: term fiscal consolidation plans.” In addition to pandemic-related risks to the +L48: outlook, a second key domestic risk factor stems from the drought conditions +L49: which are affecting parts of the country and already causing severe hardship. +L50: Should the drought intensify or spread, this would weigh on the near-term +L51: economic outlook. Weaker global growth, higher-than-anticipated energy prices, L52: and tighter external financing conditions are the primary external risks. As @@ -873,37 +873,37 @@ But let's search for "Defenders of Wildlife emeritus board member 2022 Terry Pel [15] Terry C. Pelster Defenders of Wildlife emeritus board member (Search_Results/Terry C. Pelster Defenders of Wildlife emeritus board member) **viewing lines [0 - 31] of 31** -L0: +L0: L1: URL: Search_Results/Terry C. Pelster Defenders of Wildlife emeritus board member L2: # Search Results -L3: -L4: * 【0†2024 impact report; publish_date: none†defenders.org】 Terry Root, Ph.D. -L5: New Mexico. Thomas D. Sisk, Ph.D. British Columbia, Canada. BOARD EMERITUS. +L3: +L4: * 【0†2024 impact report; publish_date: none†defenders.org】 Terry Root, Ph.D. +L5: New Mexico. Thomas D. Sisk, Ph.D. British Columbia, Canada. BOARD EMERITUS. L6: Terry C. Pelster, Esq. New York. SCIENCE ADVISORS. Leah Gerber, Ph ... -L7: * 【1†Defenders of Wildlife; publish_date: none†www.wrongkindofgreen.org】 -L8: Directors ; Victor M. Sher*, Chair – California ; Terry C. Pelster*, Vice Chair -L9: – New York ; Richard Kopcho*, Treasurer – California ; Adelaide P. Gomer*, +L7: * 【1†Defenders of Wildlife; publish_date: none†www.wrongkindofgreen.org】 +L8: Directors ; Victor M. Sher*, Chair – California ; Terry C. Pelster*, Vice Chair +L9: – New York ; Richard Kopcho*, Treasurer – California ; Adelaide P. Gomer*, L10: Secretary ... -L11: * 【2†Exhibit 12; publish_date: none†www.uschamber.com】 DECLARATION OF TERRY -L12: PELSTER. I, Terry C. Pelster, declare as follows: 1. I am a current member of +L11: * 【2†Exhibit 12; publish_date: none†www.uschamber.com】 DECLARATION OF TERRY +L12: PELSTER. I, Terry C. Pelster, declare as follows: 1. I am a current member of L13: Defenders of Wildlife (“Defenders”) and have been a member. -L14: * 【3†2020 ANNUAL REPORT; publish_date: none†defenders.org】 Terry L. Root. -L15: Florida. BOARD. EMERITUS. Terry C. Pelster. New York. Alan Steinberg. Florida. +L14: * 【3†2020 ANNUAL REPORT; publish_date: none†defenders.org】 Terry L. Root. +L15: Florida. BOARD. EMERITUS. Terry C. Pelster. New York. Alan Steinberg. Florida. L16: SCIENCE. ADVISORS. Gabriela Chavarria,. Ph.D. Denver Museum of. -L17: * 【4†2019 Annual Report; publish_date: none†defenders.org】 BOARD EMERITUS. -L18: Terry C. Pelster. New York. Alan Steinberg. Florida. SCIENCE ADVISORS. Leah +L17: * 【4†2019 Annual Report; publish_date: none†defenders.org】 BOARD EMERITUS. +L18: Terry C. Pelster. 
New York. Alan Steinberg. Florida. SCIENCE ADVISORS. Leah L19: Gerber, Ph.D. Arizona State University. Kathayoon Khalil, Ph.D. -L20: * 【5†annual report; publish_date: none†defenders.org】 * Executive Committee -L21: member. BoARD EMERITuS. Terry C. Pelster. New York. Alan Steinberg. Florida. +L20: * 【5†annual report; publish_date: none†defenders.org】 * Executive Committee +L21: member. BoARD EMERITuS. Terry C. Pelster. New York. Alan Steinberg. Florida. L22: PRESIDENT. EMERITuS. Rodger Schlickeisen. Virginia. SCIENCE ... L23: * 【6†BEING THERE FOR BELUGAS; publish_date: none†wendeenicole.com】 Mar 2, 2023 -L24: — Defenders of Wildlife is a national, nonprofit member- ship ... Terry C. +L24: — Defenders of Wildlife is a national, nonprofit member- ship ... Terry C. L25: Pelster, Esq. New York. *Executive Committee member outfront. -L26: * 【7†Annual Reports; publish_date: none†www.annualreports.com】 WAYS TO GIVE -L27: There are many ways to support Defenders of Wildlife and protect our imperiled +L26: * 【7†Annual Reports; publish_date: none†www.annualreports.com】 WAYS TO GIVE +L27: There are many ways to support Defenders of Wildlife and protect our imperiled L28: wildlife. ... Terry C. Pelster. New York. Alan Steinberg. Florida. -L29: * 【8†2014 Annual Report; publish_date: none†defenders.org】 BOARD OF DIRECTORS -L30: defenders of wildlife is a national, nonprofit ... BOARD EMERITUS. Terry C. +L29: * 【8†2014 Annual Report; publish_date: none†defenders.org】 BOARD OF DIRECTORS +L30: defenders of wildlife is a national, nonprofit ... BOARD EMERITUS. Terry C. L31: Pelster. New York. Alan Steinberg. Florida. Page 3. PO. @@ -911,7 +911,7 @@ L31: Pelster. New York. Alan Steinberg. Florida. Page 3. PO. Now the question might want just "Terry C. Pelster". Or maybe "Terry Pelster". Or "Terry C. Pelster, Esq." but instructions say "Name only, no titles." So likely "Terry C. Pelster". However, maybe they want "Terry Pelster". Better to include full name as listed: "Terry C. Pelster". -Let's read the question again: "Who was the emeritus board member that year? Name only, no titles." So we should answer just the name, no "Esq." or "Board Emeritus". So "Terry C. Pelster". Or maybe "Terry Pelster". But the document shows "Terry C. Pelster, Esq." So the name is "Terry C. Pelster". +Let's read the question again: "Who was the emeritus board member that year? Name only, no titles." So we should answer just the name, no "Esq." or "Board Emeritus". So "Terry C. Pelster". Or maybe "Terry Pelster". But the document shows "Terry C. Pelster, Esq." So the name is "Terry C. Pelster". Better to verify that the 2022 annual report indeed lists "BOARD EMERITUS Terry C. Pelster, Esq." We saw that line. 
@@ -955,15 +955,15 @@ But let's be thorough and search for "Defenders of Wildlife 2022 annual report c **viewing lines [0 - 10] of 10** L0: # 【0†match at L222】 -L1: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS -L2: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising +L1: income $744 Contributed services $5,047 Total Revenue $41,170 USES OF FUNDS +L2: Biodiversity conservation $18,970 Constituency mobilization $16,528 Fundraising L3: $1,029 Management and general $6,354 Total Expenses $42,881 Change in net assets L4: (-$1,711) Net assets, start of year $45,144 Net Assets, End of the Year $43,433 -L5: +L5: L6: # 【1†match at L227】 -L7: Contributed services 10% Royalties and other income 2% Bequests, trusts and -L8: split interests 29% TOTAL REVENUE Income from investments -15% Program and -L9: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== +L7: Contributed services 10% Royalties and other income 2% Bequests, trusts and +L8: split interests 29% TOTAL REVENUE Income from investments -15% Program and +L9: support services 98% USE OF FUNDS Fundraising 2% ===== Page 13 ===== L10: DEFENDERS.ORG 13 ===== Page 14 ===== BOARD OF DIRECTORS John Dayton, Esq.* Chair @@ -983,9 +983,9 @@ Now we should verify that the 2021 annual report shows contributed services 12%. **viewing lines [0 - 4] of 4** L0: # 【0†match at L194】 -L1: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total -L2: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency -L3: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total +L1: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total +L2: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency +L3: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total L4: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 @@ -1003,51 +1003,51 @@ L4: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,1 **viewing lines [185 - 232] of 261** L185: devastating water pollution that is killing the seagrass and causing manatees to -L186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In -L187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters +L186: starve. DAVID TES | SAM FRENZY DRAW DEFENDERS.ORG 11 ===== Page 12 ===== In +L187: 2021, Defenders of Wildlife’s nearly 2.2 million members and supporters L188: contributed more than $49 million for wildlife and wildlife habitat. This figure -L189: includes all those who generously donated their time and expertise. The -L190: steadfast support of our donors allows Defenders to sustain our program and -L191: public education efforts in the field, the courts and on Capitol Hill. 2021 -L192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split -L193: interests $7,692 Income from investments, annuity reserve funds and trusts -L194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total -L195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency -L196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total -L197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 -L198: Net Assets, End of the Year $45,144 Dollars are in thousands. 
12 DEFENDERS OF -L199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, -L200: trusts and split interests 15% Royalties and other income 7% Contributed +L189: includes all those who generously donated their time and expertise. The +L190: steadfast support of our donors allows Defenders to sustain our program and +L191: public education efforts in the field, the courts and on Capitol Hill. 2021 +L192: SOURCES OF FUNDS Grants and contributions $29,057 Bequests, trusts and split +L193: interests $7,692 Income from investments, annuity reserve funds and trusts +L194: $3,354 Royalties and other income $3,576 Contributed services $6,140 Total +L195: Revenue $49,819 USES OF FUNDS Biodiversity conservation $22,420 Constituency +L196: mobilization $16,324 Fundraising $1,211 Management and general $5,865 Total +L197: Expenses $45,820 Change in net assets $3,999 Net assets, start of year $41,145 +L198: Net Assets, End of the Year $45,144 Dollars are in thousands. 12 DEFENDERS OF +L199: WILDLIFE Grants and contributions 58% Income from investments 7% Requests, +L200: trusts and split interests 15% Royalties and other income 7% Contributed L201: services 12% Program and support services 97% Fundraising 3% ===== Page 13 ===== -L202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl +L202: DEFENDERS.ORG 13 ===== Page 14 ===== John Dayton, Esq.* Chair Texas Sheryl L203: Sachs* Vice Chair Connecticut Whitfield Bailey* Secretary Tennessee Mark Caylor* -L204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. -L205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela -L206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia -L207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. -L208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* +L204: Treasurer District of Columbia Sylvia Baca* California Donald Barry, Esq. +L205: Oregon Dinah Bear, Esq. Arizona Kim O’Keefe Beck, Ph.D California Gabriela +L206: Chavarria, Ph.D Washington Jamie Rappaport Clark* President & CEO Virginia +L207: Caroline D. Gabel Maryland Keith M. Harper, Esq. Maryland John Donovan, Esq. +L208: Montana and California James M. Hecker, Esq. Virginia Judith Ponsikoff, Ph.D* L209: California Terry L. Root, Ph.D Florida BOARD EMERITUS Terry C. Pelster, Esq. New -L210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS -L211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, +L210: York *Executive Committee member* ST A F F & A D V I S O R S SCIENCE ADVISORS +L211: Gabriela Chavarria, Ph.D Burke Museum, University of Washington Leah Gerber, L212: Ph.D Arizona State University Kathayoon Khalil, Ph.D Oregon Zoo Rurik List, Ph.D -L213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State -L214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D -L215: University of California-Davis David S. 
Wilcox, Ph.D Princeton University Rae -L216: Wynn-Grant, Ph.D National Geographic Society SENIOR STAFF Jamie Rappaport Clark -L217: President & CEO Thu Pham Chief of Staff Renee Stone Senior Vice President, -L218: Conservation Programs & General Counsel James Stefan Senior Vice President, -L219: Operations Matt Braughler Vice President, Philanthropy Rachel Brittin Vice -L220: President, Communications Shawn Cantrell Vice President, Field Conservation +L213: Universidad Autónoma Metropolitana-Lerma Barry Noon, Ph.D Colorado State +L214: University Terry L. Root, Ph.D Stanford University Mark Schwartz, Ph.D +L215: University of California-Davis David S. Wilcox, Ph.D Princeton University Rae +L216: Wynn-Grant, Ph.D National Geographic Society SENIOR STAFF Jamie Rappaport Clark +L217: President & CEO Thu Pham Chief of Staff Renee Stone Senior Vice President, +L218: Conservation Programs & General Counsel James Stefan Senior Vice President, +L219: Operations Matt Braughler Vice President, Philanthropy Rachel Brittin Vice +L220: President, Communications Shawn Cantrell Vice President, Field Conservation L221: Programs Robert L. Dewey Vice President, Government Relations & External Affairs L222: Monica Goldberg Vice President, Landscape Conservation Sharon Morris-Smith Vice -L223: President of Human Resources Elizabeth Nichols Vice President, Finance/CFO -L224: Anthony Pegues Vice President, Diversity, Equity, Inclusion, Justice Lindsay +L223: President of Human Resources Elizabeth Nichols Vice President, Finance/CFO +L224: Anthony Pegues Vice President, Diversity, Equity, Inclusion, Justice Lindsay L225: Rosa, Ph.D Director, Center for Conservation Innovation Michael P. Senatore Vice -L226: President, Conservation Law Brandace Swimley Vice President, Integrated -L227: Marketing 14 DEFENDERS OF WILDLIFE ===== Page 15 ===== THERE ARE MANY WAYS TO -L228: SUPPORT DEFENDERS OF WILDLIFE and protect our imperiled wildlife. To learn more -L229: or donate, contact us at 1-800-385-9712, visit defenders.org/donate or choose -L230: one of the following options. ➤ Transfer appreciated assets directly to +L226: President, Conservation Law Brandace Swimley Vice President, Integrated +L227: Marketing 14 DEFENDERS OF WILDLIFE ===== Page 15 ===== THERE ARE MANY WAYS TO +L228: SUPPORT DEFENDERS OF WILDLIFE and protect our imperiled wildlife. To learn more +L229: or donate, contact us at 1-800-385-9712, visit defenders.org/donate or choose +L230: one of the following options. ➤ Transfer appreciated assets directly to L231: Defenders and maximize potential tax benefits. 
Defenders.org/donate-stocks-help- L232: save-wildlife ➤ Become a Leadership Council member with a single donation of From a65129eeb1940ea824f796c69cba176482d88a87 Mon Sep 17 00:00:00 2001 From: Vladislav Nosivskoy Date: Mon, 8 Dec 2025 17:11:38 +0300 Subject: [PATCH 3/5] fix fmt Signed-off-by: Vladislav Nosivskoy --- lib/llm/tests/deepseek_v32_encoding.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/llm/tests/deepseek_v32_encoding.rs b/lib/llm/tests/deepseek_v32_encoding.rs index 2b2fca33f9..210a641b3d 100644 --- a/lib/llm/tests/deepseek_v32_encoding.rs +++ b/lib/llm/tests/deepseek_v32_encoding.rs @@ -39,12 +39,13 @@ fn run_official_test(input_file: &str, output_file: &str) { // Add tools to first message (system) if present if let Some(tools) = input_data.get("tools") - && let Some(first_msg) = messages.get_mut(0) { - first_msg - .as_object_mut() - .unwrap() - .insert("tools".to_string(), tools.clone()); - } + && let Some(first_msg) = messages.get_mut(0) + { + first_msg + .as_object_mut() + .unwrap() + .insert("tools".to_string(), tools.clone()); + } // Encode messages let result = encode_messages( From 4b6b97b2af8406fc6ffca276ab714d2824767c9a Mon Sep 17 00:00:00 2001 From: Vladislav Nosivskoy Date: Tue, 9 Dec 2025 14:05:52 +0300 Subject: [PATCH 4/5] ignore ds test files in copyright check Signed-off-by: Vladislav Nosivskoy --- .github/workflows/copyright-check.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/copyright-check.ps1 b/.github/workflows/copyright-check.ps1 index d036770e66..5613d51ff6 100644 --- a/.github/workflows/copyright-check.ps1 +++ b/.github/workflows/copyright-check.ps1 @@ -84,7 +84,7 @@ $global:copyright_results = @{ $ignored_files = @('.clang-format', '.gitattributes', '.gitignore', '.gitkeep', '.patch', 'Cargo.lock', 'LICENSE', 'uv.lock', 'rust-toolchain.toml', 'codespell.txt', 'exclusions.txt') write-debug " ignored_files = ['$($ignored_files -join "','")']." -$ignored_paths = @('.github', '.mypy_cache', '.pytest_cache', 'lib/llm/tests/data/sample-models') +$ignored_paths = @('.github', '.mypy_cache', '.pytest_cache', 'lib/llm/tests/data/sample-models', 'lib/llm/tests/data/deepseek-v3.2') write-debug " ignored_paths = ['$($ignored_paths -join "','")']." $ignored_types = @('.bat', '.gif', '.ico', '.ipynb', '.jpg', '.jpeg', '.patch', '.png', '.pyc', '.pyi', '.rst', '.zip', '.md', '.json') write-debug " ignored_types = ['$($ignored_types -join "', '")']." From a3228007ecbcffcfd587625508483eae8acbaace Mon Sep 17 00:00:00 2001 From: Vladislav Nosivskoy Date: Wed, 10 Dec 2025 13:40:21 +0300 Subject: [PATCH 5/5] fix license issues Signed-off-by: Vladislav Nosivskoy --- LICENSE | 11 ++++++++++- lib/llm/tests/data/deepseek-v3.2/LICENSE | 21 +++++++++++++++++++++ lib/llm/tests/data/deepseek-v3.2/README.md | 16 ++++++++++++++++ 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 lib/llm/tests/data/deepseek-v3.2/LICENSE create mode 100644 lib/llm/tests/data/deepseek-v3.2/README.md diff --git a/LICENSE b/LICENSE index 261eeb9e9f..1ed6128b8b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,13 @@ - Apache License + NOTICE: The test data files under ./lib/llm/tests/data/deepseek-v3.2 are + derived from the DeepSeek-V3.2 model repository (originally developed by + DeepSeek). The original files were obtained from: + https://huggingface.co/deepseek-ai/DeepSeek-V3.2 (commit c69397ecfd1fd142e90e3fbad51f4c7e40b9f3d3) + These files are licensed under the MIT License. 
The full text of the MIT + License can be found in ./lib/llm/tests/data/deepseek-v3.2/LICENSE. + The rest of this codebase is licensed under the Apache License 2.0 as + described below. + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/lib/llm/tests/data/deepseek-v3.2/LICENSE b/lib/llm/tests/data/deepseek-v3.2/LICENSE new file mode 100644 index 0000000000..995061f1ee --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 DeepSeek + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/lib/llm/tests/data/deepseek-v3.2/README.md b/lib/llm/tests/data/deepseek-v3.2/README.md new file mode 100644 index 0000000000..200a3656f4 --- /dev/null +++ b/lib/llm/tests/data/deepseek-v3.2/README.md @@ -0,0 +1,16 @@ +# DeepSeek-V3.2 Test Data + +## Source + +The test files in this directory are taken from the official DeepSeek-V3.2 model repository on Hugging Face: + +https://huggingface.co/deepseek-ai/DeepSeek-V3.2 (commit c69397ecfd1fd142e90e3fbad51f4c7e40b9f3d3) + +These files are used for testing the DeepSeek-V3.2 model's tool calling and encoding capabilities. + +## License + +All files in this directory are licensed under the MIT License. See the [LICENSE](./LICENSE) file in this directory for the full license text. + +Copyright (c) 2025 DeepSeek +