From e1d7f07b022803c6b83c9768df40654d8aab9dc0 Mon Sep 17 00:00:00 2001
From: glassBead
Date: Thu, 7 Aug 2025 16:12:02 -0500
Subject: [PATCH] docs(sdk/integrations): add integrations docs (index,
 ai-sdk, llm-providers)

---
 sdk/integrations/ai-sdk.mdx        | 422 +++++++++++++++++++++++++++
 sdk/integrations/index.mdx         | 343 ++++++++++++++++++++++
 sdk/integrations/llm-providers.mdx | 447 +++++++++++++++++++++++++++++
 3 files changed, 1212 insertions(+)
 create mode 100644 sdk/integrations/ai-sdk.mdx
 create mode 100644 sdk/integrations/index.mdx
 create mode 100644 sdk/integrations/llm-providers.mdx

diff --git a/sdk/integrations/ai-sdk.mdx b/sdk/integrations/ai-sdk.mdx
new file mode 100644
index 0000000..6361c24
--- /dev/null
+++ b/sdk/integrations/ai-sdk.mdx
@@ -0,0 +1,422 @@
+---
+title: "Vercel AI SDK Integration"
+description: "Use MCP tools with Vercel AI SDK for AI-powered applications"
+---
+
+The Smithery SDK provides seamless integration with [Vercel AI SDK](https://sdk.vercel.ai/), allowing you to use MCP tools in AI applications with full type safety and automatic updates.
+
+## Functions
+
+### watchTools
+
+Watches for tool changes and maintains an up-to-date tool registry:
+
+```typescript
+function watchTools(client: ToolClient): Promise<Record<string, Tool>>
+```
+
+### listTools
+
+Gets the current list of tools formatted for AI SDK:
+
+```typescript
+function listTools(client: ToolClient): Promise<Record<string, Tool>>
+```
+
+## Basic Setup
+
+```typescript
+import { createTransport, watchTools, wrapError } from "@smithery/sdk"
+import { Client } from "@modelcontextprotocol/sdk/client/index.js"
+import { generateText } from "ai"
+import { openai } from "@ai-sdk/openai"
+
+// 1. Connect to MCP server
+const transport = createTransport("https://my-server.smithery.ai")
+const client = wrapError(new Client({
+  name: "ai-app",
+  version: "1.0.0"
+}, {
+  capabilities: {}
+}))
+
+await client.connect(transport)
+
+// 2. Get MCP tools for AI SDK
+const tools = await watchTools(client)
+
+// 3. Use with AI SDK
+const result = await generateText({
+  model: openai("gpt-4"),
+  tools,
+  prompt: "Your prompt here"
+})
+```
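+
+The result object follows the standard AI SDK `generateText` shape, so tool activity is easy to inspect. A quick sketch (these field names come from AI SDK itself, not Smithery):
+
+```typescript
+// Final model answer
+console.log(result.text)
+
+// MCP tool invocations the model made along the way
+for (const call of result.toolCalls) {
+  console.log("called:", call.toolName, call.args)
+}
+for (const toolResult of result.toolResults) {
+  console.log("returned:", toolResult.result)
+}
+```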
+
+## Tool Watching
+
+The `watchTools` function automatically updates when tools change:
+
+```typescript
+// Initial tool setup
+const tools = await watchTools(client)
+console.log("Available tools:", Object.keys(tools))
+
+// Tools automatically update when the server changes
+// No need to manually refresh
+
+// Use in your AI application
+async function handleUserQuery(query: string) {
+  return generateText({
+    model: openai("gpt-4"),
+    tools, // Always up-to-date
+    prompt: query
+  })
+}
+```
+
+## Tool Format
+
+MCP tools are automatically converted to AI SDK format:
+
+```typescript
+// MCP tool definition
+{
+  name: "search_database",
+  description: "Search the customer database",
+  inputSchema: {
+    type: "object",
+    properties: {
+      query: { type: "string", description: "Search query" },
+      limit: { type: "number", description: "Max results" }
+    },
+    required: ["query"]
+  }
+}
+
+// Converted to AI SDK tool
+{
+  description: "Search the customer database",
+  parameters: z.object({
+    query: z.string().describe("Search query"),
+    limit: z.number().describe("Max results").optional()
+  }),
+  execute: async (args) => {
+    // Automatic MCP tool execution
+  }
+}
+```
+
+## Usage Examples
+
+### Chat Application
+
+```typescript
+import { generateText } from "ai"
+import { openai } from "@ai-sdk/openai"
+
+export async function createAIAssistant(serverUrl: string) {
+  // Setup MCP connection
+  const transport = createTransport(serverUrl)
+  const client = new Client({ name: "assistant", version: "1.0.0" })
+  await client.connect(transport)
+
+  // Get tools
+  const tools = await watchTools(client)
+
+  // Chat function
+  return async function chat(messages: any[]) {
+    const response = await generateText({
+      model: openai("gpt-4"),
+      tools,
+      messages,
+      toolChoice: "auto"
+    })
+
+    return response
+  }
+}
+
+// Usage
+const assistant = await createAIAssistant("assistant.smithery.ai")
+const response = await assistant([
+  { role: "user", content: "What's in the database?" }
+])
+```
+
+### Streaming Responses
+
+```typescript
+import { streamText } from "ai"
+
+const tools = await watchTools(client)
+
+const result = streamText({
+  model: openai("gpt-4"),
+  tools,
+  prompt: "Analyze the latest sales data"
+})
+
+// Stream the response
+for await (const part of result.fullStream) {
+  switch (part.type) {
+    case "text-delta":
+      process.stdout.write(part.textDelta)
+      break
+    case "tool-call":
+      console.log("\nTool called:", part.toolName)
+      break
+    case "tool-result":
+      console.log("Tool result:", part.result)
+      break
+  }
+}
+```
+
+### Tool Selection
+
+```typescript
+const tools = await watchTools(client)
+
+// Let the model choose tools
+const autoResponse = await generateText({
+  model: openai("gpt-4"),
+  tools,
+  toolChoice: "auto",
+  prompt: "Get weather for NYC"
+})
+
+// Require tool use
+const requiredResponse = await generateText({
+  model: openai("gpt-4"),
+  tools,
+  toolChoice: "required",
+  prompt: "Calculate the fibonacci sequence"
+})
+
+// Specific tool
+const specificResponse = await generateText({
+  model: openai("gpt-4"),
+  tools,
+  toolChoice: { type: "tool", toolName: "calculate" },
+  prompt: "What's 2 + 2?"
+})
+```
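+
+### Multi-Step Tool Calls
+
+When the model must call a tool and then answer from its output, enable a multi-step run. A minimal sketch using AI SDK's standard `maxSteps` option (nothing here is Smithery-specific):
+
+```typescript
+// Allow up to 5 model/tool round trips in one call
+const report = await generateText({
+  model: openai("gpt-4"),
+  tools,
+  maxSteps: 5,
+  prompt: "Look up today's sales numbers and summarize them"
+})
+
+// The final text is written after the tool results come back
+console.log(report.text)
+```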
+
+## Advanced Patterns
+
+### Tool Filtering
+
+Filter tools before passing them to the AI:
+
+```typescript
+const allTools = await watchTools(client)
+
+// Filter by capability
+const dataTools = Object.fromEntries(
+  Object.entries(allTools).filter(([name]) =>
+    name.startsWith("data_") || name.includes("query")
+  )
+)
+
+// Filter by description
+const safeTools = Object.fromEntries(
+  Object.entries(allTools).filter(([_, tool]) =>
+    !tool.description?.includes("delete") &&
+    !tool.description?.includes("drop")
+  )
+)
+```
+
+### Multiple MCP Servers
+
+Combine tools from multiple servers:
+
+```typescript
+// Connect to multiple servers
+async function connectToServers(servers: string[]) {
+  const clients = await Promise.all(
+    servers.map(async (server) => {
+      const transport = createTransport(server)
+      const client = new Client({
+        name: "multi-server-client",
+        version: "1.0.0"
+      })
+      await client.connect(transport)
+      return client
+    })
+  )
+
+  // Combine tools from all servers
+  const allTools: Record<string, any> = {}
+  for (const client of clients) {
+    const tools = await watchTools(client)
+    Object.assign(allTools, tools)
+  }
+
+  return allTools
+}
+
+// Usage
+const tools = await connectToServers([
+  "search.smithery.ai",
+  "database.smithery.ai",
+  "analytics.smithery.ai"
+])
+```
+
+### Error Handling
+
+Wrap each tool's `execute` so MCP failures surface as structured errors:
+
+```typescript
+import { wrapError } from "@smithery/sdk"
+
+const tools = await watchTools(client)
+
+const guardedTools = Object.fromEntries(
+  Object.entries(tools).map(([name, tool]) => [
+    name,
+    {
+      ...tool,
+      execute: async (args: any) => {
+        try {
+          return await tool.execute(args)
+        } catch (error) {
+          // Wrap MCP errors for AI SDK
+          throw wrapError(error)
+        }
+      }
+    }
+  ])
+)
+
+try {
+  const result = await generateText({
+    model: openai("gpt-4"),
+    tools: guardedTools,
+    prompt: "Process the data"
+  })
+} catch (error) {
+  if (error.code === "TOOL_EXECUTION_ERROR") {
+    console.error("Tool failed:", error.toolName, error.details)
+  }
+}
+```
+
+### Tool Middleware
+
+Add logging or validation to tool calls:
+
+```typescript
+const tools = await watchTools(client)
+
+// Wrap tools with middleware
+const wrappedTools = Object.fromEntries(
+  Object.entries(tools).map(([name, tool]) => [
+    name,
+    {
+      ...tool,
+      execute: async (args: any) => {
+        console.log(`[${new Date().toISOString()}] Calling ${name}:`, args)
+
+        const start = Date.now()
+        try {
+          const result = await tool.execute(args)
+          console.log(`[${name}] Completed in ${Date.now() - start}ms`)
+          return result
+        } catch (error) {
+          console.error(`[${name}] Failed:`, error)
+          throw error
+        }
+      }
+    }
+  ])
+)
+```
+
+## React Integration
+
+```typescript
+import { createTransport, watchTools } from "@smithery/sdk"
+import { Client } from "@modelcontextprotocol/sdk/client/index.js"
+import { useChat } from "ai/react"
+import { useEffect, useState } from "react"
+
+function ChatComponent({ serverUrl }: { serverUrl: string }) {
+  const [tools, setTools] = useState<Record<string, any>>({})
+
+  // Setup MCP connection
+  useEffect(() => {
+    async function setup() {
+      const transport = createTransport(serverUrl)
+      const client = new Client({ name: "chat-ui", version: "1.0.0" })
+      await client.connect(transport)
+
+      const mcpTools = await watchTools(client)
+      setTools(mcpTools)
+    }
+
+    setup()
+  }, [serverUrl])
+
+  // Use AI SDK chat hook
+  const { messages, input, handleInputChange, handleSubmit } = useChat({
+    api: "/api/chat",
+    body: {
+      tools: Object.keys(tools) // Send tool names to API
+    }
+  })
+
+  return (
+    <div>
+      {messages.map(m => (
+        <div key={m.id}>
+          {m.role}: {m.content}
+          {m.toolInvocations?.map((tool, i) => (
+            <div key={i}>Tool: {tool.toolName}</div>
+          ))}
+        </div>
+      ))}
+
+      <form onSubmit={handleSubmit}>
+        <input value={input} onChange={handleInputChange} />
+        <button type="submit">Send</button>
+      </form>
+    </div>
+  )
+}
+```
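+
+The component above posts to `/api/chat`. A minimal matching route sketch, assuming a Next.js App Router project and AI SDK's `toDataStreamResponse` helper (the file path and server URL are illustrative):
+
+```typescript
+// app/api/chat/route.ts
+import { createTransport, watchTools } from "@smithery/sdk"
+import { Client } from "@modelcontextprotocol/sdk/client/index.js"
+import { streamText } from "ai"
+import { openai } from "@ai-sdk/openai"
+
+export async function POST(req: Request) {
+  const { messages } = await req.json()
+
+  // The browser and server can't share a client, so connect here too
+  const transport = createTransport("https://my-server.smithery.ai")
+  const client = new Client({ name: "chat-api", version: "1.0.0" })
+  await client.connect(transport)
+
+  const tools = await watchTools(client)
+
+  const result = streamText({
+    model: openai("gpt-4"),
+    tools,
+    messages
+  })
+
+  return result.toDataStreamResponse()
+}
+```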
+
+## Tool Execution Details
+
+When a tool is called through AI SDK:
+
+1. **Parameter validation** - AI SDK validates against the schema
+2. **Automatic execution** - The tool is called via MCP client
+3. **Result formatting** - Results are formatted for the model
+4. **Error handling** - Errors are caught and wrapped
+
+```typescript
+// Behind the scenes
+async function execute(args: inferParameters<typeof parameters>) {
+  try {
+    // Validate args against schema
+    const validated = parameters.parse(args)
+
+    // Call MCP tool
+    const result = await client.callTool({ name, arguments: validated })
+
+    // Format result for AI SDK
+    return {
+      content: result.content
+    }
+  } catch (error) {
+    // Wrap error for AI SDK
+    throw wrapError(error)
+  }
+}
+```
+
+## Performance Tips
+
+1. **Use watchTools once** - Set up watching at app initialization
+2. **Cache tool definitions** - Tools rarely change during runtime
+3. **Batch tool calls** - Some models can call multiple tools in parallel
+4. **Monitor updates** - Log when tools change to debug issues
+
+## Related
+
+- [Integrations overview](/sdk/integrations) - All integration options
+- [LLM providers](/sdk/integrations/llm-providers) - Provider-specific setup
+- [Client setup](/sdk/client) - MCP client configuration
\ No newline at end of file
diff --git a/sdk/integrations/index.mdx b/sdk/integrations/index.mdx
new file mode 100644
index 0000000..1218bb8
--- /dev/null
+++ b/sdk/integrations/index.mdx
@@ -0,0 +1,343 @@
+---
+title: "Integrations"
+description: "Connect Smithery MCP servers with Vercel AI SDK and LLM providers"
+---
+
+The Smithery SDK provides seamless integrations with popular AI frameworks and LLM providers, allowing you to expose MCP tools to AI applications.
+
+## Available Integrations
+
+<CardGroup cols={2}>
+  <Card title="Vercel AI SDK" href="/sdk/integrations/ai-sdk">
+    Integrate MCP tools with Vercel's AI SDK
+  </Card>
+  <Card title="LLM Providers" href="/sdk/integrations/llm-providers">
+    Connect to OpenAI, Anthropic, and more
+  </Card>
+</CardGroup>
+
+## Overview
+
+MCP tools can be exposed to AI applications through various integration patterns:
+
+1. **Direct Integration** - Use MCP tools directly in AI applications
+2. **Tool Conversion** - Convert MCP tools to AI SDK format
+3. **Provider Wrappers** - Wrap LLM providers with MCP capabilities
+
+## Quick Example
+
+### Vercel AI SDK Integration
+
+```typescript
+import { createTransport, watchTools } from "@smithery/sdk"
+import { Client } from "@modelcontextprotocol/sdk/client/index.js"
+import { generateText } from "ai"
+import { openai } from "@ai-sdk/openai"
+
+// Connect to MCP server
+const transport = createTransport("weather-api.smithery.ai")
+const client = new Client({ name: "ai-app", version: "1.0.0" })
+await client.connect(transport)
+
+// Watch for tool changes
+const tools = await watchTools(client)
+
+// Use with AI SDK
+const result = await generateText({
+  model: openai("gpt-4"),
+  prompt: "What's the weather in San Francisco?",
+  tools // MCP tools automatically available
+})
+```
+
+### Direct Tool Usage
+
+```typescript
+// List available MCP tools
+const availableTools = await client.listTools()
+console.log("Available tools:", availableTools)
+
+// Call MCP tool directly
+const weather = await client.callTool({
+  name: "get_weather",
+  arguments: { city: "San Francisco" }
+})
+```
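+
+Note that MCP tool results come back as a list of typed content blocks rather than a bare value. Unpacking the text blocks from the call above:
+
+```typescript
+for (const block of weather.content) {
+  if (block.type === "text") {
+    console.log(block.text)
+  }
+}
+```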
+
+## Integration Patterns
+
+### 1. Tool Discovery
+
+Automatically discover and use MCP tools:
+
+```typescript
+import { listTools } from "@smithery/sdk"
+
+async function discoverTools(client: Client) {
+  // Get current tools
+  const tools = await listTools(client)
+
+  // Tools are automatically formatted for AI SDK
+  return tools
+}
+```
+
+### 2. Dynamic Tool Loading
+
+Tools that update based on server changes:
+
+```typescript
+import { watchTools } from "@smithery/sdk"
+import { ToolListChangedNotificationSchema } from "@modelcontextprotocol/sdk/types.js"
+
+// Tools automatically update when the server changes
+const tools = await watchTools(client)
+
+// Set up notification handler for changes
+client.setNotificationHandler(ToolListChangedNotificationSchema, async () => {
+  console.log("Tools updated!")
+  // Re-fetch tools or update UI
+})
+```
+
+### 3. Error Handling
+
+Patch the client so tool errors are returned as `isError` results instead of thrown exceptions:
+
+```typescript
+import { wrapError } from "@smithery/sdk"
+
+const client = wrapError(new Client({ name: "ai-app", version: "1.0.0" }))
+await client.connect(transport)
+
+// Now callTool returns { isError: true, content: [...] } on failures
+const result = await client.callTool({ name: "process_data", arguments: { data } })
+if (result.isError) {
+  console.error("Tool failed:", result.content)
+}
+```
+
+## Provider-Specific Integration
+
+### OpenAI Function Calling
+
+```typescript
+import { openai } from "@ai-sdk/openai"
+import { generateText } from "ai"
+
+const tools = await watchTools(client)
+
+const result = await generateText({
+  model: openai("gpt-4"),
+  messages: [
+    { role: "user", content: "Analyze the sales data" }
+  ],
+  tools,
+  toolChoice: "auto" // Let the model decide which tools to use
+})
+```
+
+### Anthropic Tool Use
+
+```typescript
+import { anthropic } from "@ai-sdk/anthropic"
+
+const tools = await watchTools(client)
+
+const result = await generateText({
+  model: anthropic("claude-3-opus-20240229"),
+  messages: [
+    { role: "user", content: "Search for recent AI papers" }
+  ],
+  tools,
+  toolChoice: { type: "tool", toolName: "search_papers" }
+})
+```
+
+## Advanced Patterns
+
+### Tool Filtering
+
+Filter tools based on capabilities:
+
+```typescript
+const allTools = await listTools(client)
+
+// Filter tools by category
+const searchTools = Object.fromEntries(
+  Object.entries(allTools).filter(([name, tool]) =>
+    name.includes("search") || tool.description?.includes("search")
+  )
+)
+
+// Use filtered tools
+const result = await generateText({
+  model: openai("gpt-4"),
+  tools: searchTools,
+  prompt: "Find information about quantum computing"
+})
+```
+
+### Tool Composition
+
+Combine multiple MCP servers:
+
+```typescript
+// Connect to multiple servers
+const weatherClient = await connectToServer("weather.smithery.ai")
+const searchClient = await connectToServer("search.smithery.ai")
+const dbClient = await connectToServer("database.smithery.ai")
+
+// Combine tools
+const allTools = {
+  ...await listTools(weatherClient),
+  ...await listTools(searchClient),
+  ...await listTools(dbClient)
+}
+
+// Use combined tools
+const result = await generateText({
+  model: openai("gpt-4"),
+  tools: allTools,
+  prompt: "What's the weather for cities in our customer database?"
+})
+```
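+
+One caveat when spreading servers together: later spreads silently overwrite tools that share a name. If your servers might collide, prefix the keys. A small sketch (`connectToServer` and the clients are the same assumptions as above):
+
+```typescript
+// Namespace each server's tools to avoid key collisions
+function prefixTools(prefix: string, tools: Record<string, any>) {
+  return Object.fromEntries(
+    Object.entries(tools).map(([name, tool]) => [`${prefix}_${name}`, tool])
+  )
+}
+
+const namespacedTools = {
+  ...prefixTools("weather", await listTools(weatherClient)),
+  ...prefixTools("search", await listTools(searchClient)),
+  ...prefixTools("db", await listTools(dbClient))
+}
+```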
+
+### Streaming Responses
+
+Handle streaming tool responses:
+
+```typescript
+import { streamText } from "ai"
+
+const result = streamText({
+  model: openai("gpt-4"),
+  tools,
+  prompt: "Generate a report using the data"
+})
+
+for await (const chunk of result.fullStream) {
+  if (chunk.type === "tool-result") {
+    console.log("Tool called:", chunk.toolName)
+    console.log("Result:", chunk.result)
+  } else if (chunk.type === "text-delta") {
+    process.stdout.write(chunk.textDelta)
+  }
+}
+```
+
+## Best Practices
+
+1. **Cache tool definitions** - Avoid re-fetching unchanged tools
+2. **Handle tool updates** - Use `watchTools` for dynamic servers
+3. **Error gracefully** - Wrap MCP errors for AI SDK compatibility
+4. **Filter appropriately** - Only expose relevant tools to the model
+5. **Monitor usage** - Track which tools are called most frequently
+
+## Common Use Cases
+
+### 1. Augmented Chatbots
+
+```typescript
+// Chatbot with access to company tools
+const tools = await watchTools(mcpClient)
+
+async function handleUserMessage(message: string) {
+  const response = await generateText({
+    model: openai("gpt-4"),
+    tools,
+    messages: [
+      { role: "system", content: "You are a helpful assistant with access to company tools." },
+      { role: "user", content: message }
+    ]
+  })
+
+  return response.text
+}
+```
+
+### 2. Automated Workflows
+
+```typescript
+import { generateObject } from "ai"
+import { z } from "zod"
+
+// Workflow automation with MCP tools
+async function runWorkflow(task: string) {
+  const tools = await listTools(mcpClient)
+
+  const plan = await generateObject({
+    model: openai("gpt-4"),
+    schema: z.object({
+      steps: z.array(z.object({
+        tool: z.string(),
+        arguments: z.record(z.any()),
+        description: z.string()
+      }))
+    }),
+    prompt: `Plan how to: ${task}`
+  })
+
+  // Execute plan
+  for (const step of plan.object.steps) {
+    const result = await mcpClient.callTool({
+      name: step.tool,
+      arguments: step.arguments
+    })
+    console.log(`✓ ${step.description}`)
+  }
+}
+```
+
+### 3. Data Analysis Assistant
+
+```typescript
+// AI assistant with data tools
+const tools = {
+  ...await listTools(databaseClient),
+  ...await listTools(analyticsClient),
+  ...await listTools(visualizationClient)
+}
+
+async function analyzeData(query: string) {
+  return generateText({
+    model: anthropic("claude-3-opus-20240229"),
+    tools,
+    prompt: `Analyze and visualize: ${query}`,
+    toolChoice: "required" // Must use tools
+  })
+}
+```
+
+## Troubleshooting
+
+### Tools Not Appearing
+
+```typescript
+// Debug tool discovery
+const tools = await client.listTools()
+console.log("Raw MCP tools:", tools)
+
+const aiTools = await listTools(client)
+console.log("Converted AI tools:", Object.keys(aiTools))
+```
+
+### Tool Execution Errors
+
+```typescript
+try {
+  const result = await client.callTool({ name: "my_tool", arguments: args })
+} catch (error) {
+  if (error.code === "TOOL_NOT_FOUND") {
+    // Re-fetch tools
+    const tools = await watchTools(client)
+  } else if (error.code === "INVALID_PARAMS") {
+    console.error("Tool parameters:", error.details)
+  }
+}
+```
+
+## Related
+
+- [AI SDK integration guide](/sdk/integrations/ai-sdk) - Detailed Vercel AI SDK integration
+- [LLM provider setup](/sdk/integrations/llm-providers) - Provider-specific configuration
+- [Client documentation](/sdk/client) - MCP client setup
\ No newline at end of file
diff --git a/sdk/integrations/llm-providers.mdx b/sdk/integrations/llm-providers.mdx
new file mode 100644
index 0000000..dfd41a8
--- /dev/null
+++ b/sdk/integrations/llm-providers.mdx
@@ -0,0 +1,447 @@
+---
+title: "LLM Provider Integration"
+description: "Connect MCP tools to OpenAI, Anthropic, and other LLM providers"
+---
+
+The Smithery SDK includes integration modules for popular LLM providers, enabling seamless use of MCP tools with different AI models.
+
+## Available Providers
+
+### OpenAI Integration
+
+Located at `@smithery/sdk/client/integrations/llm/openai`:
+
+```typescript
+import { OpenAIChatAdapter } from "@smithery/sdk/client/integrations/llm/openai"
+
+const openaiAdapter = new OpenAIChatAdapter(client)
+const openaiTools = await openaiAdapter.listTools()
+// ... call your OpenAI client, then pass the response to:
+const toolMessages = await openaiAdapter.callTool(completion)
+```
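+
+Put together, the adapter drives a simple loop: list tools, call OpenAI, execute any requested MCP tools, and repeat until the model stops asking for tools. A hedged sketch of that loop (the adapter methods are the ones shown above; the rest is plain `openai` SDK usage):
+
+```typescript
+import OpenAI from "openai"
+
+const openai = new OpenAI()
+const adapter = new OpenAIChatAdapter(client)
+
+const messages: OpenAI.ChatCompletionMessageParam[] = [
+  { role: "user", content: "What's the weather in NYC?" }
+]
+
+while (true) {
+  const completion = await openai.chat.completions.create({
+    model: "gpt-4",
+    messages,
+    tools: await adapter.listTools()
+  })
+
+  // Execute requested MCP tools; returns tool messages for the next turn
+  const toolMessages = await adapter.callTool(completion)
+  if (toolMessages.length === 0) break // plain-text answer, we're done
+
+  messages.push(completion.choices[0].message, ...toolMessages)
+}
+```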
+
+### Anthropic Integration
+
+Located at `@smithery/sdk/client/integrations/llm/anthropic`:
+
+```typescript
+import { AnthropicChatAdapter } from "@smithery/sdk/client/integrations/llm/anthropic"
+
+const anthropicAdapter = new AnthropicChatAdapter(client)
+const tools = await anthropicAdapter.listTools()
+// ... call Anthropic, then pass the message to:
+const toolResults = await anthropicAdapter.callTool(response)
+```
+
+## Integration Patterns
+
+### Direct Provider Usage
+
+Use MCP tools directly with provider SDKs:
+
+```typescript
+import OpenAI from "openai"
+import { createTransport } from "@smithery/sdk"
+import { Client } from "@modelcontextprotocol/sdk/client/index.js"
+
+// Setup MCP client
+const transport = createTransport("tools.smithery.ai")
+const mcpClient = new Client({ name: "openai-app", version: "1.0.0" })
+await mcpClient.connect(transport)
+
+// Get available tools
+const tools = await mcpClient.listTools()
+
+// Setup OpenAI with MCP tools
+const openai = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY
+})
+
+// Convert MCP tools to OpenAI format
+const openaiTools = tools.tools.map(tool => ({
+  type: "function" as const,
+  function: {
+    name: tool.name,
+    description: tool.description,
+    parameters: tool.inputSchema
+  }
+}))
+
+// Use with OpenAI
+const completion = await openai.chat.completions.create({
+  model: "gpt-4",
+  messages: [
+    { role: "user", content: "What's the weather in NYC?" }
+  ],
+  tools: openaiTools,
+  tool_choice: "auto"
+})
+
+// Handle tool calls
+if (completion.choices[0].message.tool_calls) {
+  for (const toolCall of completion.choices[0].message.tool_calls) {
+    const result = await mcpClient.callTool({
+      name: toolCall.function.name,
+      arguments: JSON.parse(toolCall.function.arguments)
+    })
+    console.log("Tool result:", result)
+  }
+}
+```
+
+### Anthropic Tool Use
+
+```typescript
+import Anthropic from "@anthropic-ai/sdk"
+
+// Setup Anthropic client
+const anthropic = new Anthropic({
+  apiKey: process.env.ANTHROPIC_API_KEY
+})
+
+// Convert MCP tools to Anthropic format
+const anthropicTools = tools.tools.map(tool => ({
+  name: tool.name,
+  description: tool.description,
+  input_schema: tool.inputSchema
+}))
+
+// Use with Claude
+const response = await anthropic.messages.create({
+  model: "claude-3-opus-20240229",
+  max_tokens: 1000,
+  messages: [
+    { role: "user", content: "Analyze the sales data from last quarter" }
+  ],
+  tools: anthropicTools
+})
+
+// Handle tool use (a tool_use block may follow a text block)
+const toolUse = response.content.find(block => block.type === "tool_use")
+if (toolUse) {
+  const result = await mcpClient.callTool({
+    name: toolUse.name,
+    arguments: toolUse.input
+  })
+}
+```
+
+## Unified Interface
+
+Create a unified interface for multiple providers:
+
+```typescript
+interface LLMProvider {
+  name: string
+  callWithTools(
+    prompt: string,
+    tools: any[]
+  ): Promise<{ text?: string | null; toolResults: any[] }>
+}
+
+class OpenAIProvider implements LLMProvider {
+  name = "openai"
+
+  constructor(private openai: OpenAI, private mcpClient: Client) {}
+
+  async callWithTools(prompt: string, mcpTools: any[]) {
+    const tools = mcpTools.map(tool => ({
+      type: "function" as const,
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: tool.inputSchema
+      }
+    }))
+
+    const completion = await this.openai.chat.completions.create({
+      model: "gpt-4",
+      messages: [{ role: "user", content: prompt }],
+      tools
+    })
+
+    // Process tool calls
+    const toolCalls = completion.choices[0].message.tool_calls || []
+    const results = []
+
+    for (const call of toolCalls) {
+      const result = await this.mcpClient.callTool({
+        name: call.function.name,
+        arguments: JSON.parse(call.function.arguments)
+      })
+      results.push(result)
+    }
+
+    return {
+      text: completion.choices[0].message.content,
+      toolResults: results
+    }
+  }
+}
+
+class AnthropicProvider implements LLMProvider {
+  name = "anthropic"
+
+  constructor(private anthropic: Anthropic, private mcpClient: Client) {}
+
+  async callWithTools(prompt: string, mcpTools: any[]) {
+    const tools = mcpTools.map(tool => ({
+      name: tool.name,
+      description: tool.description,
+      input_schema: tool.inputSchema
+    }))
+
+    const response = await this.anthropic.messages.create({
+      model: "claude-3-opus-20240229",
+      max_tokens: 1000,
+      messages: [{ role: "user", content: prompt }],
+      tools
+    })
+
+    // Process tool use
+    const results = []
+    for (const content of response.content) {
+      if (content.type === "tool_use") {
+        const result = await this.mcpClient.callTool({
+          name: content.name,
+          arguments: content.input
+        })
+        results.push(result)
+      }
+    }
+
+    return {
+      text: response.content.find(c => c.type === "text")?.text,
+      toolResults: results
+    }
+  }
+}
+```
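+
+Usage then looks identical regardless of the backing model; only construction differs. A brief sketch, assuming the classes above plus an existing `mcpClient`:
+
+```typescript
+const providers: LLMProvider[] = [
+  new OpenAIProvider(new OpenAI(), mcpClient),
+  new AnthropicProvider(new Anthropic(), mcpClient)
+]
+
+const { tools } = await mcpClient.listTools()
+
+for (const provider of providers) {
+  const { text, toolResults } = await provider.callWithTools(
+    "Summarize last week's sales",
+    tools
+  )
+  console.log(`[${provider.name}]`, text, `(${toolResults.length} tool calls)`)
+}
+```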
+
+## Streaming with Tool Calls
+
+### OpenAI Streaming
+
+```typescript
+const stream = await openai.chat.completions.create({
+  model: "gpt-4",
+  messages: [{ role: "user", content: "Analyze data and create a report" }],
+  tools: openaiTools,
+  stream: true
+})
+
+let toolCalls: any[] = []
+
+for await (const chunk of stream) {
+  // Handle text chunks
+  if (chunk.choices[0]?.delta?.content) {
+    process.stdout.write(chunk.choices[0].delta.content)
+  }
+
+  // Accumulate tool calls (ids and arguments arrive in fragments)
+  if (chunk.choices[0]?.delta?.tool_calls) {
+    for (const toolCall of chunk.choices[0].delta.tool_calls) {
+      if (!toolCalls[toolCall.index]) {
+        toolCalls[toolCall.index] = {
+          id: toolCall.id,
+          function: { name: "", arguments: "" }
+        }
+      }
+
+      if (toolCall.function?.name) {
+        toolCalls[toolCall.index].function.name = toolCall.function.name
+      }
+
+      if (toolCall.function?.arguments) {
+        toolCalls[toolCall.index].function.arguments += toolCall.function.arguments
+      }
+    }
+  }
+}
+
+// Execute accumulated tool calls
+for (const toolCall of toolCalls) {
+  if (toolCall) {
+    const result = await mcpClient.callTool({
+      name: toolCall.function.name,
+      arguments: JSON.parse(toolCall.function.arguments)
+    })
+    console.log(`\nTool ${toolCall.function.name} result:`, result)
+  }
+}
+```
+
+## Error Handling
+
+### Provider-Specific Error Handling
+
+```typescript
+import { wrapError } from "@smithery/sdk"
+
+async function callWithErrorHandling(provider: LLMProvider, prompt: string) {
+  try {
+    return await provider.callWithTools(prompt, tools)
+  } catch (error) {
+    if (error.code === "rate_limit_exceeded") {
+      // Back off, then retry once the window resets
+      await new Promise(resolve => setTimeout(resolve, 60000))
+      return callWithErrorHandling(provider, prompt)
+    }
+
+    if (error.code === "invalid_api_key") {
+      throw new Error("Please check your API key configuration")
+    }
+
+    // Wrap other errors
+    throw wrapError(error)
+  }
+}
+```
+
+### Tool Execution Errors
+
+```typescript
+async function executeToolSafely(client: Client, name: string, args: any) {
+  try {
+    return await client.callTool({ name, arguments: args })
+  } catch (error) {
+    console.error(`Tool ${name} failed:`, error)
+
+    // Return the error as a tool result
+    return {
+      content: [{
+        type: "text",
+        text: `Error executing ${name}: ${error.message}`
+      }]
+    }
+  }
+}
+```
+
+## Provider Comparison
+
+| Feature        | OpenAI          | Anthropic       | Notes                             |
+|----------------|-----------------|-----------------|-----------------------------------|
+| Tool format    | Functions       | Tools           | Different schema structure        |
+| Streaming      | ✅ Full support | ✅ Full support | Both support streaming with tools |
+| Parallel calls | ✅ Yes          | ✅ Yes          | Can call multiple tools           |
+| Error handling | Detailed        | Detailed        | Both provide good error info      |
+
+## Best Practices
+
+### 1. Abstract Provider Details
+
+```typescript
+class LLMService {
+  private providers: Map<string, LLMProvider> = new Map()
+
+  constructor(private mcpClient: Client) {}
+
+  registerProvider(provider: LLMProvider) {
+    this.providers.set(provider.name, provider)
+  }
+
+  async query(prompt: string, options: { provider?: string } = {}) {
+    const providerName = options.provider || "openai"
+    const provider = this.providers.get(providerName)
+
+    if (!provider) {
+      throw new Error(`Unknown provider: ${providerName}`)
+    }
+
+    const tools = await this.mcpClient.listTools()
+    return provider.callWithTools(prompt, tools.tools)
+  }
+}
+```
+
+### 2. Cache Tool Definitions
+
+```typescript
+class CachedMCPClient {
+  private toolCache: any[] | null = null
+  private cacheTime = 0
+  private cacheDuration = 60000 // 1 minute
+
+  constructor(private client: Client) {}
+
+  async getTools() {
+    const now = Date.now()
+
+    if (!this.toolCache || now - this.cacheTime > this.cacheDuration) {
+      const result = await this.client.listTools()
+      this.toolCache = result.tools
+      this.cacheTime = now
+    }
+
+    return this.toolCache
+  }
+}
+```
+
+### 3. Handle Provider Limits
+
+```typescript
+class RateLimitedProvider {
+  private callCount = 0
+  private resetTime = Date.now() + 60000
+  private maxCalls = 60
+
+  async call<T>(fn: () => Promise<T>): Promise<T> {
+    const now = Date.now()
+
+    if (now > this.resetTime) {
+      this.callCount = 0
+      this.resetTime = now + 60000
+    }
+
+    if (this.callCount >= this.maxCalls) {
+      const waitTime = this.resetTime - now
+      throw new Error(`Rate limit exceeded. Wait ${waitTime}ms`)
+    }
+
+    this.callCount++
+    return fn()
+  }
+}
+```
+
+## Testing
+
+### Mock Provider for Testing
+
+```typescript
+class MockLLMProvider implements LLMProvider {
+  name = "mock"
+
+  constructor(
+    private responses: Map<string, any>,
+    private mcpClient: Client
+  ) {}
+
+  async callWithTools(prompt: string, tools: any[]) {
+    // Return a predefined response
+    const response = this.responses.get(prompt) || {
+      text: "Mock response",
+      shouldCallTool: true,
+      toolName: tools[0]?.name,
+      toolArgs: {}
+    }
+
+    const toolResults = []
+    if (response.shouldCallTool && response.toolName) {
+      const result = await this.mcpClient.callTool({
+        name: response.toolName,
+        arguments: response.toolArgs
+      })
+      toolResults.push(result)
+    }
+
+    return {
+      text: response.text,
+      toolResults
+    }
+  }
+}
+```
+
+## Related
+
+- [AI SDK integration](/sdk/integrations/ai-sdk) - Vercel AI SDK specifics
+- [Integration overview](/sdk/integrations) - All integration options
+- [Error handling](/sdk/patterns/errors) - Error wrapping utilities
\ No newline at end of file