Skip to content

Commit 8298495

Browse files
authored
feat(cloudflare,vercel-edge): Add support for Google Gen AI instrumentation (getsentry#17723)
Adds support for Google GenAI manual instrumentation in @sentry/cloudflare and @sentry/vercel-edge. To instrument the Google GenAI client, wrap it with Sentry.instrumentGoogleGenAIClient and optionally pass recording settings. ``` import * as Sentry from '@sentry/cloudflare'; import { GoogleGenAI } from '@google/genai'; const genAI = new GoogleGenAI({ apiKey: 'your-api-key' }); const client = Sentry.instrumentGoogleGenAIClient(genAI, { recordInputs: true, recordOutputs: true }); // use the wrapped client with models api const model = client.models.generateContent({ model: 'gemini-1.5-pro', contents: [{ role: 'user', parts: [{ text: 'Hello!' }] }] }); // or use chat functionality const chat = client.chats.create({ model: 'gemini-1.5-flash' }); const response = await chat.sendMessage({ message: 'Tell me a joke' }); ```
1 parent 61b3f97 commit 8298495

File tree

8 files changed

+275
-2
lines changed

8 files changed

+275
-2
lines changed
Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
import * as Sentry from '@sentry/cloudflare';
2+
import type { GoogleGenAIClient } from '@sentry/core';
3+
import { MockGoogleGenAI } from './mocks';
4+
5+
interface Env {
6+
SENTRY_DSN: string;
7+
}
8+
9+
const mockClient = new MockGoogleGenAI({
10+
apiKey: 'mock-api-key',
11+
});
12+
13+
const client: GoogleGenAIClient = Sentry.instrumentGoogleGenAIClient(mockClient);
14+
15+
export default Sentry.withSentry(
16+
(env: Env) => ({
17+
dsn: env.SENTRY_DSN,
18+
tracesSampleRate: 1.0,
19+
}),
20+
{
21+
async fetch(_request, _env, _ctx) {
22+
// Test 1: chats.create and sendMessage flow
23+
const chat = client.chats.create({
24+
model: 'gemini-1.5-pro',
25+
config: {
26+
temperature: 0.8,
27+
topP: 0.9,
28+
maxOutputTokens: 150,
29+
},
30+
history: [
31+
{
32+
role: 'user',
33+
parts: [{ text: 'Hello, how are you?' }],
34+
},
35+
],
36+
});
37+
38+
const chatResponse = await chat.sendMessage({
39+
message: 'Tell me a joke',
40+
});
41+
42+
// Test 2: models.generateContent
43+
const modelResponse = await client.models.generateContent({
44+
model: 'gemini-1.5-flash',
45+
config: {
46+
temperature: 0.7,
47+
topP: 0.9,
48+
maxOutputTokens: 100,
49+
},
50+
contents: [
51+
{
52+
role: 'user',
53+
parts: [{ text: 'What is the capital of France?' }],
54+
},
55+
],
56+
});
57+
58+
return new Response(JSON.stringify({ chatResponse, modelResponse }));
59+
},
60+
},
61+
);
Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
import type { GoogleGenAIChat, GoogleGenAIClient, GoogleGenAIResponse } from '@sentry/core';
2+
3+
export class MockGoogleGenAI implements GoogleGenAIClient {
4+
public models: {
5+
generateContent: (...args: unknown[]) => Promise<GoogleGenAIResponse>;
6+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
7+
generateContentStream: (...args: unknown[]) => Promise<AsyncGenerator<GoogleGenAIResponse, any, unknown>>;
8+
};
9+
public chats: {
10+
create: (...args: unknown[]) => GoogleGenAIChat;
11+
};
12+
public apiKey: string;
13+
14+
public constructor(config: { apiKey: string }) {
15+
this.apiKey = config.apiKey;
16+
17+
// models.generateContent functionality
18+
this.models = {
19+
generateContent: async (...args: unknown[]) => {
20+
const params = args[0] as { model: string; contents?: unknown };
21+
// Simulate processing time
22+
await new Promise(resolve => setTimeout(resolve, 10));
23+
24+
if (params.model === 'error-model') {
25+
const error = new Error('Model not found');
26+
(error as unknown as { status: number }).status = 404;
27+
(error as unknown as { headers: Record<string, string> }).headers = { 'x-request-id': 'mock-request-123' };
28+
throw error;
29+
}
30+
31+
return {
32+
candidates: [
33+
{
34+
content: {
35+
parts: [
36+
{
37+
text: 'Hello from Google GenAI mock!',
38+
},
39+
],
40+
role: 'model',
41+
},
42+
finishReason: 'stop',
43+
index: 0,
44+
},
45+
],
46+
usageMetadata: {
47+
promptTokenCount: 8,
48+
candidatesTokenCount: 12,
49+
totalTokenCount: 20,
50+
},
51+
};
52+
},
53+
generateContentStream: async () => {
54+
// Return a promise that resolves to an async generator
55+
return (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
56+
yield {
57+
candidates: [
58+
{
59+
content: {
60+
parts: [{ text: 'Streaming response' }],
61+
role: 'model',
62+
},
63+
finishReason: 'stop',
64+
index: 0,
65+
},
66+
],
67+
};
68+
})();
69+
},
70+
};
71+
72+
// chats.create implementation
73+
this.chats = {
74+
create: (...args: unknown[]) => {
75+
const params = args[0] as { model: string; config?: Record<string, unknown> };
76+
const model = params.model;
77+
78+
return {
79+
modelVersion: model,
80+
sendMessage: async (..._messageArgs: unknown[]) => {
81+
// Simulate processing time
82+
await new Promise(resolve => setTimeout(resolve, 10));
83+
84+
return {
85+
candidates: [
86+
{
87+
content: {
88+
parts: [
89+
{
90+
text: 'This is a joke from the chat!',
91+
},
92+
],
93+
role: 'model',
94+
},
95+
finishReason: 'stop',
96+
index: 0,
97+
},
98+
],
99+
usageMetadata: {
100+
promptTokenCount: 8,
101+
candidatesTokenCount: 12,
102+
totalTokenCount: 20,
103+
},
104+
modelVersion: model, // Include model version in response
105+
};
106+
},
107+
sendMessageStream: async () => {
108+
// Return a promise that resolves to an async generator
109+
return (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
110+
yield {
111+
candidates: [
112+
{
113+
content: {
114+
parts: [{ text: 'Streaming chat response' }],
115+
role: 'model',
116+
},
117+
finishReason: 'stop',
118+
index: 0,
119+
},
120+
],
121+
};
122+
})();
123+
},
124+
};
125+
},
126+
};
127+
}
128+
}
Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
import { expect, it } from 'vitest';
2+
import { createRunner } from '../../../runner';
3+
4+
// These tests are not exhaustive because the instrumentation is
5+
// already tested in the node integration tests and we merely
6+
// want to test that the instrumentation does not break in our
7+
// cloudflare SDK.
8+
9+
it('traces Google GenAI chat creation and message sending', async () => {
10+
const runner = createRunner(__dirname)
11+
.ignore('event')
12+
.expect(envelope => {
13+
const transactionEvent = envelope[1]?.[0]?.[1] as any;
14+
15+
expect(transactionEvent.transaction).toBe('GET /');
16+
expect(transactionEvent.spans).toEqual(
17+
expect.arrayContaining([
18+
// First span - chats.create
19+
expect.objectContaining({
20+
data: expect.objectContaining({
21+
'gen_ai.operation.name': 'chat',
22+
'sentry.op': 'gen_ai.chat',
23+
'sentry.origin': 'auto.ai.google_genai',
24+
'gen_ai.system': 'google_genai',
25+
'gen_ai.request.model': 'gemini-1.5-pro',
26+
'gen_ai.request.temperature': 0.8,
27+
'gen_ai.request.top_p': 0.9,
28+
'gen_ai.request.max_tokens': 150,
29+
}),
30+
description: 'chat gemini-1.5-pro create',
31+
op: 'gen_ai.chat',
32+
origin: 'auto.ai.google_genai',
33+
}),
34+
// Second span - chat.sendMessage
35+
expect.objectContaining({
36+
data: expect.objectContaining({
37+
'gen_ai.operation.name': 'chat',
38+
'sentry.op': 'gen_ai.chat',
39+
'sentry.origin': 'auto.ai.google_genai',
40+
'gen_ai.system': 'google_genai',
41+
'gen_ai.request.model': 'gemini-1.5-pro',
42+
'gen_ai.usage.input_tokens': 8,
43+
'gen_ai.usage.output_tokens': 12,
44+
'gen_ai.usage.total_tokens': 20,
45+
}),
46+
description: 'chat gemini-1.5-pro',
47+
op: 'gen_ai.chat',
48+
origin: 'auto.ai.google_genai',
49+
}),
50+
// Third span - models.generateContent
51+
expect.objectContaining({
52+
data: expect.objectContaining({
53+
'gen_ai.operation.name': 'models',
54+
'sentry.op': 'gen_ai.models',
55+
'sentry.origin': 'auto.ai.google_genai',
56+
'gen_ai.system': 'google_genai',
57+
'gen_ai.request.model': 'gemini-1.5-flash',
58+
'gen_ai.request.temperature': 0.7,
59+
'gen_ai.request.top_p': 0.9,
60+
'gen_ai.request.max_tokens': 100,
61+
'gen_ai.usage.input_tokens': 8,
62+
'gen_ai.usage.output_tokens': 12,
63+
'gen_ai.usage.total_tokens': 20,
64+
}),
65+
description: 'models gemini-1.5-flash',
66+
op: 'gen_ai.models',
67+
origin: 'auto.ai.google_genai',
68+
}),
69+
]),
70+
);
71+
})
72+
.start();
73+
await runner.makeRequest('get', '/');
74+
await runner.completed();
75+
});
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
  "name": "worker-name",
  "compatibility_date": "2025-06-17",
  "main": "index.ts",
  "compatibility_flags": ["nodejs_compat"]
}

packages/cloudflare/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@ export {
7070
// eslint-disable-next-line deprecation/deprecation
7171
inboundFiltersIntegration,
7272
instrumentOpenAiClient,
73+
instrumentGoogleGenAIClient,
7374
instrumentAnthropicAiClient,
7475
eventFiltersIntegration,
7576
linkedErrorsIntegration,

packages/core/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,7 @@ export { instrumentAnthropicAiClient } from './utils/anthropic-ai';
134134
export { ANTHROPIC_AI_INTEGRATION_NAME } from './utils/anthropic-ai/constants';
135135
export { instrumentGoogleGenAIClient } from './utils/google-genai';
136136
export { GOOGLE_GENAI_INTEGRATION_NAME } from './utils/google-genai/constants';
137+
export type { GoogleGenAIResponse } from './utils/google-genai/types';
137138
export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './utils/openai/types';
138139
export type {
139140
AnthropicAiClient,

packages/core/src/utils/google-genai/index.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -292,10 +292,10 @@ function createDeepProxy<T extends object>(target: T, currentPath = '', options:
292292
*
293293
* @example
294294
* ```typescript
295-
* import { GoogleGenerativeAI } from '@google/genai';
295+
* import { GoogleGenAI } from '@google/genai';
296296
* import { instrumentGoogleGenAIClient } from '@sentry/core';
297297
*
298-
* const genAI = new GoogleGenerativeAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY });
298+
* const genAI = new GoogleGenAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY });
299299
* const instrumentedClient = instrumentGoogleGenAIClient(genAI);
300300
*
301301
* // Now both chats.create and sendMessage will be instrumented

packages/vercel-edge/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@ export {
7070
// eslint-disable-next-line deprecation/deprecation
7171
inboundFiltersIntegration,
7272
instrumentOpenAiClient,
73+
instrumentGoogleGenAIClient,
7374
instrumentAnthropicAiClient,
7475
eventFiltersIntegration,
7576
linkedErrorsIntegration,

0 commit comments

Comments
 (0)