Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
dcfff66
feat(instrumentation-openai): add instrumentation of openai SDK
anuraaga Jul 17, 2025
b58453e
Fix manifest
anuraaga Jul 17, 2025
152c2aa
Fix types
anuraaga Jul 18, 2025
20e7bad
Merge branch 'main' into openai-inst
anuraaga Jul 18, 2025
8284f96
version:update
anuraaga Jul 18, 2025
d023355
Merge branch 'openai-inst' of https://github.com/anuraaga/opentelemet…
anuraaga Jul 18, 2025
c8e58c6
Cleanups
anuraaga Jul 19, 2025
380b165
Merge branch 'main' of https://github.com/open-telemetry/opentelemetr…
anuraaga Jul 19, 2025
22e806a
Fix
anuraaga Jul 19, 2025
16d44e6
Update packages/instrumentation-openai/src/utils.ts
anuraaga Jul 31, 2025
267be04
Fix debug
anuraaga Jul 31, 2025
44311d7
Merge branch 'openai-inst' of https://github.com/anuraaga/opentelemet…
anuraaga Jul 31, 2025
c6ac4ac
Comment
anuraaga Jul 31, 2025
a69be29
Regenerate semconv
anuraaga Jul 31, 2025
52c26b5
Format
anuraaga Jul 31, 2025
3ee2ed6
Fix
anuraaga Jul 31, 2025
65c2a5f
Cleanup
anuraaga Aug 1, 2025
5b0e035
git add
anuraaga Aug 1, 2025
cc1ada2
README
anuraaga Aug 1, 2025
bb28e1a
Match expect version
anuraaga Aug 1, 2025
6b90002
Merge branch 'main' of https://github.com/open-telemetry/opentelemetr…
anuraaga Aug 1, 2025
c035ed2
manage to restore package-lock
anuraaga Aug 1, 2025
d6c427b
Owner
anuraaga Aug 7, 2025
36dd80b
Merge branch 'main' of https://github.com/open-telemetry/opentelemetr…
anuraaga Aug 7, 2025
e89981e
Update .github/component_owners.yml
anuraaga Aug 20, 2025
1cd03cb
Merge branch 'main' into openai-inst
anuraaga Aug 20, 2025
6d77251
Merge branch 'main' into openai-inst
pichlermarc Aug 21, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Regenerate semconv
  • Loading branch information
anuraaga committed Jul 31, 2025
commit a69be299445473e70f460c530d45caf13a0ce0cf
15 changes: 10 additions & 5 deletions packages/instrumentation-openai/src/instrumentation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -61,11 +61,6 @@ import {
ATTR_GEN_AI_TOKEN_TYPE,
ATTR_GEN_AI_USAGE_INPUT_TOKENS,
ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
EVENT_GEN_AI_ASSISTANT_MESSAGE,
EVENT_GEN_AI_CHOICE,
EVENT_GEN_AI_SYSTEM_MESSAGE,
EVENT_GEN_AI_TOOL_MESSAGE,
EVENT_GEN_AI_USER_MESSAGE,
METRIC_GEN_AI_CLIENT_OPERATION_DURATION,
METRIC_GEN_AI_CLIENT_TOKEN_USAGE,
} from './semconv';
Expand All @@ -82,6 +77,16 @@ import {
GenAIToolMessageEventBody,
} from './internal-types';



// The JS semconv package does not yet export constants for GenAI event
// names, so they are defined locally until it does.
// TODO: file an issue against open-telemetry/opentelemetry-js asking the
// semantic-conventions package to emit event-name constants, then replace
// these local definitions with imports from that package.

/** GenAI semconv event name for a system-role message sent to the model. */
export const EVENT_GEN_AI_SYSTEM_MESSAGE = 'gen_ai.system.message' as const;

/** GenAI semconv event name for a user-role message sent to the model. */
export const EVENT_GEN_AI_USER_MESSAGE = 'gen_ai.user.message' as const;

/** GenAI semconv event name for an assistant-role message. */
export const EVENT_GEN_AI_ASSISTANT_MESSAGE =
  'gen_ai.assistant.message' as const;

/** GenAI semconv event name for a tool/function-role message. */
export const EVENT_GEN_AI_TOOL_MESSAGE = 'gen_ai.tool.message' as const;

/** GenAI semconv event name for a response candidate (choice) from the model. */
export const EVENT_GEN_AI_CHOICE = 'gen_ai.choice' as const;

export class OpenAIInstrumentation extends InstrumentationBase<OpenAIInstrumentationConfig> {
private _genaiClientOperationDuration!: Histogram;
private _genaiClientTokenUsage!: Histogram;
Expand Down
231 changes: 190 additions & 41 deletions packages/instrumentation-openai/src/semconv.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,56 +15,205 @@
*/

/*
* Copyright The OpenTelemetry Authors
* This file contains a copy of unstable semantic convention definitions
* used by this package.
* @see https://github.com/open-telemetry/opentelemetry-js/tree/main/semantic-conventions#unstable-semconv
*/

/**
* Identifies the class / type of event.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* @example browser.mouse.click
* @example device.app.lifecycle
*
* https://www.apache.org/licenses/LICENSE-2.0
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* @deprecated Replaced by EventName top-level field on the LogRecord.
*/
export const ATTR_SERVER_ADDRESS = 'server.address';
export const ATTR_SERVER_PORT = 'server.port';
export const ATTR_EVENT_NAME = 'event.name' as const;

// -- Unstable semconv
/**
* The name of the operation being performed.
*
* @note If one of the predefined values applies, but specific system uses a different name it's **RECOMMENDED** to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries **SHOULD** use applicable predefined value.
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_OPERATION_NAME = 'gen_ai.operation.name' as const;

export const ATTR_EVENT_NAME = 'event.name';
export const ATTR_GEN_AI_OPERATION_NAME = 'gen_ai.operation.name';
/**
* The encoding formats requested in an embeddings operation, if specified.
*
* @example ["base64"]
* @example ["float", "binary"]
*
* @note In some GenAI systems the encoding formats are called embedding types. Also, some GenAI systems only accept a single format per request.
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_ENCODING_FORMATS =
'gen_ai.request.encoding_formats' as const;

/**
* The frequency penalty setting for the GenAI request.
*
* @example 0.1
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY =
'gen_ai.request.frequency_penalty';
export const ATTR_GEN_AI_REQUEST_MAX_TOKENS = 'gen_ai.request.max_tokens';
export const ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model';
'gen_ai.request.frequency_penalty' as const;

/**
* The maximum number of tokens the model generates for a request.
*
* @example 100
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_MAX_TOKENS =
'gen_ai.request.max_tokens' as const;

/**
* The name of the GenAI model a request is being made to.
*
* @example "gpt-4"
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model' as const;

/**
* The presence penalty setting for the GenAI request.
*
* @example 0.1
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY =
'gen_ai.request.presence_penalty';
export const ATTR_GEN_AI_REQUEST_TEMPERATURE = 'gen_ai.request.temperature';
'gen_ai.request.presence_penalty' as const;

/**
* List of sequences that the model will use to stop generating further tokens.
*
* @example ["forest", "lived"]
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_STOP_SEQUENCES =
'gen_ai.request.stop_sequences';
export const ATTR_GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p';
'gen_ai.request.stop_sequences' as const;

/**
* The temperature setting for the GenAI request.
*
* @example 0.0
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_TEMPERATURE =
'gen_ai.request.temperature' as const;

/**
* The top_p sampling setting for the GenAI request.
*
* @example 1.0
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p' as const;

/**
* Array of reasons the model stopped generating tokens, corresponding to each generation received.
*
* @example ["stop"]
* @example ["stop", "length"]
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_RESPONSE_FINISH_REASONS =
'gen_ai.response.finish_reasons';
export const ATTR_GEN_AI_RESPONSE_ID = 'gen_ai.response.id';
export const ATTR_GEN_AI_RESPONSE_MODEL = 'gen_ai.response.model';
export const ATTR_GEN_AI_SYSTEM = 'gen_ai.system';
export const ATTR_GEN_AI_TOKEN_TYPE = 'gen_ai.token.type';
export const ATTR_GEN_AI_USAGE_INPUT_TOKENS = 'gen_ai.usage.input_tokens';
export const ATTR_GEN_AI_USAGE_OUTPUT_TOKENS = 'gen_ai.usage.output_tokens';
'gen_ai.response.finish_reasons' as const;

/**
* The unique identifier for the completion.
*
* @example chatcmpl-123
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_RESPONSE_ID = 'gen_ai.response.id' as const;

/**
* The name of the model that generated the response.
*
* @example gpt-4-0613
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_RESPONSE_MODEL = 'gen_ai.response.model' as const;

/**
* The Generative AI product as identified by the client or server instrumentation.
*
* @example "openai"
*
* @note The `gen_ai.system` describes a family of GenAI models with specific model identified
* by `gen_ai.request.model` and `gen_ai.response.model` attributes.
*
* The actual GenAI product may differ from the one identified by the client.
* Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI client
* libraries. In such cases, the `gen_ai.system` is set to `openai` based on the
* instrumentation's best knowledge, instead of the actual system. The `server.address`
* attribute may help identify the actual system in use for `openai`.
*
* For custom model, a custom friendly name **SHOULD** be used.
* If none of these options apply, the `gen_ai.system` **SHOULD** be set to `_OTHER`.
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_SYSTEM = 'gen_ai.system' as const;

/**
* The type of token being counted.
*
* @example input
* @example output
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_TOKEN_TYPE = 'gen_ai.token.type' as const;

/**
* The number of tokens used in the GenAI input (prompt).
*
* @example 100
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_USAGE_INPUT_TOKENS =
'gen_ai.usage.input_tokens' as const;

/**
* The number of tokens used in the GenAI response (completion).
*
* @example 180
*
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const ATTR_GEN_AI_USAGE_OUTPUT_TOKENS =
'gen_ai.usage.output_tokens' as const;

/**
* GenAI operation duration
*
* @experimental This metric is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const METRIC_GEN_AI_CLIENT_OPERATION_DURATION =
'gen_ai.client.operation.duration';
export const METRIC_GEN_AI_CLIENT_TOKEN_USAGE = 'gen_ai.client.token.usage';
'gen_ai.client.operation.duration' as const;

export const ATTR_GEN_AI_REQUEST_ENCODING_FORMATS =
'gen_ai.request.encoding_formats';

// The JS semconv package doesn't yet emit constants for event names.
// TODO: otel-js issue for semconv pkg not including event names

/**
 * Event name for a system-role message sent to the GenAI model.
 *
 * @experimental These event names follow unstable GenAI semantic conventions
 * and are subject to change.
 */
export const EVENT_GEN_AI_SYSTEM_MESSAGE = 'gen_ai.system.message';

/** Event name for a user-role message sent to the GenAI model. */
export const EVENT_GEN_AI_USER_MESSAGE = 'gen_ai.user.message';

/** Event name for an assistant-role message. */
export const EVENT_GEN_AI_ASSISTANT_MESSAGE = 'gen_ai.assistant.message';

/** Event name for a tool/function-role message. */
export const EVENT_GEN_AI_TOOL_MESSAGE = 'gen_ai.tool.message';

/** Event name for a response candidate (choice) returned by the model. */
export const EVENT_GEN_AI_CHOICE = 'gen_ai.choice';
/**
* Measures number of input and output tokens used
*
* @experimental This metric is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
*/
export const METRIC_GEN_AI_CLIENT_TOKEN_USAGE =
'gen_ai.client.token.usage' as const;
Loading