Skip to content

Commit 9121250

Browse files
authored
feat(ai): Expose provider metadata as an attribute on exported OTEL spans (#7080)
## Background

Right now there doesn't appear to be a way for tracing providers to get accurate metadata if users are using Anthropic/OpenAI/Gemini's prompt caching features. The tokens used to create/read caches are returned in a providerMetadata field on the response that does not seem to be passed anywhere into the exported spans.

## Summary

Adds a new `ai.response.providerMetadata` span attribute that exposes this response field.

## Verification

Ran `pnpm build` and modified the `anthropic-cache-control.ts` example. Saw the new span attribute present:

```
{
  resource: {
    attributes: {
      'service.name': 'unknown_service:/usr/local/bin/node',
      'telemetry.sdk.language': 'nodejs',
      'telemetry.sdk.name': 'opentelemetry',
      'telemetry.sdk.version': '2.0.1'
    }
  },
  instrumentationScope: { name: 'ai', version: undefined, schemaUrl: undefined },
  ...
  attributes: {
    'operation.name': 'ai.generateText',
    'ai.operationId': 'ai.generateText',
    'ai.model.provider': 'anthropic.messages',
    'ai.model.id': 'claude-3-5-sonnet-20240620',
    'ai.settings.maxRetries': 2,
    'ai.prompt': `...`,
    'ai.response.providerMetadata': '{"anthropic":{"cacheCreationInputTokens":2157}}',
    'ai.usage.promptTokens': 10,
    'ai.usage.completionTokens': 456
  },
  ...
}
```

## Related Issues

Fixes #7079
1 parent 97fedf9 commit 9121250

File tree

14 files changed

+65
-0
lines changed

14 files changed

+65
-0
lines changed

.changeset/selfish-masks-jog.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'ai': patch
3+
---
4+
5+
Expose provider metadata as an attribute on exported OTEL spans

content/docs/03-ai-sdk-core/60-telemetry.mdx

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,7 @@ Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.s
257257
- `ai.model.id`: the id of the model
258258
- `ai.model.provider`: the provider of the model
259259
- `ai.request.headers.*`: the request headers that were passed in through `headers`
260+
- `ai.response.providerMetadata`: provider specific metadata returned with the generation response
260261
- `ai.settings.maxRetries`: the maximum number of retries that were set
261262
- `ai.telemetry.functionId`: the functionId that was set through `telemetry.functionId`
262263
- `ai.telemetry.metadata.*`: the metadata that was passed in through `telemetry.metadata`

packages/ai/core/generate-object/__snapshots__/generate-object.test.ts.snap

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ exports[`telemetry > should record telemetry data when enabled 1`] = `
5858
"ai.request.headers.header2": "value2",
5959
"ai.response.finishReason": "stop",
6060
"ai.response.object": "{"content":"Hello, world!"}",
61+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
6162
"ai.schema": "{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"content":{"type":"string"}},"required":["content"],"additionalProperties":false}",
6263
"ai.schema.description": "test description",
6364
"ai.schema.name": "test-name",
@@ -91,6 +92,7 @@ exports[`telemetry > should record telemetry data when enabled 1`] = `
9192
"ai.response.id": "test-id-from-model",
9293
"ai.response.model": "test-response-model-id",
9394
"ai.response.object": "{ "content": "Hello, world!" }",
95+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
9496
"ai.response.timestamp": "1970-01-01T00:00:10.000Z",
9597
"ai.settings.frequencyPenalty": 0.3,
9698
"ai.settings.maxRetries": 2,

packages/ai/core/generate-object/__snapshots__/stream-object.test.ts.snap

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,11 @@ exports[`streamObject > output = "object" > result.fullStream > should send full
9696
},
9797
{
9898
"finishReason": "stop",
99+
"providerMetadata": {
100+
"testProvider": {
101+
"testKey": "testValue",
102+
},
103+
},
99104
"response": {
100105
"id": "id-0",
101106
"modelId": "mock-model-id",
@@ -181,6 +186,7 @@ exports[`streamObject > telemetry > should record telemetry data when enabled 1`
181186
"ai.request.headers.header1": "value1",
182187
"ai.request.headers.header2": "value2",
183188
"ai.response.object": "{"content":"Hello, world!"}",
189+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
184190
"ai.schema": "{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"content":{"type":"string"}},"required":["content"],"additionalProperties":false}",
185191
"ai.schema.description": "test description",
186192
"ai.schema.name": "test-name",
@@ -215,6 +221,7 @@ exports[`streamObject > telemetry > should record telemetry data when enabled 1`
215221
"ai.response.id": "id-0",
216222
"ai.response.model": "mock-model-id",
217223
"ai.response.object": "{"content":"Hello, world!"}",
224+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
218225
"ai.response.timestamp": "1970-01-01T00:00:00.000Z",
219226
"ai.settings.frequencyPenalty": 0.3,
220227
"ai.settings.maxRetries": 2,

packages/ai/core/generate-object/generate-object.test.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -910,6 +910,11 @@ describe('telemetry', () => {
910910
timestamp: new Date(10000),
911911
modelId: 'test-response-model-id',
912912
},
913+
providerMetadata: {
914+
testProvider: {
915+
testKey: 'testValue',
916+
},
917+
},
913918
}),
914919
}),
915920
schema: z.object({ content: z.string() }),

packages/ai/core/generate-object/generate-object.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -387,6 +387,9 @@ Default and recommended: 'auto' (best mode for the model).
387387
'ai.response.model': responseData.modelId,
388388
'ai.response.timestamp':
389389
responseData.timestamp.toISOString(),
390+
'ai.response.providerMetadata': JSON.stringify(
391+
result.providerMetadata,
392+
),
390393

391394
// TODO rename telemetry attributes to inputTokens and outputTokens
392395
'ai.usage.promptTokens': result.usage.inputTokens,
@@ -486,6 +489,9 @@ Default and recommended: 'auto' (best mode for the model).
486489
'ai.response.object': {
487490
output: () => JSON.stringify(object),
488491
},
492+
'ai.response.providerMetadata': JSON.stringify(
493+
resultProviderMetadata,
494+
),
489495

490496
// TODO rename telemetry attributes to inputTokens and outputTokens
491497
'ai.usage.promptTokens': usage.inputTokens,

packages/ai/core/generate-object/stream-object.test.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,11 @@ function createTestModel({
5555
type: 'finish',
5656
finishReason: 'stop',
5757
usage: testUsage,
58+
providerMetadata: {
59+
testProvider: {
60+
testKey: 'testValue',
61+
},
62+
},
5863
},
5964
]),
6065
request = undefined,
@@ -1394,6 +1399,11 @@ describe('streamObject', () => {
13941399
type: 'finish',
13951400
finishReason: 'stop',
13961401
usage: testUsage,
1402+
providerMetadata: {
1403+
testProvider: {
1404+
testKey: 'testValue',
1405+
},
1406+
},
13971407
},
13981408
]),
13991409
}),

packages/ai/core/generate-object/stream-object.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -758,6 +758,8 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
758758
'ai.response.model': fullResponse.modelId,
759759
'ai.response.timestamp':
760760
fullResponse.timestamp.toISOString(),
761+
'ai.response.providerMetadata':
762+
JSON.stringify(providerMetadata),
761763

762764
'ai.usage.inputTokens': finalUsage.inputTokens,
763765
'ai.usage.outputTokens': finalUsage.outputTokens,
@@ -793,6 +795,8 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
793795
'ai.response.object': {
794796
output: () => JSON.stringify(object),
795797
},
798+
'ai.response.providerMetadata':
799+
JSON.stringify(providerMetadata),
796800
},
797801
}),
798802
);

packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1115,6 +1115,7 @@ exports[`generateText > telemetry > should record telemetry data when enabled 1`
11151115
"ai.request.headers.header1": "value1",
11161116
"ai.request.headers.header2": "value2",
11171117
"ai.response.finishReason": "stop",
1118+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
11181119
"ai.response.text": "Hello, world!",
11191120
"ai.settings.frequencyPenalty": 0.3,
11201121
"ai.settings.maxRetries": 2,
@@ -1147,6 +1148,7 @@ exports[`generateText > telemetry > should record telemetry data when enabled 1`
11471148
"ai.response.finishReason": "stop",
11481149
"ai.response.id": "test-id-from-model",
11491150
"ai.response.model": "test-response-model-id",
1151+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
11501152
"ai.response.text": "Hello, world!",
11511153
"ai.response.timestamp": "1970-01-01T00:00:10.000Z",
11521154
"ai.settings.frequencyPenalty": 0.3,

packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@ exports[`streamText > options.transform > with base transformation > telemetry s
142142
"ai.operationId": "ai.streamText",
143143
"ai.prompt": "{"prompt":"test-input"}",
144144
"ai.response.finishReason": "stop",
145+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"TEST VALUE"}}",
145146
"ai.response.text": "HELLO, WORLD!",
146147
"ai.response.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","input":{"value":"VALUE"}}]",
147148
"ai.settings.maxRetries": 2,
@@ -169,6 +170,7 @@ exports[`streamText > options.transform > with base transformation > telemetry s
169170
"ai.response.model": "mock-model-id",
170171
"ai.response.msToFinish": 500,
171172
"ai.response.msToFirstChunk": 100,
173+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
172174
"ai.response.text": "Hello, world!",
173175
"ai.response.timestamp": "1970-01-01T00:00:00.000Z",
174176
"ai.response.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","input":{"value":"VALUE"}}]",
@@ -846,6 +848,7 @@ exports[`streamText > telemetry > should record telemetry data when enabled 1`]
846848
"ai.request.headers.header1": "value1",
847849
"ai.request.headers.header2": "value2",
848850
"ai.response.finishReason": "stop",
851+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
849852
"ai.response.text": "Hello, world!",
850853
"ai.settings.frequencyPenalty": 0.3,
851854
"ai.settings.maxRetries": 2,
@@ -882,6 +885,7 @@ exports[`streamText > telemetry > should record telemetry data when enabled 1`]
882885
"ai.response.model": "mock-model-id",
883886
"ai.response.msToFinish": 500,
884887
"ai.response.msToFirstChunk": 100,
888+
"ai.response.providerMetadata": "{"testProvider":{"testKey":"testValue"}}",
885889
"ai.response.text": "Hello, world!",
886890
"ai.response.timestamp": "1970-01-01T00:00:00.000Z",
887891
"ai.settings.frequencyPenalty": 0.3,

0 commit comments

Comments (0)