
Commit d48395d

fix: Vercel ai import-in-the-middle patching
1 parent f0c9458 commit d48395d

4 files changed, +201 -121 lines changed

instrument.mjs (new file)

@@ -0,0 +1,9 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  transport: loggingTransport,
+});
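As far as I can tell, `loggingTransport` is the test transport from `@sentry-internal/node-integration-tests` that writes serialized envelopes to stdout instead of sending them to Sentry, which is what lets `createRunner` in the test file below assert on the captured transaction.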
scenario.mjs (new file)

@@ -0,0 +1,49 @@
+import * as Sentry from '@sentry/node';
+import { generateText } from 'ai';
+import { MockLanguageModelV1 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    await generateText({
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'First span here!',
+        }),
+      }),
+      prompt: 'Where is the first span?',
+    });
+
+    // This span should have input and output prompts attached because telemetry is explicitly enabled.
+    await generateText({
+      experimental_telemetry: { isEnabled: true },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'Second span here!',
+        }),
+      }),
+      prompt: 'Where is the second span?',
+    });
+
+    // This span should not be captured because we've disabled telemetry
+    await generateText({
+      experimental_telemetry: { isEnabled: false },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'Third span here!',
+        }),
+      }),
+      prompt: 'Where is the third span?',
+    });
+  });
+}
+
+run();
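The comments in the scenario line up with how the instrumentation is expected to handle `experimental_telemetry` (the `instrumentation.ts` hunk further down only shows the first lines of that logic): an explicit `isEnabled` value is respected, while an omitted value turns telemetry on without recording prompts. A minimal sketch of that assumed behavior, with the helper name `withTelemetryDefaults` invented for illustration:

// Hedged sketch, not part of this commit: roughly how the patched wrapper is
// assumed to normalize the telemetry option before calling the real method.
function withTelemetryDefaults(options) {
  const existing = options.experimental_telemetry || {};
  if (existing.isEnabled !== undefined) {
    // An explicit `isEnabled: true`/`false` is left alone, which is why the second
    // call above records prompts and the third call produces no spans at all.
    return options;
  }
  // Otherwise telemetry is switched on so spans are emitted, but inputs and
  // outputs are not recorded -- matching the first call in the scenario.
  return {
    ...options,
    experimental_telemetry: { isEnabled: true, recordInputs: false, recordOutputs: false, ...existing },
  };
}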
test.ts

@@ -1,3 +1,4 @@
+import { join } from 'node:path';
 import { afterAll, describe, expect, test } from 'vitest';
 import { cleanupChildProcesses, createRunner } from '../../../utils/runner';
 
@@ -7,125 +8,133 @@ describe('ai', () => {
     cleanupChildProcesses();
   });
 
-  test('creates ai related spans', async () => {
-    const EXPECTED_TRANSACTION = {
-      transaction: 'main',
-      spans: expect.arrayContaining([
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
-            'ai.model.id': 'mock-model-id',
-            'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.operationId': 'ai.generateText',
-            'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
-            'ai.response.finishReason': 'stop',
-            'ai.settings.maxRetries': 2,
-            'ai.settings.maxSteps': 1,
-            'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
-            'operation.name': 'ai.generateText',
-            'sentry.op': 'ai.pipeline.generateText',
-            'sentry.origin': 'auto.vercelai.otel',
-          }),
-          description: 'generateText',
-          op: 'ai.pipeline.generateText',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+  const EXPECTED_TRANSACTION = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'ai.completion_tokens.used': 20,
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.model_id': 'mock-model-id',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
         }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'sentry.origin': 'auto.vercelai.otel',
-            'sentry.op': 'ai.run.doGenerate',
-            'operation.name': 'ai.generateText.doGenerate',
-            'ai.operationId': 'ai.generateText.doGenerate',
-            'ai.model.provider': 'mock-provider',
-            'ai.model.id': 'mock-model-id',
-            'ai.settings.maxRetries': 2,
-            'gen_ai.system': 'mock-provider',
-            'gen_ai.request.model': 'mock-model-id',
-            'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
-            'ai.streaming': false,
-            'ai.response.finishReason': 'stop',
-            'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
-            'gen_ai.response.finish_reasons': ['stop'],
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
-          }),
-          description: 'generateText.doGenerate',
-          op: 'ai.run.doGenerate',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'sentry.origin': 'auto.vercelai.otel',
+          'sentry.op': 'ai.run.doGenerate',
+          'operation.name': 'ai.generateText.doGenerate',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
+          'ai.model.id': 'mock-model-id',
+          'ai.settings.maxRetries': 2,
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.request.model': 'mock-model-id',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model_id': 'mock-model-id',
+          'ai.streaming': false,
+          'ai.response.finishReason': 'stop',
+          'ai.response.model': 'mock-model-id',
+          'ai.usage.promptTokens': 10,
+          'ai.usage.completionTokens': 20,
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'ai.completion_tokens.used': 20,
+          'ai.prompt_tokens.used': 10,
+          'ai.total_tokens.used': 30,
        }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
-            'ai.model.id': 'mock-model-id',
-            'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.prompt': '{"prompt":"Where is the second span?"}',
-            'ai.operationId': 'ai.generateText',
-            'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
-            'ai.response.finishReason': 'stop',
-            'ai.input_messages': '{"prompt":"Where is the second span?"}',
-            'ai.settings.maxRetries': 2,
-            'ai.settings.maxSteps': 1,
-            'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
-            'operation.name': 'ai.generateText',
-            'sentry.op': 'ai.pipeline.generateText',
-            'sentry.origin': 'auto.vercelai.otel',
-          }),
-          description: 'generateText',
-          op: 'ai.pipeline.generateText',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText.doGenerate',
+        op: 'ai.run.doGenerate',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'ai.completion_tokens.used': 20,
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.model_id': 'mock-model-id',
+          'ai.prompt': '{"prompt":"Where is the second span?"}',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.input_messages': '{"prompt":"Where is the second span?"}',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
        }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'sentry.origin': 'auto.vercelai.otel',
-            'sentry.op': 'ai.run.doGenerate',
-            'operation.name': 'ai.generateText.doGenerate',
-            'ai.operationId': 'ai.generateText.doGenerate',
-            'ai.model.provider': 'mock-provider',
-            'ai.model.id': 'mock-model-id',
-            'ai.settings.maxRetries': 2,
-            'gen_ai.system': 'mock-provider',
-            'gen_ai.request.model': 'mock-model-id',
-            'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
-            'ai.streaming': false,
-            'ai.response.finishReason': 'stop',
-            'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
-            'gen_ai.response.finish_reasons': ['stop'],
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
-          }),
-          description: 'generateText.doGenerate',
-          op: 'ai.run.doGenerate',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'sentry.origin': 'auto.vercelai.otel',
+          'sentry.op': 'ai.run.doGenerate',
+          'operation.name': 'ai.generateText.doGenerate',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
+          'ai.model.id': 'mock-model-id',
+          'ai.settings.maxRetries': 2,
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.request.model': 'mock-model-id',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model_id': 'mock-model-id',
+          'ai.streaming': false,
+          'ai.response.finishReason': 'stop',
+          'ai.response.model': 'mock-model-id',
+          'ai.usage.promptTokens': 10,
+          'ai.usage.completionTokens': 20,
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'ai.completion_tokens.used': 20,
+          'ai.prompt_tokens.used': 10,
+          'ai.total_tokens.used': 30,
        }),
-      ]),
-    };
+        description: 'generateText.doGenerate',
+        op: 'ai.run.doGenerate',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+    ]),
+  };
 
+  test('creates ai related spans - cjs', async () => {
     await createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
   });
+
+  test('creates ai related spans - esm', async () => {
+    await createRunner(__dirname, 'scenario.mjs')
+      .withFlags('--import', join(__dirname, 'instrument.mjs'))
+      .expect({ transaction: EXPECTED_TRANSACTION })
+      .start()
+      .completed();
+  });
 });
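For the ESM variant the runner passes Node's `--import` flag, so `instrument.mjs` (and with it Sentry's ESM loader hooks from import-in-the-middle) is loaded before `scenario.mjs` ever imports `ai`; outside the test harness this would presumably boil down to something like `node --import ./instrument.mjs ./scenario.mjs`. The CJS test keeps running the existing `scenario.js`, which goes through the usual require-based patching path instead.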

packages/node/src/integrations/tracing/vercelai/instrumentation.ts

+20 -7 lines changed
@@ -66,7 +66,7 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase {
     this._callbacks.forEach(callback => callback());
     this._callbacks = [];
 
-    function generatePatch(name: string) {
+    function generatePatch(originalMethod: (...args: MethodArgs) => unknown) {
       return (...args: MethodArgs) => {
         const existingExperimentalTelemetry = args[0].experimental_telemetry || {};
         const isEnabled = existingExperimentalTelemetry.isEnabled;
@@ -83,15 +83,28 @@
        }
 
        // @ts-expect-error we know that the method exists
-        return moduleExports[name].apply(this, args);
+        return originalMethod.apply(this, args);
      };
    }
 
-    const patchedModuleExports = INSTRUMENTED_METHODS.reduce((acc, curr) => {
-      acc[curr] = generatePatch(curr);
-      return acc;
-    }, {} as PatchedModuleExports);
+    // Is this an ESM module?
+    // https://tc39.es/ecma262/#sec-module-namespace-objects
+    if (Object.prototype.toString.call(moduleExports) === '[object Module]') {
+      // In ESM we take the usual route and just replace the exports we want to instrument
+      for (const method of INSTRUMENTED_METHODS) {
+        moduleExports[method] = generatePatch(moduleExports[method]);
+      }
 
-    return { ...moduleExports, ...patchedModuleExports };
+      return moduleExports;
+    } else {
+      // In CJS we can't replace the exports in the original module because they
+      // don't have setters, so we create a new object with the same properties
+      const patchedModuleExports = INSTRUMENTED_METHODS.reduce((acc, curr) => {
+        acc[curr] = generatePatch(moduleExports[curr]);
+        return acc;
+      }, {} as PatchedModuleExports);
+
+      return { ...moduleExports, ...patchedModuleExports };
+    }
  }
}
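The `[object Module]` brand check in the new code works because ESM module namespace objects expose the well-known `Symbol.toStringTag` value `'Module'`, and (as I understand it) the namespace stand-in that import-in-the-middle passes to the hook is writable, whereas a real namespace object is not and a CJS `exports` object is just a plain object. A self-contained sketch of that distinction (illustrative only, not taken from the commit):

// check-module-brand.mjs -- hedged illustration, not part of this commit
import * as nodePath from 'node:path';

// Module namespace objects stringify as '[object Module]' ...
console.log(Object.prototype.toString.call(nodePath)); // '[object Module]'
// ... while a CJS-style exports object is a plain object.
console.log(Object.prototype.toString.call({ generateText() {} })); // '[object Object]'

// A genuine namespace object rejects assignment (module code runs in strict mode),
// which is why only the writable stand-in provided by import-in-the-middle can be
// patched in place, and why the CJS branch copies exports into a new object instead.
try {
  nodePath.join = () => 'patched';
} catch (error) {
  console.log('cannot reassign a real namespace export:', error.constructor.name); // TypeError
}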

0 commit comments
