diff --git a/dev-packages/node-integration-tests/suites/tracing/ai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/ai/instrument.mjs
new file mode 100644
index 000000000000..46a27dd03b74
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/ai/instrument.mjs
@@ -0,0 +1,9 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  transport: loggingTransport,
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/ai/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/ai/scenario.mjs
new file mode 100644
index 000000000000..9df798eed59e
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/ai/scenario.mjs
@@ -0,0 +1,49 @@
+import * as Sentry from '@sentry/node';
+import { generateText } from 'ai';
+import { MockLanguageModelV1 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    await generateText({
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'First span here!',
+        }),
+      }),
+      prompt: 'Where is the first span?',
+    });
+
+    // This span should have input and output prompts attached because telemetry is explicitly enabled.
+    await generateText({
+      experimental_telemetry: { isEnabled: true },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'Second span here!',
+        }),
+      }),
+      prompt: 'Where is the second span?',
+    });
+
+    // This span should not be captured because we've disabled telemetry
+    await generateText({
+      experimental_telemetry: { isEnabled: false },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'Third span here!',
+        }),
+      }),
+      prompt: 'Where is the third span?',
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/ai/test.ts b/dev-packages/node-integration-tests/suites/tracing/ai/test.ts
index 6263d2d26a05..bb380febab78 100644
--- a/dev-packages/node-integration-tests/suites/tracing/ai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/ai/test.ts
@@ -1,3 +1,4 @@
+import { join } from 'node:path';
 import { afterAll, describe, expect, test } from 'vitest';
 import { cleanupChildProcesses, createRunner } from '../../../utils/runner';
 
@@ -7,125 +8,133 @@ describe('ai', () => {
     cleanupChildProcesses();
   });
 
-  test('creates ai related spans', async () => {
-    const EXPECTED_TRANSACTION = {
-      transaction: 'main',
-      spans: expect.arrayContaining([
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
-            'ai.model.id': 'mock-model-id',
-            'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.operationId': 'ai.generateText',
-            'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
-            'ai.response.finishReason': 'stop',
-            'ai.settings.maxRetries': 2,
-            'ai.settings.maxSteps': 1,
-            'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
-            'operation.name': 'ai.generateText',
-            'sentry.op': 'ai.pipeline.generateText',
-            'sentry.origin': 'auto.vercelai.otel',
-          }),
-          description: 'generateText',
-          op: 'ai.pipeline.generateText',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+  const EXPECTED_TRANSACTION = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'ai.completion_tokens.used': 20,
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.model_id': 'mock-model-id',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
         }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'sentry.origin': 'auto.vercelai.otel',
-            'sentry.op': 'ai.run.doGenerate',
-            'operation.name': 'ai.generateText.doGenerate',
-            'ai.operationId': 'ai.generateText.doGenerate',
-            'ai.model.provider': 'mock-provider',
-            'ai.model.id': 'mock-model-id',
-            'ai.settings.maxRetries': 2,
-            'gen_ai.system': 'mock-provider',
-            'gen_ai.request.model': 'mock-model-id',
-            'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
-            'ai.streaming': false,
-            'ai.response.finishReason': 'stop',
-            'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
-            'gen_ai.response.finish_reasons': ['stop'],
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
-          }),
-          description: 'generateText.doGenerate',
-          op: 'ai.run.doGenerate',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'sentry.origin': 'auto.vercelai.otel',
+          'sentry.op': 'ai.run.doGenerate',
+          'operation.name': 'ai.generateText.doGenerate',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
+          'ai.model.id': 'mock-model-id',
+          'ai.settings.maxRetries': 2,
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.request.model': 'mock-model-id',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model_id': 'mock-model-id',
+          'ai.streaming': false,
+          'ai.response.finishReason': 'stop',
+          'ai.response.model': 'mock-model-id',
+          'ai.usage.promptTokens': 10,
+          'ai.usage.completionTokens': 20,
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'ai.completion_tokens.used': 20,
+          'ai.prompt_tokens.used': 10,
+          'ai.total_tokens.used': 30,
         }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
-            'ai.model.id': 'mock-model-id',
-            'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.prompt': '{"prompt":"Where is the second span?"}',
-            'ai.operationId': 'ai.generateText',
-            'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
-            'ai.response.finishReason': 'stop',
-            'ai.input_messages': '{"prompt":"Where is the second span?"}',
-            'ai.settings.maxRetries': 2,
-            'ai.settings.maxSteps': 1,
-            'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
-            'operation.name': 'ai.generateText',
-            'sentry.op': 'ai.pipeline.generateText',
-            'sentry.origin': 'auto.vercelai.otel',
-          }),
-          description: 'generateText',
-          op: 'ai.pipeline.generateText',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText.doGenerate',
+        op: 'ai.run.doGenerate',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'ai.completion_tokens.used': 20,
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.model_id': 'mock-model-id',
+          'ai.prompt': '{"prompt":"Where is the second span?"}',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.input_messages': '{"prompt":"Where is the second span?"}',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
         }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'sentry.origin': 'auto.vercelai.otel',
-            'sentry.op': 'ai.run.doGenerate',
-            'operation.name': 'ai.generateText.doGenerate',
-            'ai.operationId': 'ai.generateText.doGenerate',
-            'ai.model.provider': 'mock-provider',
-            'ai.model.id': 'mock-model-id',
-            'ai.settings.maxRetries': 2,
-            'gen_ai.system': 'mock-provider',
-            'gen_ai.request.model': 'mock-model-id',
-            'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
-            'ai.streaming': false,
-            'ai.response.finishReason': 'stop',
-            'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
-            'gen_ai.response.finish_reasons': ['stop'],
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
-          }),
-          description: 'generateText.doGenerate',
-          op: 'ai.run.doGenerate',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'sentry.origin': 'auto.vercelai.otel',
+          'sentry.op': 'ai.run.doGenerate',
+          'operation.name': 'ai.generateText.doGenerate',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
+          'ai.model.id': 'mock-model-id',
+          'ai.settings.maxRetries': 2,
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.request.model': 'mock-model-id',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model_id': 'mock-model-id',
+          'ai.streaming': false,
+          'ai.response.finishReason': 'stop',
+          'ai.response.model': 'mock-model-id',
+          'ai.usage.promptTokens': 10,
+          'ai.usage.completionTokens': 20,
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'ai.completion_tokens.used': 20,
+          'ai.prompt_tokens.used': 10,
+          'ai.total_tokens.used': 30,
         }),
-      ]),
-    };
+        description: 'generateText.doGenerate',
+        op: 'ai.run.doGenerate',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+    ]),
+  };
 
+  test('creates ai related spans - cjs', async () => {
     await createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
   });
+
+  test('creates ai related spans - esm', async () => {
+    await createRunner(__dirname, 'scenario.mjs')
+      .withFlags('--import', join(__dirname, 'instrument.mjs'))
+      .expect({ transaction: EXPECTED_TRANSACTION })
+      .start()
+      .completed();
+  });
 });
diff --git a/packages/node/src/integrations/tracing/vercelai/instrumentation.ts b/packages/node/src/integrations/tracing/vercelai/instrumentation.ts
index 1b8a60db9aca..e4f8a5ba25ae 100644
--- a/packages/node/src/integrations/tracing/vercelai/instrumentation.ts
+++ b/packages/node/src/integrations/tracing/vercelai/instrumentation.ts
@@ -66,7 +66,7 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase {
     this._callbacks.forEach(callback => callback());
     this._callbacks = [];
 
-    function generatePatch(name: string) {
+    function generatePatch(originalMethod: (...args: MethodArgs) => unknown) {
       return (...args: MethodArgs) => {
         const existingExperimentalTelemetry = args[0].experimental_telemetry || {};
         const isEnabled = existingExperimentalTelemetry.isEnabled;
@@ -83,15 +83,28 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase {
         }
 
         // @ts-expect-error we know that the method exists
-        return moduleExports[name].apply(this, args);
+        return originalMethod.apply(this, args);
       };
     }
 
-    const patchedModuleExports = INSTRUMENTED_METHODS.reduce((acc, curr) => {
-      acc[curr] = generatePatch(curr);
-      return acc;
-    }, {} as PatchedModuleExports);
+    // Is this an ESM module?
+    // https://tc39.es/ecma262/#sec-module-namespace-objects
+    if (Object.prototype.toString.call(moduleExports) === '[object Module]') {
+      // In ESM we take the usual route and just replace the exports we want to instrument
+      for (const method of INSTRUMENTED_METHODS) {
+        moduleExports[method] = generatePatch(moduleExports[method]);
+      }
 
-    return { ...moduleExports, ...patchedModuleExports };
+      return moduleExports;
+    } else {
+      // In CJS we can't replace the exports in the original module because they
+      // don't have setters, so we create a new object with the same properties
+      const patchedModuleExports = INSTRUMENTED_METHODS.reduce((acc, curr) => {
+        acc[curr] = generatePatch(moduleExports[curr]);
+        return acc;
+      }, {} as PatchedModuleExports);
+
+      return { ...moduleExports, ...patchedModuleExports };
+    }
   }
 }
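
Note on the instrumentation.ts change above: ES module namespace objects answer '[object Module]' to Object.prototype.toString, which is what the new branch keys on before deciding whether to patch exports in place (ESM) or build a shallow copy with patched methods (CJS). The standalone sketch below is not part of the patch; wrapExports and the demo exports are made-up names, and it assumes the ESM namespace handed to the hook is writable, as the loader-provided namespaces the instrumentation receives are.

// Standalone sketch (TypeScript), not part of the diff: `wrapExports` and the
// demo module below are hypothetical; only the '[object Module]' check and the
// ESM/CJS branching mirror the patch above.
type AnyFn = (...args: unknown[]) => unknown;

function wrapExports(moduleExports: Record<string, unknown>, methods: string[]): Record<string, unknown> {
  const wrap =
    (original: AnyFn): AnyFn =>
    (...args: unknown[]) => {
      // A real patch would adjust the call options here (e.g. enable telemetry)
      // before delegating to the original function.
      return original(...args);
    };

  // ES module namespace objects stringify as '[object Module]'.
  if (Object.prototype.toString.call(moduleExports) === '[object Module]') {
    // ESM: assign the wrapped functions onto the (writable, loader-provided)
    // namespace and hand back the same object.
    for (const method of methods) {
      moduleExports[method] = wrap(moduleExports[method] as AnyFn);
    }
    return moduleExports;
  }

  // CJS: the exports object can simply be shallow-copied, so return a new
  // object whose wrapped methods shadow the originals.
  const patched: Record<string, unknown> = {};
  for (const method of methods) {
    patched[method] = wrap(moduleExports[method] as AnyFn);
  }
  return { ...moduleExports, ...patched };
}

// Usage with a plain CJS-style exports object:
const demoExports = { generateText: (prompt: string) => `echo: ${prompt}` };
const patchedDemo = wrapExports(demoExports, ['generateText']) as typeof demoExports;
console.log(patchedDemo.generateText('hi')); // 'echo: hi'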