@@ -0,0 +1,51 @@
import * as Sentry from '@sentry/node';
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const largeContent1 = 'A'.repeat(15000); // ~15KB
    const largeContent2 = 'B'.repeat(15000); // ~15KB
    const largeContent3 = 'C'.repeat(25000) + 'D'.repeat(25000); // ~50KB (will be truncated)

    // Test 1: Messages array with large last message that gets truncated.
    // Only the last message should be kept, and it should be truncated to only Cs.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 5 },
          text: 'Response to truncated messages',
        }),
      }),
      messages: [
        { role: 'user', content: largeContent1 },
        { role: 'assistant', content: largeContent2 },
        { role: 'user', content: largeContent3 },
      ],
    });

    // Test 2: Messages array where last message is small and kept intact.
    const smallContent = 'This is a small message that fits within the limit';
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 5 },
          text: 'Response to small message',
        }),
      }),
      messages: [
        { role: 'user', content: largeContent1 },
        { role: 'assistant', content: largeContent2 },
        { role: 'user', content: smallContent },
      ],
    });
  });
}

run();
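
The scenario above exercises byte-based truncation of the recorded gen_ai messages. As a rough mental model only (the ~20 KB budget and the helper below are assumptions for illustration, not the SDK's actual constants or API), the behavior the assertions depend on is: keep messages from the newest backwards until a byte budget is exhausted, and if even the newest message alone exceeds the budget, crop its content from the end, which is why only the leading 'C' characters survive in Test 1.

// Hypothetical sketch of last-message-first truncation under a byte budget (TypeScript).
// Simplified: it crops by character count and ignores multi-byte edge cases.
interface GenAiMessage {
  role: string;
  content: string;
}

const MAX_MESSAGES_BYTES = 20_000; // assumed budget, for illustration only

function truncateMessagesByBytes(messages: GenAiMessage[], maxBytes = MAX_MESSAGES_BYTES): GenAiMessage[] {
  const kept: GenAiMessage[] = [];
  let used = 0;

  // Walk backwards so the most recent messages are preferred.
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i];
    const size = Buffer.byteLength(JSON.stringify(message), 'utf8');

    if (used + size <= maxBytes) {
      kept.unshift(message);
      used += size;
      continue;
    }

    // Budget exhausted. If nothing has been kept yet, keep the newest message but crop its
    // content to the remaining budget (Test 1: the trailing 'D's are dropped, the 'C's stay).
    if (kept.length === 0) {
      kept.unshift({ ...message, content: message.content.slice(0, maxBytes) });
    }
    break;
  }

  return kept;
}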
@@ -5,7 +5,6 @@ import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_PROMPT_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -90,7 +89,6 @@ describe('Vercel AI integration', () => {
// Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
expect.objectContaining({
data: {
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -105,7 +103,7 @@ describe('Vercel AI integration', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.settings.maxSteps': 1,
@@ -230,7 +228,6 @@ describe('Vercel AI integration', () => {
// First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
expect.objectContaining({
data: {
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -245,7 +242,7 @@ describe('Vercel AI integration', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.settings.maxSteps': 1,
@@ -303,7 +300,6 @@ describe('Vercel AI integration', () => {
// Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
expect.objectContaining({
data: {
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -318,7 +314,7 @@ describe('Vercel AI integration', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.settings.maxSteps': 1,
@@ -375,7 +371,6 @@ describe('Vercel AI integration', () => {
// Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
expect.objectContaining({
data: {
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -391,7 +386,7 @@ describe('Vercel AI integration', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
'vercel.ai.response.finishReason': 'tool-calls',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.settings.maxSteps': 1,
@@ -796,4 +791,43 @@ describe('Vercel AI integration', () => {
});
},
);

  createEsmAndCjsTests(
    __dirname,
    'scenario-message-truncation.mjs',
    'instrument-with-pii.mjs',
    (createRunner, test) => {
      test('truncates messages when they exceed byte limit', async () => {
        await createRunner()
          .ignore('event')
          .expect({
            transaction: {
              transaction: 'main',
              spans: expect.arrayContaining([
                // First call: Last message truncated (only C's remain, D's are cropped)
                expect.objectContaining({
                  data: expect.objectContaining({
                    [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/),
                    [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages',
                  }),
                }),
                // Second call: Last message is small and kept intact
                expect.objectContaining({
                  data: expect.objectContaining({
                    [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining(
                      'This is a small message that fits within the limit',
                    ),
                    [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to small message',
                  }),
                }),
              ]),
            },
          })
          .start()
          .completed();
      });
    },
  );
});
@@ -5,7 +5,6 @@ import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_PROMPT_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -92,12 +91,11 @@ describe('Vercel AI integration (V5)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.response.finishReason': 'stop',
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -229,14 +227,13 @@ describe('Vercel AI integration (V5)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
'vercel.ai.response.finishReason': 'stop',
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -290,14 +287,13 @@ describe('Vercel AI integration (V5)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.response.finishReason': 'stop',
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -350,14 +346,13 @@ describe('Vercel AI integration (V5)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
'vercel.ai.response.finishReason': 'tool-calls',
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
@@ -4,7 +4,6 @@ import { afterAll, describe, expect } from 'vitest';
import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_PROMPT_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -93,13 +92,12 @@ describe('Vercel AI integration (V6)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
'vercel.ai.response.finishReason': 'stop',
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
@@ -232,14 +230,13 @@ describe('Vercel AI integration (V6)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
'vercel.ai.response.finishReason': 'stop',
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -293,14 +290,13 @@ describe('Vercel AI integration (V6)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.response.finishReason': 'stop',
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -353,14 +349,13 @@ describe('Vercel AI integration (V6)', () => {
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
'vercel.ai.response.finishReason': 'tool-calls',
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
31 changes: 22 additions & 9 deletions packages/core/src/tracing/vercel-ai/utils.ts
@@ -111,6 +111,16 @@ export function convertPromptToMessages(prompt: string): { role: string; content
try {
const p = JSON.parse(prompt);
if (!!p && typeof p === 'object') {
// Handle messages array format: { messages: [...] }
const { messages } = p as { messages?: unknown };
if (Array.isArray(messages)) {
return messages.filter(
(m: unknown): m is { role: string; content: string } =>
!!m && typeof m === 'object' && 'role' in m && 'content' in m,
);
}

// Handle prompt/system string format: { prompt: "...", system: "..." }
const { prompt, system } = p;
if (typeof prompt === 'string' || typeof system === 'string') {
const messages: { role: string; content: string }[] = [];
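
As an aside, a couple of illustrative calls showing what the two branches above return; the import path is assumed for illustration, and the prompt-only output matches the '[{"role":"user","content":"Where is the second span?"}]' values asserted in the integration tests above.

import { convertPromptToMessages } from './utils'; // path assumed for illustration

// Messages-array shape: entries with both `role` and `content` pass the filter above.
convertPromptToMessages('{"messages":[{"role":"user","content":"Hi"},{"role":"assistant","content":"Hello"}]}');
// -> [{ role: 'user', content: 'Hi' }, { role: 'assistant', content: 'Hello' }]

// Prompt string shape: wrapped as a single user message.
convertPromptToMessages('{"prompt":"Where is the second span?"}');
// -> [{ role: 'user', content: 'Where is the second span?' }]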
@@ -133,16 +143,13 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
* invoke_agent op
*/
export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void {
if (attributes[AI_PROMPT_ATTRIBUTE]) {
const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]);
span.setAttribute('gen_ai.prompt', truncatedPrompt);
}
const prompt = attributes[AI_PROMPT_ATTRIBUTE];
if (
typeof prompt === 'string' &&
typeof attributes[AI_PROMPT_ATTRIBUTE] === 'string' &&
!attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] &&
!attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]
) {
// No messages array is present, so we need to convert the prompt to the proper messages format
const prompt = attributes[AI_PROMPT_ATTRIBUTE];
const messages = convertPromptToMessages(prompt);
if (messages.length) {
const { systemInstructions, filteredMessages } = extractSystemInstructions(messages);
@@ -152,12 +159,16 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
}

const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
const truncatedMessages = getTruncatedJsonString(filteredMessages);

span.setAttributes({
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(filteredMessages),
[AI_PROMPT_ATTRIBUTE]: truncatedMessages,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
});
}
} else if (typeof attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] === 'string') {
// In this case we already get a properly formatted messages array, this is the preferred way to get the messages
try {
const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]);
if (Array.isArray(messages)) {
@@ -168,9 +179,11 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
}

const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
const truncatedMessages = getTruncatedJsonString(filteredMessages);

span.setAttributes({
[AI_PROMPT_MESSAGES_ATTRIBUTE]: undefined,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(filteredMessages),
[AI_PROMPT_MESSAGES_ATTRIBUTE]: truncatedMessages,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
});
}
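
Taken together with the test updates above, the net effect for a prompt-only call is that the legacy 'vercel.ai.prompt' attribute (AI_PROMPT_ATTRIBUTE) now carries the same truncated messages JSON as the gen_ai input-messages attribute, along with the original message count, instead of the raw '{"prompt":"..."}' payload. A minimal standalone sketch of that shape, with local names standing in for the SDK constants:

// Not SDK code; `genAiInputMessages*` are stand-in names for GEN_AI_INPUT_MESSAGES_ATTRIBUTE
// and GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE. Values mirror the test expectations above.
const messagesJson = JSON.stringify([{ role: 'user', content: 'Where is the first span?' }]);

const emittedAttributes = {
  'vercel.ai.prompt': messagesJson, // previously '{"prompt":"Where is the first span?"}'
  genAiInputMessages: messagesJson,
  genAiInputMessagesOriginalLength: 1,
};

console.log(emittedAttributes);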