From ee791f37c3d5adef1a159e79e3a36f31d99e7a7d Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Fri, 2 May 2025 10:17:26 +0200
Subject: [PATCH 1/7] 1

---
 .../generate-text/run-tools-transformation.ts |   4 +-
 .../prompt/convert-to-core-messages.test.ts   |   6 +-
 .../core/prompt/convert-to-core-messages.ts   |   4 +-
 .../convert-to-language-model-prompt.ts       |   6 +-
 .../ai/core/prompt/detect-prompt-type.test.ts |   6 +-
 packages/ai/core/prompt/index.ts              |  12 +-
 packages/ai/core/prompt/message.ts            | 108 ++++++++++++++----
 packages/ai/core/prompt/prompt.ts             |   4 +-
 packages/ai/core/prompt/standardize-prompt.ts |  10 +-
 packages/ai/core/tool/tool.ts                 |   4 +-
 10 files changed, 118 insertions(+), 46 deletions(-)

diff --git a/packages/ai/core/generate-text/run-tools-transformation.ts b/packages/ai/core/generate-text/run-tools-transformation.ts
index 03bebac6cf29..d36899b046b2 100644
--- a/packages/ai/core/generate-text/run-tools-transformation.ts
+++ b/packages/ai/core/generate-text/run-tools-transformation.ts
@@ -4,7 +4,7 @@ import {
 } from '@ai-sdk/provider';
 import { Tracer } from '@opentelemetry/api';
 import { ToolExecutionError } from '../../errors';
-import { CoreMessage } from '../prompt/message';
+import { ModelMessage } from '../prompt/message';
 import { assembleOperationName } from '../telemetry/assemble-operation-name';
 import { recordSpan } from '../telemetry/record-span';
 import { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes';
@@ -76,7 +76,7 @@ export function runToolsTransformation<TOOLS extends ToolSet>({
   tracer: Tracer;
   telemetry: TelemetrySettings | undefined;
   system: string | undefined;
-  messages: CoreMessage[];
+  messages: ModelMessage[];
   abortSignal: AbortSignal | undefined;
   repairToolCall: ToolCallRepairFunction<TOOLS> | undefined;
 }): ReadableStream<SingleRequestTextStreamPart<TOOLS>> {
diff --git a/packages/ai/core/prompt/convert-to-core-messages.test.ts b/packages/ai/core/prompt/convert-to-core-messages.test.ts
index 1fe670dae5dd..257fe41fe83a 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.test.ts
+++ b/packages/ai/core/prompt/convert-to-core-messages.test.ts
@@ -1,7 +1,7 @@
 import { z } from 'zod';
 import { tool } from '../tool/tool';
 import { convertToCoreMessages } from './convert-to-core-messages';
-import { CoreMessage } from './message';
+import { ModelMessage } from './message';
 
 describe('convertToCoreMessages', () => {
   describe('system message', () => {
@@ -159,7 +159,7 @@ describe('convertToCoreMessages', () => {
         { type: 'text', text: 'Hello, human!' },
       ],
     },
-  ] satisfies CoreMessage[]);
+  ] satisfies ModelMessage[]);
   });
 
   it('should convert an assistant message with file parts', () => {
@@ -188,7 +188,7 @@ describe('convertToCoreMessages', () => {
         },
       ],
     },
-  ] satisfies CoreMessage[]);
+  ] satisfies ModelMessage[]);
   });
 
   it('should handle assistant message with tool invocations', () => {
diff --git a/packages/ai/core/prompt/convert-to-core-messages.ts b/packages/ai/core/prompt/convert-to-core-messages.ts
index 83499362505a..ad293cd7f575 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.ts
+++ b/packages/ai/core/prompt/convert-to-core-messages.ts
@@ -7,7 +7,7 @@ import {
   UIMessage,
 } from '../types';
 import { ToolResultPart } from './content-part';
-import { AssistantContent, CoreMessage } from './message';
+import { AssistantContent, ModelMessage } from './message';
 import { MessageConversionError } from './message-conversion-error';
 
 /**
@@ -19,7 +19,7 @@ export function convertToCoreMessages<TOOLS extends ToolSet = never>(
   options?: { tools?: TOOLS },
 ) {
   const tools = options?.tools ?? ({} as TOOLS);
-  const coreMessages: CoreMessage[] = [];
+  const coreMessages: ModelMessage[] = [];
 
   for (let i = 0; i < messages.length; i++) {
     const message = messages[i];
diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts
index e9b84a585e3e..db808017fae7 100644
--- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts
+++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts
@@ -5,7 +5,7 @@ import {
   LanguageModelV2TextPart,
 } from '@ai-sdk/provider';
 import { download } from '../../util/download';
-import { CoreMessage } from '../prompt/message';
+import { ModelMessage } from '../prompt/message';
 import {
   detectMediaType,
   imageMediaTypeSignatures,
@@ -52,7 +52,7 @@ export async function convertToLanguageModelPrompt({
 * available if the model does not support URLs, null otherwise.
 */
 export function convertToLanguageModelMessage(
-  message: CoreMessage,
+  message: ModelMessage,
   downloadedAssets: Record<
     string,
     { mediaType: string | undefined; data: Uint8Array }
@@ -175,7 +175,7 @@ export function convertToLanguageModelMessage(
 * Downloads images and files from URLs in the messages.
 */
 async function downloadAssets(
-  messages: CoreMessage[],
+  messages: ModelMessage[],
   downloadImplementation: typeof download,
   supportedUrls: Record<string, RegExp[]>,
 ): Promise<
diff --git a/packages/ai/core/prompt/detect-prompt-type.test.ts b/packages/ai/core/prompt/detect-prompt-type.test.ts
index 36d1614c78c3..469dd497c0d9 100644
--- a/packages/ai/core/prompt/detect-prompt-type.test.ts
+++ b/packages/ai/core/prompt/detect-prompt-type.test.ts
@@ -1,6 +1,6 @@
 import { UIMessage } from '../types';
 import { detectPromptType } from './detect-prompt-type';
-import type { CoreMessage } from './message';
+import type { ModelMessage } from './message';
 
 it('should return "other" for invalid inputs', () => {
   expect(detectPromptType(null as any)).toBe('other');
@@ -27,7 +27,7 @@ it('should detect UI messages with file parts', () => {
 });
 
 it('should detect core messages with array content', () => {
-  const messages: CoreMessage[] = [
+  const messages: ModelMessage[] = [
     {
       role: 'user',
       content: [{ type: 'text', text: 'Hello' }],
@@ -37,7 +37,7 @@ it('should detect core messages with array content', () => {
 });
 
 it('should detect core messages with providerOptions', () => {
-  const messages: CoreMessage[] = [
+  const messages: ModelMessage[] = [
     {
       role: 'system',
       content: 'System prompt',
diff --git a/packages/ai/core/prompt/index.ts b/packages/ai/core/prompt/index.ts
index 760c76d258e7..a72ca197ed0e 100644
--- a/packages/ai/core/prompt/index.ts
+++ b/packages/ai/core/prompt/index.ts
@@ -1,5 +1,6 @@
 export { appendClientMessage } from './append-client-message';
 export { appendResponseMessages } from './append-response-messages';
+export type { CallSettings } from './call-settings';
 export type {
   FilePart,
   ImagePart,
@@ -10,21 +11,30 @@ export type {
 export { convertToCoreMessages } from './convert-to-core-messages';
 export type { DataContent } from './data-content';
 export {
+  assistantModelMessageSchema,
   coreAssistantMessageSchema,
   coreMessageSchema,
   coreSystemMessageSchema,
   coreToolMessageSchema,
   coreUserMessageSchema,
+  modelMessageSchema,
+  systemModelMessageSchema,
+  toolModelMessageSchema,
+  userModelMessageSchema,
 } from './message';
 export type {
   AssistantContent,
+  AssistantModelMessage,
   CoreAssistantMessage,
   CoreMessage,
   CoreSystemMessage,
   CoreToolMessage,
   CoreUserMessage,
+  ModelMessage,
+  SystemModelMessage,
   ToolContent,
+  ToolModelMessage,
   UserContent,
+  UserModelMessage,
 } from './message';
 export type { Prompt } from './prompt';
-export type { CallSettings } from './call-settings';
diff --git a/packages/ai/core/prompt/message.ts b/packages/ai/core/prompt/message.ts
index 53a994d47038..8e3208d75f13 100644
--- a/packages/ai/core/prompt/message.ts
+++ b/packages/ai/core/prompt/message.ts
@@ -25,7 +25,7 @@ import {
 to increase the resilience against prompt injection attacks,
 and because not all providers support several system messages.
  */
-export type CoreSystemMessage = {
+export type SystemModelMessage = {
   role: 'system';
   content: string;
 
@@ -37,16 +37,30 @@ functionality that can be fully encapsulated in the provider.
   providerOptions?: ProviderOptions;
 };
 
-export const coreSystemMessageSchema: z.ZodType<CoreSystemMessage> = z.object({
-  role: z.literal('system'),
-  content: z.string(),
-  providerOptions: providerMetadataSchema.optional(),
-});
+/**
+@deprecated Use `SystemModelMessage` instead.
+ */
+// TODO remove in AI SDK 6
+export type CoreSystemMessage = SystemModelMessage;
+
+export const systemModelMessageSchema: z.ZodType<SystemModelMessage> = z.object(
+  {
+    role: z.literal('system'),
+    content: z.string(),
+    providerOptions: providerMetadataSchema.optional(),
+  },
+);
+
+/**
+@deprecated Use `systemModelMessageSchema` instead.
+ */
+// TODO remove in AI SDK 6
+export const coreSystemMessageSchema = systemModelMessageSchema;
 
 /**
 A user message. It can contain text or a combination of text and images.
  */
-export type CoreUserMessage = {
+export type UserModelMessage = {
   role: 'user';
   content: UserContent;
 
@@ -58,7 +72,13 @@ functionality that can be fully encapsulated in the provider.
   providerOptions?: ProviderOptions;
 };
 
-export const coreUserMessageSchema: z.ZodType<CoreUserMessage> = z.object({
+/**
+@deprecated Use `UserModelMessage` instead.
+ */
+// TODO remove in AI SDK 6
+export type CoreUserMessage = UserModelMessage;
+
+export const userModelMessageSchema: z.ZodType<UserModelMessage> = z.object({
   role: z.literal('user'),
   content: z.union([
     z.string(),
@@ -67,6 +87,12 @@ export const coreUserMessageSchema: z.ZodType<CoreUserMessage> = z.object({
   providerOptions: providerMetadataSchema.optional(),
 });
 
+/**
+@deprecated Use `userModelMessageSchema` instead.
+ */
+// TODO remove in AI SDK 6
+export const coreUserMessageSchema = userModelMessageSchema;
+
 /**
 Content of a user message. It can be a string or an array of text and image parts.
  */
@@ -75,7 +101,7 @@ export type UserContent = string | Array<TextPart | ImagePart | FilePart>;
 
 /**
 An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
  */
-export type CoreAssistantMessage = {
+export type AssistantModelMessage = {
   role: 'assistant';
   content: AssistantContent;
 
@@ -87,7 +113,13 @@ functionality that can be fully encapsulated in the provider.
   providerOptions?: ProviderOptions;
 };
 
-export const coreAssistantMessageSchema: z.ZodType<CoreAssistantMessage> =
+/**
+@deprecated Use `AssistantModelMessage` instead.
+ */
+// TODO remove in AI SDK 6
+export type CoreAssistantMessage = AssistantModelMessage;
+
+export const assistantModelMessageSchema: z.ZodType<AssistantModelMessage> =
   z.object({
     role: z.literal('assistant'),
     content: z.union([
@@ -104,6 +136,12 @@ export const coreAssistantMessageSchema: z.ZodType<CoreAssistantMessage> =
     providerOptions: providerMetadataSchema.optional(),
   });
 
+/**
+@deprecated Use `assistantModelMessageSchema` instead.
+ */
+// TODO remove in AI SDK 6
+export const coreAssistantMessageSchema = assistantModelMessageSchema;
+
 /**
 Content of an assistant message.
 It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
@@ -115,7 +153,7 @@ export type AssistantContent =
 
 /**
 A tool message. It contains the result of one or more tool calls.
  */
-export type CoreToolMessage = {
+export type ToolModelMessage = {
   role: 'tool';
   content: ToolContent;
 
@@ -127,12 +165,24 @@ functionality that can be fully encapsulated in the provider.
   providerOptions?: ProviderOptions;
 };
 
-export const coreToolMessageSchema: z.ZodType<CoreToolMessage> = z.object({
+/**
+@deprecated Use `ToolModelMessage` instead.
+ */
+// TODO remove in AI SDK 6
+export type CoreToolMessage = ToolModelMessage;
+
+export const toolModelMessageSchema: z.ZodType<ToolModelMessage> = z.object({
   role: z.literal('tool'),
   content: z.array(toolResultPartSchema),
   providerOptions: providerMetadataSchema.optional(),
 });
 
+/**
+@deprecated Use `toolModelMessageSchema` instead.
+ */
+// TODO remove in AI SDK 6
+export const coreToolMessageSchema = toolModelMessageSchema;
+
 /**
 Content of a tool message. It is an array of tool result parts.
  */
@@ -142,15 +192,27 @@ export type ToolContent = Array<ToolResultPart>;
 A message that can be used in the `messages` field of a prompt.
 It can be a user message, an assistant message, or a tool message.
  */
-export type CoreMessage =
-  | CoreSystemMessage
-  | CoreUserMessage
-  | CoreAssistantMessage
-  | CoreToolMessage;
-
-export const coreMessageSchema: z.ZodType<CoreMessage> = z.union([
-  coreSystemMessageSchema,
-  coreUserMessageSchema,
-  coreAssistantMessageSchema,
-  coreToolMessageSchema,
+export type ModelMessage =
+  | SystemModelMessage
+  | UserModelMessage
+  | AssistantModelMessage
+  | ToolModelMessage;
+
+/**
+@deprecated Use `ModelMessage` instead.
+ */
+// TODO remove in AI SDK 6
+export type CoreMessage = ModelMessage;
+
+export const modelMessageSchema: z.ZodType<ModelMessage> = z.union([
+  systemModelMessageSchema,
+  userModelMessageSchema,
+  assistantModelMessageSchema,
+  toolModelMessageSchema,
 ]);
+
+/**
+@deprecated Use `modelMessageSchema` instead.
+ */
+// TODO remove in AI SDK 6
+export const coreMessageSchema: z.ZodType<CoreMessage> = modelMessageSchema;
diff --git a/packages/ai/core/prompt/prompt.ts b/packages/ai/core/prompt/prompt.ts
index 5821f426ebff..ba65315d5a66 100644
--- a/packages/ai/core/prompt/prompt.ts
+++ b/packages/ai/core/prompt/prompt.ts
@@ -1,5 +1,5 @@
 import { UIMessage } from '../types';
-import { CoreMessage } from './message';
+import { ModelMessage } from './message';
 
 /**
 Prompt part of the AI function options.
@@ -19,5 +19,5 @@ A simple text prompt. You can either use `prompt` or `messages` but not both.
 
 /**
 A list of messages. You can either use `prompt` or `messages` but not both.
  */
-  messages?: Array<CoreMessage> | Array<Omit<UIMessage, 'id'>>;
+  messages?: Array<ModelMessage> | Array<Omit<UIMessage, 'id'>>;
 };
diff --git a/packages/ai/core/prompt/standardize-prompt.ts b/packages/ai/core/prompt/standardize-prompt.ts
index 3a1323b16344..8ccc11cab218 100644
--- a/packages/ai/core/prompt/standardize-prompt.ts
+++ b/packages/ai/core/prompt/standardize-prompt.ts
@@ -5,7 +5,7 @@ import { z } from 'zod';
 import { ToolSet } from '../generate-text/tool-set';
 import { convertToCoreMessages } from './convert-to-core-messages';
 import { detectPromptType } from './detect-prompt-type';
-import { CoreMessage, coreMessageSchema } from './message';
+import { ModelMessage, modelMessageSchema } from './message';
 import { Prompt } from './prompt';
 
 export type StandardizedPrompt = {
@@ -17,7 +17,7 @@
   /**
    * Messages.
   */
-  messages: CoreMessage[];
+  messages: ModelMessage[];
 };
 
 export async function standardizePrompt<TOOLS extends ToolSet>({
@@ -81,12 +81,12 @@
     });
   }
 
-  const messages: CoreMessage[] =
+  const messages: ModelMessage[] =
     promptType === 'ui-messages'
       ? convertToCoreMessages(prompt.messages as Omit<UIMessage, 'id'>[], {
           tools,
         })
-      : (prompt.messages as CoreMessage[]);
+      : (prompt.messages as ModelMessage[]);
 
   if (messages.length === 0) {
     throw new InvalidPromptError({
@@ -97,7 +97,7 @@
 
   const validationResult = await safeValidateTypes({
     value: messages,
-    schema: z.array(coreMessageSchema),
+    schema: z.array(modelMessageSchema),
   });
 
   if (!validationResult.success) {
diff --git a/packages/ai/core/tool/tool.ts b/packages/ai/core/tool/tool.ts
index f3f01a7f467f..144d1619d769 100644
--- a/packages/ai/core/tool/tool.ts
+++ b/packages/ai/core/tool/tool.ts
@@ -1,5 +1,5 @@
 import { ToolResultContent } from '../prompt/tool-result-content';
-import { CoreMessage } from '../prompt/message';
+import { ModelMessage } from '../prompt/message';
 import { z } from 'zod';
 import { Schema } from '../util';
 import { JSONObject, JSONValue } from '@ai-sdk/provider';
@@ -16,7 +16,7 @@ export interface ToolExecutionOptions {
   * Messages that were sent to the language model to initiate the response that contained the tool call.
   * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
   */
-  messages: CoreMessage[];
+  messages: ModelMessage[];
 
   /**
   * An optional abort signal that indicates that the overall operation should be aborted.

From 635e1da7625bbc8933203405ecd61fc13170280d Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Fri, 2 May 2025 10:21:38 +0200
Subject: [PATCH 2/7] 2

---
 .../ai/core/generate-text/generate-text.ts    | 10 +++---
 .../ai/core/generate-text/parse-tool-call.ts  |  4 +--
 .../ai/core/generate-text/tool-call-repair.ts |  4 +--
 .../convert-to-language-model-prompt.ts       |  6 ++--
 ...t.ts => convert-to-model-messages.test.ts} | 34 +++++++++----------
 ...ssages.ts => convert-to-model-messages.ts} | 10 ++++--
 packages/ai/core/prompt/index.ts              |  5 ++-
 packages/ai/core/prompt/standardize-prompt.ts |  8 ++---
 8 files changed, 45 insertions(+), 36 deletions(-)
 rename packages/ai/core/prompt/{convert-to-core-messages.test.ts => convert-to-model-messages.test.ts} (94%)
 rename packages/ai/core/prompt/{convert-to-core-messages.ts => convert-to-model-messages.ts} (96%)

diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts
index 15561ff04bad..9b79d561470b 100644
--- a/packages/ai/core/generate-text/generate-text.ts
+++ b/packages/ai/core/generate-text/generate-text.ts
@@ -7,8 +7,9 @@ import { Tracer } from '@opentelemetry/api';
 import { InvalidArgumentError } from '../../errors/invalid-argument-error';
 import { NoOutputSpecifiedError } from '../../errors/no-output-specified-error';
 import { ToolExecutionError } from '../../errors/tool-execution-error';
-import { CoreAssistantMessage, CoreMessage } from '../prompt';
+import { AssistantModelMessage, ModelMessage } from '../prompt';
 import { CallSettings } from '../prompt/call-settings';
+import { ReasoningPart } from '../prompt/content-part';
 import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
 import { prepareCallSettings } from '../prompt/prepare-call-settings';
 import { prepareRetries } from '../prompt/prepare-retries';
@@ -33,14 +34,13 @@ import { GenerateTextResult } from './generate-text-result';
 import { DefaultGeneratedFile, GeneratedFile } from './generated-file';
 import { Output } from './output';
 import { parseToolCall } from './parse-tool-call';
-import { convertReasoningContentToParts, asReasoningText } from './reasoning';
+import { asReasoningText, convertReasoningContentToParts } from './reasoning';
 import { ResponseMessage, StepResult } from './step-result';
 import { toResponseMessages } from './to-response-messages';
 import { ToolCallArray } from './tool-call';
 import { ToolCallRepairFunction } from './tool-call-repair';
 import { ToolResultArray } from './tool-result';
 import { ToolSet } from './tool-set';
-import { ReasoningPart } from '../prompt/content-part';
 
 const originalGenerateId = createIdGenerator({
   prefix: 'aitxt',
@@ -539,7 +539,7 @@ A function that attempts to repair a tool call that failed to parse.
       // so we can assume that there is a single last assistant message:
       const lastMessage = responseMessages[
         responseMessages.length - 1
-      ] as CoreAssistantMessage;
+      ] as AssistantModelMessage;
 
       if (typeof lastMessage.content === 'string') {
         lastMessage.content += stepText;
@@ -667,7 +667,7 @@ async function executeTools<TOOLS extends ToolSet>({
   tools: TOOLS;
   tracer: Tracer;
   telemetry: TelemetrySettings | undefined;
-  messages: CoreMessage[];
+  messages: ModelMessage[];
   abortSignal: AbortSignal | undefined;
 }): Promise<ToolResultArray<TOOLS>> {
   const toolResults = await Promise.all(
diff --git a/packages/ai/core/generate-text/parse-tool-call.ts b/packages/ai/core/generate-text/parse-tool-call.ts
index 10e072097543..91a3e76048f1 100644
--- a/packages/ai/core/generate-text/parse-tool-call.ts
+++ b/packages/ai/core/generate-text/parse-tool-call.ts
@@ -3,7 +3,7 @@ import { safeParseJSON, safeValidateTypes } from '@ai-sdk/provider-utils';
 import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error';
 import { NoSuchToolError } from '../../errors/no-such-tool-error';
 import { ToolCallRepairError } from '../../errors/tool-call-repair-error';
-import { CoreMessage } from '../prompt';
+import { ModelMessage } from '../prompt';
 import { asSchema } from '../util';
 import { ToolCallUnion } from './tool-call';
 import { ToolCallRepairFunction } from './tool-call-repair';
@@ -20,7 +20,7 @@
   tools: TOOLS | undefined;
   repairToolCall: ToolCallRepairFunction<TOOLS> | undefined;
   system: string | undefined;
-  messages: CoreMessage[];
+  messages: ModelMessage[];
 }): Promise<ToolCallUnion<TOOLS>> {
   if (tools == null) {
     throw new NoSuchToolError({ toolName: toolCall.toolName });
diff --git a/packages/ai/core/generate-text/tool-call-repair.ts b/packages/ai/core/generate-text/tool-call-repair.ts
index 48ea1fc01155..92f7a56a2dec 100644
--- a/packages/ai/core/generate-text/tool-call-repair.ts
+++ b/packages/ai/core/generate-text/tool-call-repair.ts
@@ -1,7 +1,7 @@
 import { JSONSchema7, LanguageModelV2ToolCall } from '@ai-sdk/provider';
 import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error';
 import { NoSuchToolError } from '../../errors/no-such-tool-error';
-import { CoreMessage } from '../prompt';
+import { ModelMessage } from '../prompt';
 import { ToolSet } from './tool-set';
 
 /**
@@ -19,7 +19,7 @@ import { ToolSet } from './tool-set';
  */
 export type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
   system: string | undefined;
-  messages: CoreMessage[];
+  messages: ModelMessage[];
   toolCall: LanguageModelV2ToolCall;
   tools: TOOLS;
   parameterSchema: (options: { toolName: string }) => JSONSchema7;
diff --git
a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index db808017fae7..69ca86c237ef 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -4,6 +4,7 @@ import { LanguageModelV2Prompt, LanguageModelV2TextPart, } from '@ai-sdk/provider'; +import { isUrlSupported } from '@ai-sdk/provider-utils'; import { download } from '../../util/download'; import { ModelMessage } from '../prompt/message'; import { @@ -17,7 +18,6 @@ import { } from './data-content'; import { InvalidMessageRoleError } from './invalid-message-role-error'; import { StandardizedPrompt } from './standardize-prompt'; -import { isUrlSupported } from '@ai-sdk/provider-utils'; export async function convertToLanguageModelPrompt({ prompt, @@ -45,9 +45,9 @@ export async function convertToLanguageModelPrompt({ } /** - * Convert a CoreMessage to a LanguageModelV2Message. + * Convert a ModelMessage to a LanguageModelV2Message. * - * @param message The CoreMessage to convert. + * @param message The ModelMessage to convert. * @param downloadedAssets A map of URLs to their downloaded data. Only * available if the model does not support URLs, null otherwise. */ diff --git a/packages/ai/core/prompt/convert-to-core-messages.test.ts b/packages/ai/core/prompt/convert-to-model-messages.test.ts similarity index 94% rename from packages/ai/core/prompt/convert-to-core-messages.test.ts rename to packages/ai/core/prompt/convert-to-model-messages.test.ts index 257fe41fe83a..73f194ac3ee3 100644 --- a/packages/ai/core/prompt/convert-to-core-messages.test.ts +++ b/packages/ai/core/prompt/convert-to-model-messages.test.ts @@ -1,12 +1,12 @@ import { z } from 'zod'; import { tool } from '../tool/tool'; -import { convertToCoreMessages } from './convert-to-core-messages'; +import { convertToModelMessages } from './convert-to-model-messages'; import { ModelMessage } from './message'; -describe('convertToCoreMessages', () => { +describe('convertToModelMessages', () => { describe('system message', () => { it('should convert a simple system message', () => { - const result = convertToCoreMessages([ + const result = convertToModelMessages([ { role: 'system', content: 'System message', @@ -20,7 +20,7 @@ describe('convertToCoreMessages', () => { describe('user message', () => { it('should convert a simple user message', () => { - const result = convertToCoreMessages([ + const result = convertToModelMessages([ { role: 'user', content: 'Hello, AI!', @@ -44,7 +44,7 @@ describe('convertToCoreMessages', () => { }); it('should prefer content in parts when content is empty', () => { - const result = convertToCoreMessages([ + const result = convertToModelMessages([ { role: 'user', content: '', // empty content @@ -66,7 +66,7 @@ describe('convertToCoreMessages', () => { }); it('should handle user message file parts', () => { - const result = convertToCoreMessages([ + const result = convertToModelMessages([ { role: 'user', content: 'Check this image', @@ -99,7 +99,7 @@ describe('convertToCoreMessages', () => { describe('assistant message', () => { it('should convert a simple assistant message', () => { - const result = convertToCoreMessages([ + const result = convertToModelMessages([ { role: 'assistant', content: '', // empty content @@ -116,7 +116,7 @@ describe('convertToCoreMessages', () => { }); it('should convert an assistant message with reasoning', () => { - const result = convertToCoreMessages([ + const result = 
convertToModelMessages([
       {
         role: 'assistant',
         content: '', // empty content
@@ -163,7 +163,7 @@ describe('convertToModelMessages', () => {
   });
 
   it('should convert an assistant message with file parts', () => {
-    const result = convertToCoreMessages([
+    const result = convertToModelMessages([
       {
         role: 'assistant',
         content: '', // empty content
@@ -192,7 +192,7 @@ describe('convertToModelMessages', () => {
   });
 
   it('should handle assistant message with tool invocations', () => {
-    const result = convertToCoreMessages([
+    const result = convertToModelMessages([
       {
         role: 'assistant',
         content: '', // empty content
@@ -227,7 +227,7 @@ describe('convertToModelMessages', () => {
       }),
     };
 
-    const result = convertToCoreMessages(
+    const result = convertToModelMessages(
       [
         {
           role: 'assistant',
@@ -255,7 +255,7 @@ describe('convertToModelMessages', () => {
   });
 
   it('should handle conversation with an assistant message that has empty tool invocations', () => {
-    const result = convertToCoreMessages([
+    const result = convertToModelMessages([
       {
         role: 'user',
         content: 'text1',
@@ -279,7 +279,7 @@ describe('convertToModelMessages', () => {
       }),
     };
 
-    const result = convertToCoreMessages(
+    const result = convertToModelMessages(
       [
         {
           role: 'assistant',
@@ -347,7 +347,7 @@ describe('convertToModelMessages', () => {
       }),
     };
 
-    const result = convertToCoreMessages(
+    const result = convertToModelMessages(
       [
         {
           role: 'assistant',
@@ -412,7 +412,7 @@ describe('convertToModelMessages', () => {
 
 describe('multiple messages', () => {
   it('should handle a conversation with multiple messages', () => {
-    const result = convertToCoreMessages([
+    const result = convertToModelMessages([
       {
         role: 'user',
         content: "What's the weather like?",
@@ -471,7 +471,7 @@ describe('convertToModelMessages', () => {
       }),
     };
 
-    const result = convertToCoreMessages(
+    const result = convertToModelMessages(
       [
         {
           role: 'assistant',
@@ -541,7 +541,7 @@ describe('convertToModelMessages', () => {
 describe('error handling', () => {
   it('should throw an error for unhandled roles', () => {
     expect(() => {
-      convertToCoreMessages([
+      convertToModelMessages([
        {
          role: 'unknown' as any,
          content: 'unknown role message',
diff --git a/packages/ai/core/prompt/convert-to-core-messages.ts b/packages/ai/core/prompt/convert-to-model-messages.ts
similarity index 96%
rename from packages/ai/core/prompt/convert-to-core-messages.ts
rename to packages/ai/core/prompt/convert-to-model-messages.ts
index ad293cd7f575..4e207bf93223 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.ts
+++ b/packages/ai/core/prompt/convert-to-model-messages.ts
@@ -14,10 +14,10 @@ import { MessageConversionError } from './message-conversion-error';
 Converts an array of messages from useChat into an array of CoreMessages that can be used with
 the AI core functions (e.g. `streamText`).
 */
-export function convertToCoreMessages<TOOLS extends ToolSet = never>(
+export function convertToModelMessages<TOOLS extends ToolSet = never>(
   messages: Array<Omit<UIMessage, 'id'>>,
   options?: { tools?: TOOLS },
-) {
+): ModelMessage[] {
   const tools = options?.tools ?? ({} as TOOLS);
   const coreMessages: ModelMessage[] = [];
 
@@ -217,3 +217,9 @@ export function convertToCoreMessages<TOOLS extends ToolSet = never>(
 
   return coreMessages;
 }
+
+/**
+@deprecated Use `convertToModelMessages` instead.
+ */
+// TODO remove in AI SDK 6
+export const convertToCoreMessages = convertToModelMessages;
diff --git a/packages/ai/core/prompt/index.ts b/packages/ai/core/prompt/index.ts
index a72ca197ed0e..21377e4341ca 100644
--- a/packages/ai/core/prompt/index.ts
+++ b/packages/ai/core/prompt/index.ts
@@ -8,7 +8,10 @@ export type {
   ToolCallPart,
   ToolResultPart,
 } from './content-part';
-export { convertToCoreMessages } from './convert-to-core-messages';
+export {
+  convertToCoreMessages,
+  convertToModelMessages,
+} from './convert-to-model-messages';
 export type { DataContent } from './data-content';
 export {
   assistantModelMessageSchema,
diff --git a/packages/ai/core/prompt/standardize-prompt.ts b/packages/ai/core/prompt/standardize-prompt.ts
index 8ccc11cab218..c8792484a21f 100644
--- a/packages/ai/core/prompt/standardize-prompt.ts
+++ b/packages/ai/core/prompt/standardize-prompt.ts
@@ -3,7 +3,7 @@ import { safeValidateTypes } from '@ai-sdk/provider-utils';
 import { UIMessage } from '../types';
 import { z } from 'zod';
 import { ToolSet } from '../generate-text/tool-set';
-import { convertToCoreMessages } from './convert-to-core-messages';
+import { convertToModelMessages } from './convert-to-model-messages';
 import { detectPromptType } from './detect-prompt-type';
 import { ModelMessage, modelMessageSchema } from './message';
 import { Prompt } from './prompt';
@@ -77,13 +77,13 @@ export async function standardizePrompt<TOOLS extends ToolSet>({
   if (promptType === 'other') {
     throw new InvalidPromptError({
       prompt,
-      message: 'messages must be an array of CoreMessage or UIMessage',
+      message: 'messages must be an array of ModelMessage or UIMessage',
     });
   }
 
   const messages: ModelMessage[] =
     promptType === 'ui-messages'
-      ? convertToCoreMessages(prompt.messages as Omit<UIMessage, 'id'>[], {
+      ? convertToModelMessages(prompt.messages as Omit<UIMessage, 'id'>[], {
           tools,
         })
       : (prompt.messages as ModelMessage[]);
@@ -103,7 +103,7 @@ export async function standardizePrompt<TOOLS extends ToolSet>({
   if (!validationResult.success) {
     throw new InvalidPromptError({
       prompt,
-      message: 'messages must be an array of CoreMessage or UIMessage',
+      message: 'messages must be an array of ModelMessage or UIMessage',
       cause: validationResult.error,
     });
   }

From b044805cf66e6a6ba36bc9358eb90345b7b59c05 Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Fri, 2 May 2025 10:34:34 +0200
Subject: [PATCH 3/7] 3

---
 .../11-generate-text-with-chat-prompt.mdx     |  8 +++---
 .../01-next/24-stream-text-multistep.mdx      |  2 +-
 .../cookbook/01-next/75-human-in-the-loop.mdx |  4 +--
 content/docs/02-foundations/03-prompts.mdx    |  4 +--
 content/docs/02-getting-started/06-nodejs.mdx | 20 ++++++-------
 content/docs/02-guides/03-slackbot.mdx        |  8 +++---
 .../15-tools-and-tool-calling.mdx             |  4 +--
 .../03-chatbot-message-persistence.mdx        |  4 +--
 .../docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx |  4 +--
 .../01-ai-sdk-core/01-generate-text.mdx       |  4 +--
 .../01-ai-sdk-core/02-stream-text.mdx         |  4 +--
 .../07-reference/01-ai-sdk-core/20-tool.mdx   |  2 +-
 .../01-ai-sdk-core/30-core-message.mdx        | 12 ++++----
 .../31-convert-to-core-messages.mdx           | 28 +++++++++----------
 .../docs/07-reference/02-ai-sdk-ui/index.mdx  |  6 ++--
 .../29-migration-guide-4-0.mdx                |  4 +--
 .../10-use-chat-tools-no-response.mdx         | 10 +++----
 .../01-ai-sdk-providers/02-openai.mdx         |  2 +-
 .../01-ai-sdk-providers/05-anthropic.mdx      |  2 +-
 .../generate-text/amazon-bedrock-chatbot.ts   |  4 +--
 .../amazon-bedrock-reasoning-chatbot.ts       |  4 +--
 .../src/generate-text/anthropic-chatbot.ts    |  4 +--
 .../anthropic-reasoning-chatbot.ts            |  4 +--
 .../src/generate-text/cohere-chatbot.ts
.../google-chatbot-image-output.ts | 4 +-- .../google-vertex-anthropic-chatbot.ts | 4 +-- .../src/generate-text/mistral-chatbot.ts | 4 +-- .../generate-text/openai-responses-chatbot.ts | 4 +-- .../amazon-bedrock-cache-point-tool-call.ts | 4 +-- .../src/stream-text/amazon-bedrock-chatbot.ts | 4 +-- .../amazon-bedrock-reasoning-chatbot.ts | 4 +-- .../stream-text/amazon-bedrock-tool-call.ts | 4 +-- .../src/stream-text/anthropic-chatbot.ts | 4 +-- .../anthropic-reasoning-chatbot.ts | 4 +-- .../src/stream-text/cerebras-tool-call.ts | 4 +-- .../ai-core/src/stream-text/cohere-chatbot.ts | 4 +-- .../cohere-tool-call-empty-params.ts | 4 +-- .../src/stream-text/cohere-tool-call.ts | 4 +-- .../src/stream-text/deepseek-tool-call.ts | 4 +-- .../google-chatbot-image-output.ts | 4 +-- .../ai-core/src/stream-text/google-chatbot.ts | 4 +-- .../google-vertex-anthropic-chatbot.ts | 4 +-- .../google-vertex-anthropic-tool-call.ts | 4 +-- .../src/stream-text/mistral-chatbot.ts | 4 +-- .../ai-core/src/stream-text/openai-chatbot.ts | 4 +-- .../openai-compatible-togetherai-tool-call.ts | 4 +-- .../stream-text/openai-responses-chatbot.ts | 4 +-- .../src/stream-text/togetherai-tool-call.ts | 4 +-- .../ai-core/src/stream-text/xai-chatbot.ts | 4 +-- .../ai-core/src/stream-text/xai-tool-call.ts | 4 +-- .../app/api/generate-chat/route.ts | 4 +-- .../app/api/stream-chat/route.ts | 4 +-- .../pages/chat/generate-chat/index.tsx | 4 +-- .../api/use-chat-human-in-the-loop/utils.ts | 4 +-- .../use-chat-streamdata-multistep/route.ts | 4 +-- .../next-openai/app/stream-ui/actions.tsx | 6 ++-- 56 files changed, 141 insertions(+), 141 deletions(-) diff --git a/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx b/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx index c0106db9ba1c..ce6824c86140 100644 --- a/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx +++ b/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx @@ -31,12 +31,12 @@ Let's start by creating a simple chat interface with an input field that sends t ```tsx filename='app/page.tsx' 'use client'; -import { CoreMessage } from 'ai'; +import { ModelMessage } from 'ai'; import { useState } from 'react'; export default function Page() { const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); + const [messages, setMessages] = useState([]); return (
@@ -90,11 +90,11 @@ export default function Page() { Next, let's create the `/api/chat` endpoint that generates the assistant's response based on the conversation history. ```typescript filename='app/api/chat/route.ts' -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import { openai } from '@ai-sdk/openai'; export async function POST(req: Request) { - const { messages }: { messages: CoreMessage[] } = await req.json(); + const { messages }: { messages: ModelMessage[] } = await req.json(); const { response } = await generateText({ model: openai('gpt-4'), diff --git a/content/cookbook/01-next/24-stream-text-multistep.mdx b/content/cookbook/01-next/24-stream-text-multistep.mdx index 2e3acdf8fdba..ff1f3244466a 100644 --- a/content/cookbook/01-next/24-stream-text-multistep.mdx +++ b/content/cookbook/01-next/24-stream-text-multistep.mdx @@ -55,7 +55,7 @@ export async function POST(req: Request) { 'You are a helpful assistant with a different system prompt. Repeat the extract user goal in your answer.', // continue the workflow stream with the messages from the previous step: messages: [ - ...convertToCoreMessages(messages), + ...convertToModelMessages(messages), ...(await result1.response).messages, ], }); diff --git a/content/cookbook/01-next/75-human-in-the-loop.mdx b/content/cookbook/01-next/75-human-in-the-loop.mdx index dacb8121e68a..a390623239ab 100644 --- a/content/cookbook/01-next/75-human-in-the-loop.mdx +++ b/content/cookbook/01-next/75-human-in-the-loop.mdx @@ -336,7 +336,7 @@ The solution above is low-level and not very friendly to use in a production env import { formatDataStreamPart, Message, - convertToCoreMessages, + convertToModelMessages, DataStreamWriter, ToolExecutionOptions, ToolSet, @@ -419,7 +419,7 @@ export async function processToolCalls< const toolInstance = executeFunctions[toolName]; if (toolInstance) { result = await toolInstance(toolInvocation.args, { - messages: convertToCoreMessages(messages), + messages: convertToModelMessages(messages), toolCallId: toolInvocation.toolCallId, }); } else { diff --git a/content/docs/02-foundations/03-prompts.mdx b/content/docs/02-foundations/03-prompts.mdx index dfb7e82b7a2f..3282866964b0 100644 --- a/content/docs/02-foundations/03-prompts.mdx +++ b/content/docs/02-foundations/03-prompts.mdx @@ -164,9 +164,9 @@ const messages = [ AI SDK UI hooks like [`useChat`](/docs/reference/ai-sdk-ui/use-chat) return arrays of `UIMessage` objects, which do not support provider options. We recommend using the - [`convertToCoreMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages) + [`convertToModelMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages) function to convert `UIMessage` objects to - [`CoreMessage`](/docs/reference/ai-sdk-core/core-message) objects before + [`ModelMessage`](/docs/reference/ai-sdk-core/core-message) objects before applying or appending message(s) or message parts with `providerOptions`. 
diff --git a/content/docs/02-getting-started/06-nodejs.mdx b/content/docs/02-getting-started/06-nodejs.mdx index f5ec7edce2de..f01fcaa47a1c 100644 --- a/content/docs/02-getting-started/06-nodejs.mdx +++ b/content/docs/02-getting-started/06-nodejs.mdx @@ -76,7 +76,7 @@ Create an `index.ts` file in the root of your project and add the following code ```ts filename="index.ts" import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText } from 'ai'; +import { ModelMessage, streamText } from 'ai'; import dotenv from 'dotenv'; import * as readline from 'node:readline/promises'; @@ -87,7 +87,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { @@ -151,7 +151,7 @@ Modify your `index.ts` file to include the new weather tool: ```ts filename="index.ts" highlight="2,4,25-38" import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import dotenv from 'dotenv'; import { z } from 'zod'; import * as readline from 'node:readline/promises'; @@ -163,7 +163,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { @@ -221,7 +221,7 @@ Notice the blank "assistant" response? This is because instead of generating a t ```typescript highlight="47-48" import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import dotenv from 'dotenv'; import { z } from 'zod'; import * as readline from 'node:readline/promises'; @@ -233,7 +233,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { @@ -291,7 +291,7 @@ Modify your `index.ts` file to include the `maxSteps` option: ```ts filename="index.ts" highlight="39-42" import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import dotenv from 'dotenv'; import { z } from 'zod'; import * as readline from 'node:readline/promises'; @@ -303,7 +303,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { @@ -364,7 +364,7 @@ Update your `index.ts` file to add a new tool to convert the temperature from Ce ```ts filename="index.ts" highlight="38-49" import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import dotenv from 'dotenv'; import { z } from 'zod'; import * as readline from 'node:readline/promises'; @@ -376,7 +376,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/content/docs/02-guides/03-slackbot.mdx b/content/docs/02-guides/03-slackbot.mdx index faa9efd8926e..71d7a59b61ee 100644 --- a/content/docs/02-guides/03-slackbot.mdx +++ b/content/docs/02-guides/03-slackbot.mdx @@ -318,10 +318,10 @@ Here's how to implement it: ```typescript filename="lib/generate-response.ts" import { openai } from '@ai-sdk/openai'; -import { 
CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; export const generateResponse = async ( - messages: CoreMessage[], + messages: ModelMessage[], updateStatus?: (status: string) => void, ) => { const { text } = await generateText({ @@ -349,12 +349,12 @@ The real power of the AI SDK comes from tools that enable your bot to perform ac ```typescript filename="lib/generate-response.ts" import { openai } from '@ai-sdk/openai'; -import { CoreMessage, generateText, tool } from 'ai'; +import { ModelMessage, generateText, tool } from 'ai'; import { z } from 'zod'; import { exa } from './utils'; export const generateResponse = async ( - messages: CoreMessage[], + messages: ModelMessage[], updateStatus?: (status: string) => void, ) => { const { text } = await generateText({ diff --git a/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx b/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx index 54bbf0359245..410d1eff8aa6 100644 --- a/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +++ b/content/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx @@ -191,12 +191,12 @@ Both `generateText` and `streamText` have a `response.messages` property that yo add the assistant and tool messages to your conversation history. It is also available in the `onFinish` callback of `streamText`. -The `response.messages` property contains an array of `CoreMessage` objects that you can add to your conversation history: +The `response.messages` property contains an array of `ModelMessage` objects that you can add to your conversation history: ```ts import { generateText } from 'ai'; -const messages: CoreMessage[] = [ +const messages: ModelMessage[] = [ // ... ]; diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx index 68a7fffaf887..62ba096ab858 100644 --- a/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +++ b/content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx @@ -124,14 +124,14 @@ We have enabled the `sendExtraMessageFields` option to send the id and createdAt meaning that we store messages in the `useChat` message format. - The `useChat` message format is different from the `CoreMessage` format. The + The `useChat` message format is different from the `ModelMessage` format. The `useChat` message format is designed for frontend display, and contains additional fields such as `id` and `createdAt`. We recommend storing the messages in the `useChat` message format. Storing messages is done in the `onFinish` callback of the `streamText` function. -`onFinish` receives the messages from the AI response as a `CoreMessage[]`, +`onFinish` receives the messages from the AI response as a `ModelMessage[]`, and we use the [`appendResponseMessages`](/docs/reference/ai-sdk-ui/append-response-messages) helper to append the AI response messages to the chat messages. 
diff --git a/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx b/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx index 2e6f05c57411..38437344a8c7 100644 --- a/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +++ b/content/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx @@ -483,12 +483,12 @@ With AI SDK UI, you will save chats using the `onFinish` callback function of `s ```ts filename="@/app/api/chat/route.ts" import { openai } from '@ai-sdk/openai'; import { saveChat } from '@/utils/queries'; -import { streamText, convertToCoreMessages } from 'ai'; +import { streamText, convertToModelMessages } from 'ai'; export async function POST(request) { const { id, messages } = await request.json(); - const coreMessages = convertToCoreMessages(messages); + const coreMessages = convertToModelMessages(messages); const result = streamText({ model: openai('gpt-4o'), diff --git a/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx b/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx index bd16fd47fa7d..fe09ba3aeaf1 100644 --- a/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx @@ -333,7 +333,7 @@ To see `generateText` in action, check out [these examples](#examples). }, { name: 'messages', - type: 'CoreMessage[]', + type: 'ModelMessage[]', description: 'Messages that were sent to the language model to initiate the response that contained the tool call. The messages do not include the system prompt nor the assistant response that contained the tool call.', }, @@ -533,7 +533,7 @@ To see `generateText` in action, check out [these examples](#examples). }, { name: 'messages', - type: 'CoreMessage[]', + type: 'ModelMessage[]', description: 'The messages in the current generation step.', }, { diff --git a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx index f58422a3ebd5..8437ce015cc6 100644 --- a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx @@ -334,7 +334,7 @@ To see `streamText` in action, check out [these examples](#examples). }, { name: 'messages', - type: 'CoreMessage[]', + type: 'ModelMessage[]', description: 'Messages that were sent to the language model to initiate the response that contained the tool call. The messages do not include the system prompt nor the assistant response that contained the tool call.', }, @@ -577,7 +577,7 @@ To see `streamText` in action, check out [these examples](#examples). }, { name: 'messages', - type: 'CoreMessage[]', + type: 'ModelMessage[]', description: 'The messages in the current generation step.', }, { diff --git a/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx b/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx index 6117f26ec1be..4c1d72c0d310 100644 --- a/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/20-tool.mdx @@ -77,7 +77,7 @@ export const weatherTool = tool({ }, { name: "messages", - type: "CoreMessage[]", + type: "ModelMessage[]", description: "Messages that were sent to the language model to initiate the response that contained the tool call. The messages do not include the system prompt nor the assistant response that contained the tool call." 
}, { diff --git a/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx b/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx index 43fa20a218e6..78b031bd0844 100644 --- a/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/30-core-message.mdx @@ -1,16 +1,16 @@ --- -title: CoreMessage +title: ModelMessage description: Message types for AI SDK Core (API Reference) --- -# `CoreMessage` +# `ModelMessage` -`CoreMessage` represents the fundamental message structure used with AI SDK Core functions. +`ModelMessage` represents the fundamental message structure used with AI SDK Core functions. It encompasses various message types that can be used in the `messages` field of any AI SDK Core functions. -You can access the Zod schema for `CoreMessage` with the `coreMessageSchema` export. +You can access the Zod schema for `ModelMessage` with the `coreMessageSchema` export. -## `CoreMessage` Types +## `ModelMessage` Types ### `CoreSystemMessage` @@ -75,7 +75,7 @@ type ToolContent = Array; You can access the Zod schema for `CoreToolMessage` with the `coreToolMessageSchema` export. -## `CoreMessage` Parts +## `ModelMessage` Parts ### `TextPart` diff --git a/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx b/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx index b381e43e7b67..7306a80158fd 100644 --- a/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx +++ b/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx @@ -1,27 +1,27 @@ --- -title: convertToCoreMessages -description: Convert useChat messages to CoreMessages for AI core functions (API Reference) +title: convertToModelMessages +description: Convert useChat messages to ModelMessages for AI core functions (API Reference) --- -# `convertToCoreMessages()` +# `convertToModelMessages()` - The `convertToCoreMessages` function is no longer required. The AI SDK now - automatically converts the incoming messages to the `CoreMessage` format. + The `convertToModelMessages` function is no longer required. The AI SDK now + automatically converts the incoming messages to the `ModelMessage` format. -The `convertToCoreMessages` function is used to transform an array of UI messages from the `useChat` hook into an array of `CoreMessage` objects. These `CoreMessage` objects are compatible with AI core functions like `streamText`. +The `convertToModelMessages` function is used to transform an array of UI messages from the `useChat` hook into an array of `ModelMessage` objects. These `ModelMessage` objects are compatible with AI core functions like `streamText`. ```ts filename="app/api/chat/route.ts" import { openai } from '@ai-sdk/openai'; -import { convertToCoreMessages, streamText } from 'ai'; +import { convertToModelMessages, streamText } from 'ai'; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: openai('gpt-4o'), - messages: convertToCoreMessages(messages), + messages: convertToModelMessages(messages), }); return result.toDataStreamResponse(); @@ -30,7 +30,7 @@ export async function POST(req: Request) { ## Import - + ## API Signature @@ -55,21 +55,21 @@ export async function POST(req: Request) { ### Returns -An array of [`CoreMessage`](/docs/reference/ai-sdk-core/core-message) objects. +An array of [`ModelMessage`](/docs/reference/ai-sdk-core/core-message) objects. 
## Multi-modal Tool Responses -The `convertToCoreMessages` function supports tools that can return multi-modal content. This is useful when tools need to return non-text content like images. +The `convertToModelMessages` function supports tools that can return multi-modal content. This is useful when tools need to return non-text content like images. ```ts import { tool } from 'ai'; @@ -83,7 +83,7 @@ const screenshotTool = tool({ const result = streamText({ model: openai('gpt-4'), - messages: convertToCoreMessages(messages, { + messages: convertToModelMessages(messages, { tools: { screenshot: screenshotTool, }, diff --git a/content/docs/07-reference/02-ai-sdk-ui/index.mdx b/content/docs/07-reference/02-ai-sdk-ui/index.mdx index ce87f31cdc82..05188114b683 100644 --- a/content/docs/07-reference/02-ai-sdk-ui/index.mdx +++ b/content/docs/07-reference/02-ai-sdk-ui/index.mdx @@ -31,15 +31,15 @@ AI SDK UI contains the following hooks: href: '/docs/reference/ai-sdk-ui/use-object', }, { - title: 'convertToCoreMessages', + title: 'convertToModelMessages', description: - 'Convert useChat messages to CoreMessages for AI core functions.', + 'Convert useChat messages to ModelMessages for AI core functions.', href: '/docs/reference/ai-sdk-ui/convert-to-core-messages', }, { title: 'appendResponseMessages', description: - 'Append CoreMessage[] from an AI response to an existing array of UI messages.', + 'Append ModelMessage[] from an AI response to an existing array of UI messages.', href: '/docs/reference/ai-sdk-ui/append-response-messages', }, { diff --git a/content/docs/08-migration-guides/29-migration-guide-4-0.mdx b/content/docs/08-migration-guides/29-migration-guide-4-0.mdx index 6218cf43dd14..0434cdfb4246 100644 --- a/content/docs/08-migration-guides/29-migration-guide-4-0.mdx +++ b/content/docs/08-migration-guides/29-migration-guide-4-0.mdx @@ -344,7 +344,7 @@ const id = generateId(); // now 16 characters The following types have been removed: -- `ExperimentalMessage` (use `CoreMessage` instead) +- `ExperimentalMessage` (use `ModelMessage` instead) - `ExperimentalUserMessage` (use `CoreUserMessage` instead) - `ExperimentalAssistantMessage` (use `CoreAssistantMessage` instead) - `ExperimentalToolMessage` (use `CoreToolMessage` instead) @@ -360,7 +360,7 @@ import { ```ts filename="AI SDK 4.0" import { - CoreMessage, + ModelMessage, CoreUserMessage, CoreAssistantMessage, CoreToolMessage, diff --git a/content/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx b/content/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx index 12fb4f9e87cf..fe304ce5cc15 100644 --- a/content/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +++ b/content/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx @@ -12,27 +12,27 @@ When I log the incoming messages on the server, I can see the tool call and the ## Background -The `useChat` hook uses a message structure (`Message`) that pre-dates the AI SDK Core message structure (`CoreMessage`). +The `useChat` hook uses a message structure (`Message`) that pre-dates the AI SDK Core message structure (`ModelMessage`). ## Solution This solution is outdated. The AI SDK now automatically converts the incoming - messages to the `CoreMessage` format. + messages to the `ModelMessage` format. -To resolve this issue, convert the incoming messages to the `CoreMessage` format using the [`convertToCoreMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages) function. 
+To resolve this issue, convert the incoming messages to the `ModelMessage` format using the [`convertToModelMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages) function. ```tsx highlight="9" import { openai } from '@ai-sdk/openai'; -import { convertToCoreMessages, streamText } from 'ai'; +import { convertToModelMessages, streamText } from 'ai'; export async function POST(req: Request) { const { messages } = await req.json(); const result = streamText({ model: openai('gpt-4o'), - messages: convertToCoreMessages(messages), + messages: convertToModelMessages(messages), }); return result.toDataStreamResponse(); diff --git a/content/providers/01-ai-sdk-providers/02-openai.mdx b/content/providers/01-ai-sdk-providers/02-openai.mdx index cef5c86bffe3..cb13fd6df6ec 100644 --- a/content/providers/01-ai-sdk-providers/02-openai.mdx +++ b/content/providers/01-ai-sdk-providers/02-openai.mdx @@ -438,7 +438,7 @@ const result = await generateText({ Because the `UIMessage` type (used by AI SDK UI hooks like `useChat`) does not - support the `providerOptions` property, you can use `convertToCoreMessages` + support the `providerOptions` property, you can use `convertToModelMessages` first before passing the messages to functions like `generateText` or `streamText`. For more details on `providerOptions` usage, see [here](/docs/foundations/prompts#provider-options). diff --git a/content/providers/01-ai-sdk-providers/05-anthropic.mdx b/content/providers/01-ai-sdk-providers/05-anthropic.mdx index 82cf47d52cda..84c2c8fccd0d 100644 --- a/content/providers/01-ai-sdk-providers/05-anthropic.mdx +++ b/content/providers/01-ai-sdk-providers/05-anthropic.mdx @@ -222,7 +222,7 @@ For more on prompt caching with Anthropic, see [Anthropic's Cache Control docume Because the `UIMessage` type (used by AI SDK UI hooks like `useChat`) does not - support the `providerOptions` property, you can use `convertToCoreMessages` + support the `providerOptions` property, you can use `convertToModelMessages` first before passing the messages to functions like `generateText` or `streamText`. For more details on `providerOptions` usage, see [here](/docs/foundations/prompts#provider-options). 
diff --git a/examples/ai-core/src/generate-text/amazon-bedrock-chatbot.ts b/examples/ai-core/src/generate-text/amazon-bedrock-chatbot.ts index c4b354e95fab..74519949d3ec 100644 --- a/examples/ai-core/src/generate-text/amazon-bedrock-chatbot.ts +++ b/examples/ai-core/src/generate-text/amazon-bedrock-chatbot.ts @@ -1,5 +1,5 @@ import { bedrock } from '@ai-sdk/amazon-bedrock'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/generate-text/amazon-bedrock-reasoning-chatbot.ts b/examples/ai-core/src/generate-text/amazon-bedrock-reasoning-chatbot.ts index 8d0483d5ac37..c4b649ac3c5f 100644 --- a/examples/ai-core/src/generate-text/amazon-bedrock-reasoning-chatbot.ts +++ b/examples/ai-core/src/generate-text/amazon-bedrock-reasoning-chatbot.ts @@ -1,5 +1,5 @@ import { bedrock } from '@ai-sdk/amazon-bedrock'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/generate-text/anthropic-chatbot.ts b/examples/ai-core/src/generate-text/anthropic-chatbot.ts index 56be9c02b987..386fb737c7eb 100644 --- a/examples/ai-core/src/generate-text/anthropic-chatbot.ts +++ b/examples/ai-core/src/generate-text/anthropic-chatbot.ts @@ -1,5 +1,5 @@ import { anthropic } from '@ai-sdk/anthropic'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/generate-text/anthropic-reasoning-chatbot.ts b/examples/ai-core/src/generate-text/anthropic-reasoning-chatbot.ts index 81892d4aac9e..19641de8e3fe 100644 --- a/examples/ai-core/src/generate-text/anthropic-reasoning-chatbot.ts +++ b/examples/ai-core/src/generate-text/anthropic-reasoning-chatbot.ts @@ -1,5 +1,5 @@ import { createAnthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -21,7 +21,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/generate-text/cohere-chatbot.ts b/examples/ai-core/src/generate-text/cohere-chatbot.ts index 667aa6f00d40..1d99b7c832b2 100644 --- 
a/examples/ai-core/src/generate-text/cohere-chatbot.ts +++ b/examples/ai-core/src/generate-text/cohere-chatbot.ts @@ -1,4 +1,4 @@ -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/generate-text/google-chatbot-image-output.ts b/examples/ai-core/src/generate-text/google-chatbot-image-output.ts index 1a4211672a64..d736d86471cd 100644 --- a/examples/ai-core/src/generate-text/google-chatbot-image-output.ts +++ b/examples/ai-core/src/generate-text/google-chatbot-image-output.ts @@ -1,5 +1,5 @@ import { google } from '@ai-sdk/google'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { presentImages } from '../lib/present-image'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/generate-text/google-vertex-anthropic-chatbot.ts b/examples/ai-core/src/generate-text/google-vertex-anthropic-chatbot.ts index c6aeaa8228b1..27422951bb68 100644 --- a/examples/ai-core/src/generate-text/google-vertex-anthropic-chatbot.ts +++ b/examples/ai-core/src/generate-text/google-vertex-anthropic-chatbot.ts @@ -1,6 +1,6 @@ import 'dotenv/config'; import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/generate-text/mistral-chatbot.ts b/examples/ai-core/src/generate-text/mistral-chatbot.ts index 73aa0614c9fa..64bd09c4b5af 100644 --- a/examples/ai-core/src/generate-text/mistral-chatbot.ts +++ b/examples/ai-core/src/generate-text/mistral-chatbot.ts @@ -1,5 +1,5 @@ import { mistral } from '@ai-sdk/mistral'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/generate-text/openai-responses-chatbot.ts b/examples/ai-core/src/generate-text/openai-responses-chatbot.ts index 0dec41d286fd..75189524b1ee 100644 --- a/examples/ai-core/src/generate-text/openai-responses-chatbot.ts +++ b/examples/ai-core/src/generate-text/openai-responses-chatbot.ts @@ -1,5 +1,5 @@ import { openai } from '@ai-sdk/openai'; -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import 
'dotenv/config'; import * as readline from 'node:readline/promises'; import { weatherTool } from '../tools/weather-tool'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/amazon-bedrock-cache-point-tool-call.ts b/examples/ai-core/src/stream-text/amazon-bedrock-cache-point-tool-call.ts index 7adbdc1b3a4e..4449d051439c 100644 --- a/examples/ai-core/src/stream-text/amazon-bedrock-cache-point-tool-call.ts +++ b/examples/ai-core/src/stream-text/amazon-bedrock-cache-point-tool-call.ts @@ -2,14 +2,14 @@ import { bedrock } from '@ai-sdk/amazon-bedrock'; import { streamText, tool, - CoreMessage, + ModelMessage, ToolCallPart, ToolResultPart, } from 'ai'; import 'dotenv/config'; import { z } from 'zod'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; const weatherTool = tool({ description: 'Get the weather in a location', diff --git a/examples/ai-core/src/stream-text/amazon-bedrock-chatbot.ts b/examples/ai-core/src/stream-text/amazon-bedrock-chatbot.ts index c0c159ee4112..d215ff060a19 100644 --- a/examples/ai-core/src/stream-text/amazon-bedrock-chatbot.ts +++ b/examples/ai-core/src/stream-text/amazon-bedrock-chatbot.ts @@ -1,5 +1,5 @@ import { bedrock } from '@ai-sdk/amazon-bedrock'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/amazon-bedrock-reasoning-chatbot.ts b/examples/ai-core/src/stream-text/amazon-bedrock-reasoning-chatbot.ts index 6d8ad213f028..8a658d43d3cf 100644 --- a/examples/ai-core/src/stream-text/amazon-bedrock-reasoning-chatbot.ts +++ b/examples/ai-core/src/stream-text/amazon-bedrock-reasoning-chatbot.ts @@ -1,5 +1,5 @@ import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -21,7 +21,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/amazon-bedrock-tool-call.ts b/examples/ai-core/src/stream-text/amazon-bedrock-tool-call.ts index c07daa405f09..bf47a0d81d09 100644 --- a/examples/ai-core/src/stream-text/amazon-bedrock-tool-call.ts +++ b/examples/ai-core/src/stream-text/amazon-bedrock-tool-call.ts @@ -1,9 +1,9 @@ import { bedrock } from '@ai-sdk/amazon-bedrock'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import 'dotenv/config'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/anthropic-chatbot.ts b/examples/ai-core/src/stream-text/anthropic-chatbot.ts index 
ed77a432ec74..9839504fb4df 100644 --- a/examples/ai-core/src/stream-text/anthropic-chatbot.ts +++ b/examples/ai-core/src/stream-text/anthropic-chatbot.ts @@ -1,5 +1,5 @@ import { anthropic } from '@ai-sdk/anthropic'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/anthropic-reasoning-chatbot.ts b/examples/ai-core/src/stream-text/anthropic-reasoning-chatbot.ts index db9ae678d597..116d017dd033 100644 --- a/examples/ai-core/src/stream-text/anthropic-reasoning-chatbot.ts +++ b/examples/ai-core/src/stream-text/anthropic-reasoning-chatbot.ts @@ -1,5 +1,5 @@ import { AnthropicProviderOptions, createAnthropic } from '@ai-sdk/anthropic'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -21,7 +21,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/cerebras-tool-call.ts b/examples/ai-core/src/stream-text/cerebras-tool-call.ts index 22eda757344b..a6f1784295fd 100644 --- a/examples/ai-core/src/stream-text/cerebras-tool-call.ts +++ b/examples/ai-core/src/stream-text/cerebras-tool-call.ts @@ -1,9 +1,9 @@ import { cerebras } from '@ai-sdk/cerebras'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import 'dotenv/config'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/cohere-chatbot.ts b/examples/ai-core/src/stream-text/cohere-chatbot.ts index cbade4727670..277ade42daf7 100644 --- a/examples/ai-core/src/stream-text/cohere-chatbot.ts +++ b/examples/ai-core/src/stream-text/cohere-chatbot.ts @@ -1,5 +1,5 @@ import { cohere } from '@ai-sdk/cohere'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/cohere-tool-call-empty-params.ts b/examples/ai-core/src/stream-text/cohere-tool-call-empty-params.ts index 73b28802d692..3e79375563f4 100644 --- a/examples/ai-core/src/stream-text/cohere-tool-call-empty-params.ts +++ b/examples/ai-core/src/stream-text/cohere-tool-call-empty-params.ts @@ -1,7 +1,7 @@ import { cohere } from '@ai-sdk/cohere'; import { streamText, - CoreMessage, + ModelMessage, ToolCallPart, ToolResultPart, tool, @@ -9,7 +9,7 @@ import { import 'dotenv/config'; import { z } from 'zod'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function 
main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/cohere-tool-call.ts b/examples/ai-core/src/stream-text/cohere-tool-call.ts index e8f201d63110..88f251b1e3ef 100644 --- a/examples/ai-core/src/stream-text/cohere-tool-call.ts +++ b/examples/ai-core/src/stream-text/cohere-tool-call.ts @@ -1,9 +1,9 @@ import { cohere } from '@ai-sdk/cohere'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import 'dotenv/config'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/deepseek-tool-call.ts b/examples/ai-core/src/stream-text/deepseek-tool-call.ts index 66de64dbcc3e..d61e026cd12f 100644 --- a/examples/ai-core/src/stream-text/deepseek-tool-call.ts +++ b/examples/ai-core/src/stream-text/deepseek-tool-call.ts @@ -1,9 +1,9 @@ import { deepseek } from '@ai-sdk/deepseek'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import 'dotenv/config'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/google-chatbot-image-output.ts b/examples/ai-core/src/stream-text/google-chatbot-image-output.ts index 957d3f3284b4..7ad72ec7bc0f 100644 --- a/examples/ai-core/src/stream-text/google-chatbot-image-output.ts +++ b/examples/ai-core/src/stream-text/google-chatbot-image-output.ts @@ -1,5 +1,5 @@ import { google } from '@ai-sdk/google'; -import { CoreMessage, streamText } from 'ai'; +import { ModelMessage, streamText } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { presentImages } from '../lib/present-image'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/google-chatbot.ts b/examples/ai-core/src/stream-text/google-chatbot.ts index 608bf1e1fe24..07b92497591c 100644 --- a/examples/ai-core/src/stream-text/google-chatbot.ts +++ b/examples/ai-core/src/stream-text/google-chatbot.ts @@ -1,5 +1,5 @@ import { google } from '@ai-sdk/google'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/google-vertex-anthropic-chatbot.ts b/examples/ai-core/src/stream-text/google-vertex-anthropic-chatbot.ts index 94162734d758..97323891a7ec 100644 --- a/examples/ai-core/src/stream-text/google-vertex-anthropic-chatbot.ts +++ b/examples/ai-core/src/stream-text/google-vertex-anthropic-chatbot.ts @@ -1,6 +1,6 @@ import 'dotenv/config'; import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 
'ai'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/google-vertex-anthropic-tool-call.ts b/examples/ai-core/src/stream-text/google-vertex-anthropic-tool-call.ts index f80edc9fa7e6..dd72a9c8693a 100644 --- a/examples/ai-core/src/stream-text/google-vertex-anthropic-tool-call.ts +++ b/examples/ai-core/src/stream-text/google-vertex-anthropic-tool-call.ts @@ -1,9 +1,9 @@ import 'dotenv/config'; import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/mistral-chatbot.ts b/examples/ai-core/src/stream-text/mistral-chatbot.ts index 51a07d4757bd..58e32c99314e 100644 --- a/examples/ai-core/src/stream-text/mistral-chatbot.ts +++ b/examples/ai-core/src/stream-text/mistral-chatbot.ts @@ -1,5 +1,5 @@ import { mistral } from '@ai-sdk/mistral'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/openai-chatbot.ts b/examples/ai-core/src/stream-text/openai-chatbot.ts index 08286c07951a..b71915df1b52 100644 --- a/examples/ai-core/src/stream-text/openai-chatbot.ts +++ b/examples/ai-core/src/stream-text/openai-chatbot.ts @@ -1,5 +1,5 @@ import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/openai-compatible-togetherai-tool-call.ts b/examples/ai-core/src/stream-text/openai-compatible-togetherai-tool-call.ts index d7250b83b6f1..aa354e22508a 100644 --- a/examples/ai-core/src/stream-text/openai-compatible-togetherai-tool-call.ts +++ b/examples/ai-core/src/stream-text/openai-compatible-togetherai-tool-call.ts @@ -1,9 +1,9 @@ import 'dotenv/config'; import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/openai-responses-chatbot.ts b/examples/ai-core/src/stream-text/openai-responses-chatbot.ts index b9b22f17c6d8..09b677620fea 
100644 --- a/examples/ai-core/src/stream-text/openai-responses-chatbot.ts +++ b/examples/ai-core/src/stream-text/openai-responses-chatbot.ts @@ -1,5 +1,5 @@ import { openai } from '@ai-sdk/openai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/togetherai-tool-call.ts b/examples/ai-core/src/stream-text/togetherai-tool-call.ts index 9db8f6f7342f..9edf0d250234 100644 --- a/examples/ai-core/src/stream-text/togetherai-tool-call.ts +++ b/examples/ai-core/src/stream-text/togetherai-tool-call.ts @@ -1,9 +1,9 @@ import { togetherai } from '@ai-sdk/togetherai'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import 'dotenv/config'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/ai-core/src/stream-text/xai-chatbot.ts b/examples/ai-core/src/stream-text/xai-chatbot.ts index c642b56afca3..437d6aff0f33 100644 --- a/examples/ai-core/src/stream-text/xai-chatbot.ts +++ b/examples/ai-core/src/stream-text/xai-chatbot.ts @@ -1,5 +1,5 @@ import { xai } from '@ai-sdk/xai'; -import { CoreMessage, streamText, tool } from 'ai'; +import { ModelMessage, streamText, tool } from 'ai'; import 'dotenv/config'; import * as readline from 'node:readline/promises'; import { z } from 'zod'; @@ -9,7 +9,7 @@ const terminal = readline.createInterface({ output: process.stdout, }); -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { while (true) { diff --git a/examples/ai-core/src/stream-text/xai-tool-call.ts b/examples/ai-core/src/stream-text/xai-tool-call.ts index 609c7958f658..167288d6866a 100644 --- a/examples/ai-core/src/stream-text/xai-tool-call.ts +++ b/examples/ai-core/src/stream-text/xai-tool-call.ts @@ -1,9 +1,9 @@ import { xai } from '@ai-sdk/xai'; -import { streamText, CoreMessage, ToolCallPart, ToolResultPart } from 'ai'; +import { streamText, ModelMessage, ToolCallPart, ToolResultPart } from 'ai'; import 'dotenv/config'; import { weatherTool } from '../tools/weather-tool'; -const messages: CoreMessage[] = []; +const messages: ModelMessage[] = []; async function main() { let toolResponseAvailable = false; diff --git a/examples/next-openai-pages/app/api/generate-chat/route.ts b/examples/next-openai-pages/app/api/generate-chat/route.ts index 41f7bf46d066..0f3f4a0bc938 100644 --- a/examples/next-openai-pages/app/api/generate-chat/route.ts +++ b/examples/next-openai-pages/app/api/generate-chat/route.ts @@ -1,8 +1,8 @@ -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import { openai } from '@ai-sdk/openai'; export async function POST(req: Request) { - const { messages }: { messages: CoreMessage[] } = await req.json(); + const { messages }: { messages: ModelMessage[] } = await req.json(); const { response } = await generateText({ model: openai('gpt-4'), diff --git a/examples/next-openai-pages/app/api/stream-chat/route.ts 
b/examples/next-openai-pages/app/api/stream-chat/route.ts index 7e7061f1afb0..0f6a316d1e52 100644 --- a/examples/next-openai-pages/app/api/stream-chat/route.ts +++ b/examples/next-openai-pages/app/api/stream-chat/route.ts @@ -1,8 +1,8 @@ -import { CoreMessage, streamText } from 'ai'; +import { ModelMessage, streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; export async function POST(req: Request) { - const { messages }: { messages: CoreMessage[] } = await req.json(); + const { messages }: { messages: ModelMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4'), diff --git a/examples/next-openai-pages/pages/chat/generate-chat/index.tsx b/examples/next-openai-pages/pages/chat/generate-chat/index.tsx index d6c414350558..9f3f68a71c1a 100644 --- a/examples/next-openai-pages/pages/chat/generate-chat/index.tsx +++ b/examples/next-openai-pages/pages/chat/generate-chat/index.tsx @@ -1,9 +1,9 @@ -import { CoreMessage } from 'ai'; +import { ModelMessage } from 'ai'; import { useState } from 'react'; export default function Page() { const [input, setInput] = useState(''); - const [messages, setMessages] = useState<CoreMessage[]>([]); + const [messages, setMessages] = useState<ModelMessage[]>([]); return (
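The example diffs above all share one pattern: a mutable `ModelMessage[]` history that each turn appends to. As a condensed sketch of that loop (the `ask` helper and model id are illustrative; it assumes the `response.messages` field returned by `generateText`):

```ts
import { openai } from '@ai-sdk/openai';
import { ModelMessage, generateText } from 'ai';

const messages: ModelMessage[] = [];

async function ask(userInput: string): Promise<string> {
  // Append the user turn to the shared history.
  messages.push({ role: 'user', content: userInput });

  const { text, response } = await generateText({
    model: openai('gpt-4o'),
    messages,
  });

  // response.messages carries the assistant (and any tool) messages
  // produced in this step; appending them keeps the history complete
  // for the next turn.
  messages.push(...response.messages);

  return text;
}
```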
diff --git a/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts b/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts index 11a91a3a0c18..e09650225afd 100644 --- a/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts +++ b/examples/next-openai/app/api/use-chat-human-in-the-loop/utils.ts @@ -1,5 +1,5 @@ import { - convertToCoreMessages, + convertToModelMessages, DataStreamWriter, formatDataStreamPart, Tool, @@ -84,7 +84,7 @@ export async function processToolCalls< const toolInstance = executeFunctions[toolName]; if (toolInstance) { result = await toolInstance(toolInvocation.args, { - messages: convertToCoreMessages(messages), + messages: convertToModelMessages(messages), toolCallId: toolInvocation.toolCallId, }); } else { diff --git a/examples/next-openai/app/api/use-chat-streamdata-multistep/route.ts b/examples/next-openai/app/api/use-chat-streamdata-multistep/route.ts index 55b42c468b31..77f0aba03d07 100644 --- a/examples/next-openai/app/api/use-chat-streamdata-multistep/route.ts +++ b/examples/next-openai/app/api/use-chat-streamdata-multistep/route.ts @@ -1,6 +1,6 @@ import { openai } from '@ai-sdk/openai'; import { - convertToCoreMessages, + convertToModelMessages, createDataStreamResponse, streamText, tool, @@ -42,7 +42,7 @@ export async function POST(req: Request) { 'You are a helpful assistant with a different system prompt. Repeat the extract user goal in your answer.', // continue the workflow stream with the messages from the previous step: messages: [ - ...convertToCoreMessages(messages), + ...convertToModelMessages(messages), ...(await result1.response).messages, ], }); diff --git a/examples/next-openai/app/stream-ui/actions.tsx b/examples/next-openai/app/stream-ui/actions.tsx index bace01b6b522..296805291f9f 100644 --- a/examples/next-openai/app/stream-ui/actions.tsx +++ b/examples/next-openai/app/stream-ui/actions.tsx @@ -1,5 +1,5 @@ import { openai } from '@ai-sdk/openai'; -import { CoreMessage, generateId } from 'ai'; +import { ModelMessage, generateId } from 'ai'; import { createAI, createStreamableValue, @@ -41,7 +41,7 @@ export async function submitUserMessage(content: string) { system: 'You are a weather assistant.', messages: aiState .get() - .messages.map(({ role, content }) => ({ role, content }) as CoreMessage), + .messages.map(({ role, content }) => ({ role, content }) as ModelMessage), text: ({ content, done, delta }) => { if (!textStream) { @@ -98,7 +98,7 @@ export async function submitUserMessage(content: string) { }; } -export type ClientMessage = CoreMessage & { +export type ClientMessage = ModelMessage & { id: string; }; From 0876dfe81f3faca89db65417e9b0ae4a5f192e3a Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 2 May 2025 10:36:29 +0200 Subject: [PATCH 4/7] test --- .../convert-to-model-messages.test.ts.snap | 415 ++++++++++++++++++ 1 file changed, 415 insertions(+) create mode 100644 packages/ai/core/prompt/__snapshots__/convert-to-model-messages.test.ts.snap diff --git a/packages/ai/core/prompt/__snapshots__/convert-to-model-messages.test.ts.snap b/packages/ai/core/prompt/__snapshots__/convert-to-model-messages.test.ts.snap new file mode 100644 index 000000000000..dfc7ebe8f099 --- /dev/null +++ b/packages/ai/core/prompt/__snapshots__/convert-to-model-messages.test.ts.snap @@ -0,0 +1,415 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`convertToModelMessages > assistant message > should handle assistant message with tool invocations 1`] = ` +[ + { + "content": [ + { + "text": "Let me 
calculate that for you.", + "type": "text", + }, + { + "args": { + "numbers": [ + 1, + 2, + ], + "operation": "add", + }, + "toolCallId": "call1", + "toolName": "calculator", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "3", + "toolCallId": "call1", + "toolName": "calculator", + "type": "tool-result", + }, + ], + "role": "tool", + }, +] +`; + +exports[`convertToModelMessages > assistant message > should handle assistant message with tool invocations that have multi-part responses 1`] = ` +[ + { + "content": [ + { + "text": "Let me calculate that for you.", + "type": "text", + }, + { + "args": {}, + "toolCallId": "call1", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "experimental_content": [ + { + "data": "imgbase64", + "type": "image", + }, + ], + "result": [ + { + "data": "imgbase64", + "type": "image", + }, + ], + "toolCallId": "call1", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, +] +`; + +exports[`convertToModelMessages > assistant message > should handle conversation with an assistant message that has empty tool invocations 1`] = ` +[ + { + "content": [ + { + "text": "text1", + "type": "text", + }, + ], + "role": "user", + }, + { + "content": [ + { + "text": "text2", + "type": "text", + }, + ], + "role": "assistant", + }, +] +`; + +exports[`convertToModelMessages > assistant message > should handle conversation with mix of tool invocations and text 1`] = ` +[ + { + "content": [ + { + "text": "i am gonna use tool1", + "type": "text", + }, + { + "args": { + "value": "value-1", + }, + "toolCallId": "call-1", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-1", + "toolCallId": "call-1", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "text": "i am gonna use tool2 and tool3", + "type": "text", + }, + { + "args": { + "value": "value-2", + }, + "toolCallId": "call-2", + "toolName": "screenshot", + "type": "tool-call", + }, + { + "args": { + "value": "value-3", + }, + "toolCallId": "call-3", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-2", + "toolCallId": "call-2", + "toolName": "screenshot", + "type": "tool-result", + }, + { + "result": "result-3", + "toolCallId": "call-3", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "args": { + "value": "value-4", + }, + "toolCallId": "call-4", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-4", + "toolCallId": "call-4", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "text": "final response", + "type": "text", + }, + ], + "role": "assistant", + }, +] +`; + +exports[`convertToModelMessages > assistant message > should handle conversation with multiple tool invocations that have step information 1`] = ` +[ + { + "content": [ + { + "text": "response", + "type": "text", + }, + { + "args": { + "value": "value-1", + }, + "toolCallId": "call-1", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-1", + "toolCallId": "call-1", + "toolName": "screenshot", + "type": "tool-result", + }, + ], 
+ "role": "tool", + }, + { + "content": [ + { + "args": { + "value": "value-2", + }, + "toolCallId": "call-2", + "toolName": "screenshot", + "type": "tool-call", + }, + { + "args": { + "value": "value-3", + }, + "toolCallId": "call-3", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-2", + "toolCallId": "call-2", + "toolName": "screenshot", + "type": "tool-result", + }, + { + "result": "result-3", + "toolCallId": "call-3", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "args": { + "value": "value-4", + }, + "toolCallId": "call-4", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-4", + "toolCallId": "call-4", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, +] +`; + +exports[`convertToModelMessages > multiple messages > should handle conversation with multiple tool invocations and user message at the end 1`] = ` +[ + { + "content": [ + { + "args": { + "value": "value-1", + }, + "toolCallId": "call-1", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-1", + "toolCallId": "call-1", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "args": { + "value": "value-2", + }, + "toolCallId": "call-2", + "toolName": "screenshot", + "type": "tool-call", + }, + { + "args": { + "value": "value-3", + }, + "toolCallId": "call-3", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-2", + "toolCallId": "call-2", + "toolName": "screenshot", + "type": "tool-result", + }, + { + "result": "result-3", + "toolCallId": "call-3", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "args": { + "value": "value-4", + }, + "toolCallId": "call-4", + "toolName": "screenshot", + "type": "tool-call", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "result": "result-4", + "toolCallId": "call-4", + "toolName": "screenshot", + "type": "tool-result", + }, + ], + "role": "tool", + }, + { + "content": [ + { + "text": "response", + "type": "text", + }, + ], + "role": "assistant", + }, + { + "content": [ + { + "text": "Thanks!", + "type": "text", + }, + ], + "role": "user", + }, +] +`; From 5aafdb6cea3489180322275c42b8b726d4593706 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 2 May 2025 10:36:42 +0200 Subject: [PATCH 5/7] snap --- .../convert-to-core-messages.test.ts.snap | 415 ------------------ 1 file changed, 415 deletions(-) delete mode 100644 packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap diff --git a/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap b/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap deleted file mode 100644 index b6f8bee9f4ff..000000000000 --- a/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap +++ /dev/null @@ -1,415 +0,0 @@ -// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html - -exports[`convertToCoreMessages > assistant message > should handle assistant message with tool invocations 1`] = ` -[ - { - "content": [ - { - "text": "Let me calculate that for you.", - "type": "text", - }, - { - "args": { - "numbers": [ - 1, - 2, - ], - 
"operation": "add", - }, - "toolCallId": "call1", - "toolName": "calculator", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "3", - "toolCallId": "call1", - "toolName": "calculator", - "type": "tool-result", - }, - ], - "role": "tool", - }, -] -`; - -exports[`convertToCoreMessages > assistant message > should handle assistant message with tool invocations that have multi-part responses 1`] = ` -[ - { - "content": [ - { - "text": "Let me calculate that for you.", - "type": "text", - }, - { - "args": {}, - "toolCallId": "call1", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "experimental_content": [ - { - "data": "imgbase64", - "type": "image", - }, - ], - "result": [ - { - "data": "imgbase64", - "type": "image", - }, - ], - "toolCallId": "call1", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, -] -`; - -exports[`convertToCoreMessages > assistant message > should handle conversation with an assistant message that has empty tool invocations 1`] = ` -[ - { - "content": [ - { - "text": "text1", - "type": "text", - }, - ], - "role": "user", - }, - { - "content": [ - { - "text": "text2", - "type": "text", - }, - ], - "role": "assistant", - }, -] -`; - -exports[`convertToCoreMessages > assistant message > should handle conversation with mix of tool invocations and text 1`] = ` -[ - { - "content": [ - { - "text": "i am gonna use tool1", - "type": "text", - }, - { - "args": { - "value": "value-1", - }, - "toolCallId": "call-1", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-1", - "toolCallId": "call-1", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "text": "i am gonna use tool2 and tool3", - "type": "text", - }, - { - "args": { - "value": "value-2", - }, - "toolCallId": "call-2", - "toolName": "screenshot", - "type": "tool-call", - }, - { - "args": { - "value": "value-3", - }, - "toolCallId": "call-3", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-2", - "toolCallId": "call-2", - "toolName": "screenshot", - "type": "tool-result", - }, - { - "result": "result-3", - "toolCallId": "call-3", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "args": { - "value": "value-4", - }, - "toolCallId": "call-4", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-4", - "toolCallId": "call-4", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "text": "final response", - "type": "text", - }, - ], - "role": "assistant", - }, -] -`; - -exports[`convertToCoreMessages > assistant message > should handle conversation with multiple tool invocations that have step information 1`] = ` -[ - { - "content": [ - { - "text": "response", - "type": "text", - }, - { - "args": { - "value": "value-1", - }, - "toolCallId": "call-1", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-1", - "toolCallId": "call-1", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "args": { - "value": "value-2", - }, - "toolCallId": 
"call-2", - "toolName": "screenshot", - "type": "tool-call", - }, - { - "args": { - "value": "value-3", - }, - "toolCallId": "call-3", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-2", - "toolCallId": "call-2", - "toolName": "screenshot", - "type": "tool-result", - }, - { - "result": "result-3", - "toolCallId": "call-3", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "args": { - "value": "value-4", - }, - "toolCallId": "call-4", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-4", - "toolCallId": "call-4", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, -] -`; - -exports[`convertToCoreMessages > multiple messages > should handle conversation with multiple tool invocations and user message at the end 1`] = ` -[ - { - "content": [ - { - "args": { - "value": "value-1", - }, - "toolCallId": "call-1", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-1", - "toolCallId": "call-1", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "args": { - "value": "value-2", - }, - "toolCallId": "call-2", - "toolName": "screenshot", - "type": "tool-call", - }, - { - "args": { - "value": "value-3", - }, - "toolCallId": "call-3", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-2", - "toolCallId": "call-2", - "toolName": "screenshot", - "type": "tool-result", - }, - { - "result": "result-3", - "toolCallId": "call-3", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "args": { - "value": "value-4", - }, - "toolCallId": "call-4", - "toolName": "screenshot", - "type": "tool-call", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "result": "result-4", - "toolCallId": "call-4", - "toolName": "screenshot", - "type": "tool-result", - }, - ], - "role": "tool", - }, - { - "content": [ - { - "text": "response", - "type": "text", - }, - ], - "role": "assistant", - }, - { - "content": [ - { - "text": "Thanks!", - "type": "text", - }, - ], - "role": "user", - }, -] -`; From f6fd7dbea447c707ed511638ae562241a63506f9 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 2 May 2025 10:37:45 +0200 Subject: [PATCH 6/7] docs --- ...-to-core-messages.mdx => 31-convert-to-model-messages.mdx} | 2 +- content/docs/07-reference/02-ai-sdk-ui/index.mdx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename content/docs/07-reference/02-ai-sdk-ui/{31-convert-to-core-messages.mdx => 31-convert-to-model-messages.mdx} (96%) diff --git a/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx b/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx similarity index 96% rename from content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx rename to content/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx index 7306a80158fd..a52e212a0711 100644 --- a/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-core-messages.mdx +++ b/content/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx @@ -1,6 +1,6 @@ --- title: convertToModelMessages -description: Convert useChat messages to ModelMessages for AI core 
functions (API Reference) +description: Convert useChat messages to ModelMessages for AI functions (API Reference) --- # `convertToModelMessages()` diff --git a/content/docs/07-reference/02-ai-sdk-ui/index.mdx b/content/docs/07-reference/02-ai-sdk-ui/index.mdx index 05188114b683..f974ae41fe84 100644 --- a/content/docs/07-reference/02-ai-sdk-ui/index.mdx +++ b/content/docs/07-reference/02-ai-sdk-ui/index.mdx @@ -33,8 +33,8 @@ AI SDK UI contains the following hooks: { title: 'convertToModelMessages', description: - 'Convert useChat messages to ModelMessages for AI core functions.', - href: '/docs/reference/ai-sdk-ui/convert-to-core-messages', + 'Convert useChat messages to ModelMessages for AI functions.', + href: '/docs/reference/ai-sdk-ui/convert-to-model-messages', }, { title: 'appendResponseMessages', From 1aaefcd2f4f6b3bbe82bcec6d6ea697badc1b401 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Fri, 2 May 2025 10:51:02 +0200 Subject: [PATCH 7/7] cs --- .changeset/young-dingos-march.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/young-dingos-march.md diff --git a/.changeset/young-dingos-march.md b/.changeset/young-dingos-march.md new file mode 100644 index 000000000000..342d097a0f86 --- /dev/null +++ b/.changeset/young-dingos-march.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename CoreMessage to ModelMessage
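For downstream code, the rename in this changeset is a one-for-one type swap. A minimal before/after sketch (the `history` variable is illustrative):

```ts
// Before: import { CoreMessage } from 'ai';
// After:
import { ModelMessage } from 'ai';

// A chat history that was previously typed as CoreMessage[]:
const history: ModelMessage[] = [
  { role: 'user', content: 'Hello!' },
  { role: 'assistant', content: 'Hi! How can I help?' },
];
```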