diff --git a/.changeset/afraid-moles-cross.md b/.changeset/afraid-moles-cross.md new file mode 100644 index 000000000000..1fd84e7e2e93 --- /dev/null +++ b/.changeset/afraid-moles-cross.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/rsc': major +'ai': major +--- + +chore(@ai-sdk/rsc): extract to separate package diff --git a/.changeset/angry-crabs-develop.md b/.changeset/angry-crabs-develop.md new file mode 100644 index 000000000000..e5d5a59ef1eb --- /dev/null +++ b/.changeset/angry-crabs-develop.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/provider-utils': patch +'ai': patch +--- + +chore(provider-utils): move ToolResultContent to provider-utils diff --git a/.changeset/angry-kings-dance.md b/.changeset/angry-kings-dance.md new file mode 100644 index 000000000000..93fe75f93f22 --- /dev/null +++ b/.changeset/angry-kings-dance.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/mistral': patch +--- + +chore(providers/mistral): convert to providerOptions diff --git a/.changeset/angry-plants-sin.md b/.changeset/angry-plants-sin.md new file mode 100644 index 000000000000..bb11e985e49e --- /dev/null +++ b/.changeset/angry-plants-sin.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/amazon-bedrock': patch +--- + +chore(providers/bedrock): convert to providerOptions diff --git a/.changeset/angry-poems-learn.md b/.changeset/angry-poems-learn.md new file mode 100644 index 000000000000..22456f013a8d --- /dev/null +++ b/.changeset/angry-poems-learn.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/azure': patch +--- + +feat (provider/azure): add OpenAI responses API support diff --git a/.changeset/angry-timers-drive.md b/.changeset/angry-timers-drive.md new file mode 100644 index 000000000000..35b3b941af7f --- /dev/null +++ b/.changeset/angry-timers-drive.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google-vertex': patch +--- + +feat (provider/google-vertex): add imagen-3.0-generate-002 diff --git a/.changeset/beige-ligers-kneel.md b/.changeset/beige-ligers-kneel.md new file mode 100644 index 000000000000..fe3a654a5dc5 --- /dev/null +++ 
b/.changeset/beige-ligers-kneel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat(smooth-stream): chunking callbacks diff --git a/.changeset/beige-penguins-greet.md b/.changeset/beige-penguins-greet.md new file mode 100644 index 000000000000..22e6868c97d1 --- /dev/null +++ b/.changeset/beige-penguins-greet.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/deepinfra': patch +--- + +feat (providers/deepinfra): add llama 4 models diff --git a/.changeset/beige-socks-stare.md b/.changeset/beige-socks-stare.md new file mode 100644 index 000000000000..19aacc127011 --- /dev/null +++ b/.changeset/beige-socks-stare.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore: rename reasoning to reasoningText etc diff --git a/.changeset/big-impalas-grin.md b/.changeset/big-impalas-grin.md new file mode 100644 index 000000000000..8a46de6d2914 --- /dev/null +++ b/.changeset/big-impalas-grin.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove automatic conversion of UI messages to model messages diff --git a/.changeset/big-panthers-judge.md b/.changeset/big-panthers-judge.md new file mode 100644 index 000000000000..f4adee6276df --- /dev/null +++ b/.changeset/big-panthers-judge.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +chore(providers/openai): convert to providerOptions diff --git a/.changeset/brave-numbers-drive.md b/.changeset/brave-numbers-drive.md new file mode 100644 index 000000000000..73092e8df7c4 --- /dev/null +++ b/.changeset/brave-numbers-drive.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat (provider/openai): o4 updates for responses api diff --git a/.changeset/brown-eagles-tickle.md b/.changeset/brown-eagles-tickle.md new file mode 100644 index 000000000000..fab85dcc478b --- /dev/null +++ b/.changeset/brown-eagles-tickle.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ui): add generics to ui message stream parts diff --git a/.changeset/brown-poems-boil.md b/.changeset/brown-poems-boil.md new file mode 100644 index 000000000000..a410ee5b8a78 --- 
/dev/null +++ b/.changeset/brown-poems-boil.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore (ai): remove provider re-exports diff --git a/.changeset/calm-boats-complain.md b/.changeset/calm-boats-complain.md new file mode 100644 index 000000000000..c5ff66eeaaef --- /dev/null +++ b/.changeset/calm-boats-complain.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: restructure language model supported urls diff --git a/.changeset/calm-dragons-drive.md b/.changeset/calm-dragons-drive.md new file mode 100644 index 000000000000..303c19276d7e --- /dev/null +++ b/.changeset/calm-dragons-drive.md @@ -0,0 +1,7 @@ +--- +'ai': major +--- + +- remove setting temperature to `0` by default +- remove `null` option from `DefaultSettingsMiddleware` +- remove setting defaults for `temperature` and `stopSequences` in `ai` to enable middleware changes diff --git a/.changeset/chatty-ladybugs-nail.md b/.changeset/chatty-ladybugs-nail.md new file mode 100644 index 000000000000..d8a65bccb168 --- /dev/null +++ b/.changeset/chatty-ladybugs-nail.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/rsc': patch +'ai': patch +--- + +chore (rsc): move HANGING_STREAM_WARNING_TIME constant into @ai-sdk/rsc package diff --git a/.changeset/chatty-steaks-search.md b/.changeset/chatty-steaks-search.md new file mode 100644 index 000000000000..220da73ccbe7 --- /dev/null +++ b/.changeset/chatty-steaks-search.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): add array support to stopWhen diff --git a/.changeset/chilled-clocks-brush.md b/.changeset/chilled-clocks-brush.md new file mode 100644 index 000000000000..3caf015e704e --- /dev/null +++ b/.changeset/chilled-clocks-brush.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/llamaindex': patch +'@ai-sdk/langchain': patch +'ai': patch +--- + +chore (ai): push stream-callbacks into langchain/llamaindex adapters diff --git a/.changeset/chilled-queens-remember.md b/.changeset/chilled-queens-remember.md new file mode 100644 index 000000000000..9cbbf64e7935 --- /dev/null +++ 
b/.changeset/chilled-queens-remember.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): cleanup request and rawRequest (language model v2) diff --git a/.changeset/chilly-chairs-press.md b/.changeset/chilly-chairs-press.md new file mode 100644 index 000000000000..772e7c35674d --- /dev/null +++ b/.changeset/chilly-chairs-press.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gateway': patch +--- + +feat (providers/gateway): include deployment and request id diff --git a/.changeset/chilly-teachers-brush.md b/.changeset/chilly-teachers-brush.md new file mode 100644 index 000000000000..c6aef1dd0c1a --- /dev/null +++ b/.changeset/chilly-teachers-brush.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +feat (provider/anthropic): json response schema support via tool calls diff --git a/.changeset/chilly-tips-know.md b/.changeset/chilly-tips-know.md new file mode 100644 index 000000000000..fbc06b484670 --- /dev/null +++ b/.changeset/chilly-tips-know.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +feat (provider/google): add new gemini models diff --git a/.changeset/clean-ants-brake.md b/.changeset/clean-ants-brake.md new file mode 100644 index 000000000000..2301c2ffea1b --- /dev/null +++ b/.changeset/clean-ants-brake.md @@ -0,0 +1,8 @@ +--- +'@ai-sdk/svelte': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +--- + +feat (ui): typed tool parts in ui messages diff --git a/.changeset/clean-numbers-cover.md b/.changeset/clean-numbers-cover.md new file mode 100644 index 000000000000..590b5df0eec2 --- /dev/null +++ b/.changeset/clean-numbers-cover.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +feat(provider-utils): add TestServerCall#requestCredentials diff --git a/.changeset/clever-coats-invite.md b/.changeset/clever-coats-invite.md new file mode 100644 index 000000000000..ef5236e06c12 --- /dev/null +++ b/.changeset/clever-coats-invite.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.4 diff 
--git a/.changeset/clever-games-report.md b/.changeset/clever-games-report.md new file mode 100644 index 000000000000..6b199f2e24d5 --- /dev/null +++ b/.changeset/clever-games-report.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gladia': patch +--- + +fix (provider/gladia): correct workspace dependencies diff --git a/.changeset/cold-bags-move.md b/.changeset/cold-bags-move.md new file mode 100644 index 000000000000..e895dd7909f5 --- /dev/null +++ b/.changeset/cold-bags-move.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +update to LanguageModelV2ProviderDefinedClientTool to add server side tool later on diff --git a/.changeset/cool-buckets-shout.md b/.changeset/cool-buckets-shout.md new file mode 100644 index 000000000000..a56d34b8e0c0 --- /dev/null +++ b/.changeset/cool-buckets-shout.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): merge rawRequest into request (language model v2) diff --git a/.changeset/cool-bulldogs-fix.md b/.changeset/cool-bulldogs-fix.md new file mode 100644 index 000000000000..151eab609fc1 --- /dev/null +++ b/.changeset/cool-bulldogs-fix.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/provider': major +'ai': major +--- + +chore (provider,ai): tools have input/output instead of args,result diff --git a/.changeset/cool-gifts-film.md b/.changeset/cool-gifts-film.md new file mode 100644 index 000000000000..68e599500ec2 --- /dev/null +++ b/.changeset/cool-gifts-film.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +chore(providers/openai): update embedding model to use providerOptions diff --git a/.changeset/cool-shrimps-kick.md b/.changeset/cool-shrimps-kick.md new file mode 100644 index 000000000000..86f310cf47f9 --- /dev/null +++ b/.changeset/cool-shrimps-kick.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': major +--- + +remove deprecated `CoreToolCall` and `CoreToolResult` types diff --git a/.changeset/cuddly-eels-perform.md b/.changeset/cuddly-eels-perform.md new file mode 100644 index 000000000000..b441fa12e67f --- 
/dev/null +++ b/.changeset/cuddly-eels-perform.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): change source ui message parts to source-url diff --git a/.changeset/cuddly-icons-kick.md b/.changeset/cuddly-icons-kick.md new file mode 100644 index 000000000000..6450dd0ce5ea --- /dev/null +++ b/.changeset/cuddly-icons-kick.md @@ -0,0 +1,32 @@ +--- +'@ai-sdk/provider-utils': major +'@ai-sdk/google-vertex': major +'@ai-sdk/anthropic': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +'@ai-sdk/amazon-bedrock': major +'@ai-sdk/azure': major +'@ai-sdk/cerebras': major +'@ai-sdk/codemod': major +'@ai-sdk/cohere': major +'@ai-sdk/deepinfra': major +'@ai-sdk/deepseek': major +'@ai-sdk/fal': major +'@ai-sdk/fireworks': major +'@ai-sdk/google': major +'@ai-sdk/groq': major +'@ai-sdk/luma': major +'@ai-sdk/mistral': major +'@ai-sdk/openai': major +'@ai-sdk/openai-compatible': major +'@ai-sdk/perplexity': major +'@ai-sdk/provider': major +'@ai-sdk/replicate': major +'@ai-sdk/svelte': major +'@ai-sdk/togetherai': major +'@ai-sdk/valibot': major +'@ai-sdk/xai': major +--- + +AI SDK 5 diff --git a/.changeset/cuddly-kangaroos-double.md b/.changeset/cuddly-kangaroos-double.md new file mode 100644 index 000000000000..b965d923bfa4 --- /dev/null +++ b/.changeset/cuddly-kangaroos-double.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/amazon-bedrock': patch +--- + +chore(providers/bedrock): use camelCase for providerOptions diff --git a/.changeset/curly-peaches-clap.md b/.changeset/curly-peaches-clap.md new file mode 100644 index 000000000000..df761a3de49c --- /dev/null +++ b/.changeset/curly-peaches-clap.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/cohere': patch +--- + +fix (provider/cohere): tool calling diff --git a/.changeset/curvy-lobsters-share.md b/.changeset/curvy-lobsters-share.md new file mode 100644 index 000000000000..26cf23c436cf --- /dev/null +++ b/.changeset/curvy-lobsters-share.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove "data" UIMessage role diff --git 
a/.changeset/dirty-eggs-breathe.md b/.changeset/dirty-eggs-breathe.md new file mode 100644 index 000000000000..20db775b75c2 --- /dev/null +++ b/.changeset/dirty-eggs-breathe.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +refactoring (ai): restructure message metadata transfer diff --git a/.changeset/dull-candles-trade.md b/.changeset/dull-candles-trade.md new file mode 100644 index 000000000000..800c598d5d5a --- /dev/null +++ b/.changeset/dull-candles-trade.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai-compatible': patch +--- + +chore(openai-compatible): remove simulateStreaming diff --git a/.changeset/dull-points-mate.md b/.changeset/dull-points-mate.md new file mode 100644 index 000000000000..9db19ca02ef3 --- /dev/null +++ b/.changeset/dull-points-mate.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/fal': patch +'ai': patch +--- + +feat(providers/fal): add transcribe diff --git a/.changeset/eight-emus-push.md b/.changeset/eight-emus-push.md new file mode 100644 index 000000000000..9c88f0096937 --- /dev/null +++ b/.changeset/eight-emus-push.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: move warnings into stream-start part (spec) diff --git a/.changeset/eight-months-sip.md b/.changeset/eight-months-sip.md new file mode 100644 index 000000000000..2e76d888d867 --- /dev/null +++ b/.changeset/eight-months-sip.md @@ -0,0 +1,14 @@ +--- +'@ai-sdk/provider-utils': patch +'ai': patch +--- + +feat: support for zod v4 for schema validation + +All these methods now accept both a zod v4 and zod v3 schemas for validation: + +- `generateObject()` +- `streamObject()` +- `generateText()` +- `experimental_useObject()` from `@ai-sdk/react` +- `streamUI()` from `@ai-sdk/rsc` diff --git a/.changeset/eighty-flowers-design.md b/.changeset/eighty-flowers-design.md new file mode 100644 index 000000000000..980e07a1e166 --- /dev/null +++ b/.changeset/eighty-flowers-design.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): expose ui message stream headers diff --git 
a/.changeset/eighty-pugs-sip.md b/.changeset/eighty-pugs-sip.md new file mode 100644 index 000000000000..ca8c1fe9d9b1 --- /dev/null +++ b/.changeset/eighty-pugs-sip.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.15 diff --git a/.changeset/eighty-seals-search.md b/.changeset/eighty-seals-search.md new file mode 100644 index 000000000000..40c6eee0472a --- /dev/null +++ b/.changeset/eighty-seals-search.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/xai': patch +--- + +fix(providers/xai): edit supported models for structured output diff --git a/.changeset/eleven-lobsters-rescue.md b/.changeset/eleven-lobsters-rescue.md new file mode 100644 index 000000000000..591a7d207840 --- /dev/null +++ b/.changeset/eleven-lobsters-rescue.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +refactor (provider-utils): move `customAlphabet()` method from `nanoid` into codebase diff --git a/.changeset/eleven-pets-clean.md b/.changeset/eleven-pets-clean.md new file mode 100644 index 000000000000..79b870a1977a --- /dev/null +++ b/.changeset/eleven-pets-clean.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +chore (provider): tweak provider definition diff --git a/.changeset/empty-fireants-learn.md b/.changeset/empty-fireants-learn.md new file mode 100644 index 000000000000..09e803176f89 --- /dev/null +++ b/.changeset/empty-fireants-learn.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove exports of internal ui functions diff --git a/.changeset/empty-flowers-sniff.md b/.changeset/empty-flowers-sniff.md new file mode 100644 index 000000000000..a4f0ce53e697 --- /dev/null +++ b/.changeset/empty-flowers-sniff.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +update supportedUrls to only support native URL diff --git a/.changeset/empty-pets-jump.md b/.changeset/empty-pets-jump.md new file mode 100644 index 000000000000..8193bfdeae5d --- /dev/null +++ b/.changeset/empty-pets-jump.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + 
+chore(providers/openai-transcription): switch to providerOptions diff --git a/.changeset/empty-walls-rest.md b/.changeset/empty-walls-rest.md new file mode 100644 index 000000000000..c412dceab73d --- /dev/null +++ b/.changeset/empty-walls-rest.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +Add reasoning-part-finish parts for reasoning models in the responses API diff --git a/.changeset/fair-bikes-hear.md b/.changeset/fair-bikes-hear.md new file mode 100644 index 000000000000..3e284eb59434 --- /dev/null +++ b/.changeset/fair-bikes-hear.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/svelte': major +'@ai-sdk/react': major +'ai': major +--- + +chore (ui): inline/remove ChatRequest type diff --git a/.changeset/fair-cobras-tan.md b/.changeset/fair-cobras-tan.md new file mode 100644 index 000000000000..72a9f7ccf495 --- /dev/null +++ b/.changeset/fair-cobras-tan.md @@ -0,0 +1,12 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/togetherai': patch +'@ai-sdk/deepinfra': patch +'@ai-sdk/fireworks': patch +'@ai-sdk/cerebras': patch +'@ai-sdk/deepseek': patch +'@ai-sdk/xai': patch +--- + +feat(providers/openai-compatible): convert to providerOptions diff --git a/.changeset/fair-cups-travel.md b/.changeset/fair-cups-travel.md new file mode 100644 index 000000000000..e39ab1ede755 --- /dev/null +++ b/.changeset/fair-cups-travel.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/revai': patch +--- + +feat(providers/revai): add transcribe diff --git a/.changeset/fair-swans-kneel.md b/.changeset/fair-swans-kneel.md new file mode 100644 index 000000000000..92c22f8dba62 --- /dev/null +++ b/.changeset/fair-swans-kneel.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google-vertex': patch +--- + +chore(providers/google-vertex): update embedding model to use providerOptions diff --git a/.changeset/famous-eggs-camp.md b/.changeset/famous-eggs-camp.md new file mode 100644 index 000000000000..2447d32217af --- /dev/null +++ b/.changeset/famous-eggs-camp.md @@ -0,0 +1,5 @@ +--- 
+'@ai-sdk/perplexity': patch +--- + +feat (provider/perplexity): add sonar-deep-research model diff --git a/.changeset/famous-fans-provide.md b/.changeset/famous-fans-provide.md new file mode 100644 index 000000000000..6e724ec58ced --- /dev/null +++ b/.changeset/famous-fans-provide.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore(providers/llamaindex): extract to separate package diff --git a/.changeset/famous-peas-arrive.md b/.changeset/famous-peas-arrive.md new file mode 100644 index 000000000000..52382ddcddc0 --- /dev/null +++ b/.changeset/famous-peas-arrive.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): stable activeTools diff --git a/.changeset/famous-shrimps-fail.md b/.changeset/famous-shrimps-fail.md new file mode 100644 index 000000000000..691ce6603fc0 --- /dev/null +++ b/.changeset/famous-shrimps-fail.md @@ -0,0 +1,9 @@ +--- +'@ai-sdk/provider-utils': major +'@ai-sdk/svelte': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +--- + +feat (ui): introduce ChatStore and ChatTransport diff --git a/.changeset/famous-ties-train.md b/.changeset/famous-ties-train.md new file mode 100644 index 000000000000..d80c1683a6ec --- /dev/null +++ b/.changeset/famous-ties-train.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat (providers/openai): add gpt-image-1 model id to image settings diff --git a/.changeset/fast-students-turn.md b/.changeset/fast-students-turn.md new file mode 100644 index 000000000000..1a26061afd09 --- /dev/null +++ b/.changeset/fast-students-turn.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/xai': patch +--- + +fix(providers/xai): return actual usage when streaming instead of NaN diff --git a/.changeset/fast-toys-dream.md b/.changeset/fast-toys-dream.md new file mode 100644 index 000000000000..25b8401d1b98 --- /dev/null +++ b/.changeset/fast-toys-dream.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +feat(providers/google): Add taskType support for Text Embedding Models diff --git 
a/.changeset/few-jobs-mate.md b/.changeset/few-jobs-mate.md new file mode 100644 index 000000000000..6aa40d92b5c2 --- /dev/null +++ b/.changeset/few-jobs-mate.md @@ -0,0 +1,8 @@ +--- +'@ai-sdk/svelte': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +--- + +chore (ui): remove managed chat inputs diff --git a/.changeset/few-kangaroos-remember.md b/.changeset/few-kangaroos-remember.md new file mode 100644 index 000000000000..2b2678a7b27f --- /dev/null +++ b/.changeset/few-kangaroos-remember.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/deepgram': patch +--- + +feat(providers/deepgram): add transcribe diff --git a/.changeset/few-pianos-pay.md b/.changeset/few-pianos-pay.md new file mode 100644 index 000000000000..ff3618bd4929 --- /dev/null +++ b/.changeset/few-pianos-pay.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.8 diff --git a/.changeset/fifty-camels-visit.md b/.changeset/fifty-camels-visit.md new file mode 100644 index 000000000000..907ba6519b8b --- /dev/null +++ b/.changeset/fifty-camels-visit.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/azure': patch +--- + +feat(providers/azure): add transcribe diff --git a/.changeset/fifty-shrimps-kick.md b/.changeset/fifty-shrimps-kick.md new file mode 100644 index 000000000000..8d9e94d49dc4 --- /dev/null +++ b/.changeset/fifty-shrimps-kick.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google-vertex': patch +--- + +feat (google-vertex): Set `.providerMetaData` for image model responses diff --git a/.changeset/five-ravens-hammer.md b/.changeset/five-ravens-hammer.md new file mode 100644 index 000000000000..f753de79b960 --- /dev/null +++ b/.changeset/five-ravens-hammer.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +feat (providers/google): add thinking config to provider options diff --git a/.changeset/fix-env-mutation.md b/.changeset/fix-env-mutation.md new file mode 100644 index 000000000000..86cd47b5e2d3 --- /dev/null +++ b/.changeset/fix-env-mutation.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix 
(ai/mcp): prevent mutation of customEnv diff --git a/.changeset/flat-plums-bake.md b/.changeset/flat-plums-bake.md new file mode 100644 index 000000000000..d7092eca4757 --- /dev/null +++ b/.changeset/flat-plums-bake.md @@ -0,0 +1,5 @@ +--- +'ai': minor +--- + +feat (core): Add finishReason field to NoObjectGeneratedError diff --git a/.changeset/fluffy-pets-pump.md b/.changeset/fluffy-pets-pump.md new file mode 100644 index 000000000000..94bd962becb6 --- /dev/null +++ b/.changeset/fluffy-pets-pump.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +adding support for gpt-4o-search-preview and handling unsupported parameters diff --git a/.changeset/forty-kangaroos-pull.md b/.changeset/forty-kangaroos-pull.md new file mode 100644 index 000000000000..bbab7a97d3ce --- /dev/null +++ b/.changeset/forty-kangaroos-pull.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove StreamCallbacks.onCompletion diff --git a/.changeset/fresh-forks-punch.md b/.changeset/fresh-forks-punch.md new file mode 100644 index 000000000000..0837217f3291 --- /dev/null +++ b/.changeset/fresh-forks-punch.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai-compatible': patch +--- + +fix (provider/openai-compatible): change tool_call type schema to nullish diff --git a/.changeset/fresh-otters-chew.md b/.changeset/fresh-otters-chew.md new file mode 100644 index 000000000000..001dc398a008 --- /dev/null +++ b/.changeset/fresh-otters-chew.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore (ai): make ui stream parts value optional when it's not required diff --git a/.changeset/fresh-swans-march.md b/.changeset/fresh-swans-march.md new file mode 100644 index 000000000000..2603793c5928 --- /dev/null +++ b/.changeset/fresh-swans-march.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove getUIText helper diff --git a/.changeset/friendly-otters-sneeze.md b/.changeset/friendly-otters-sneeze.md new file mode 100644 index 000000000000..14851defb958 --- /dev/null +++ 
b/.changeset/friendly-otters-sneeze.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +feat(anthropic/citation): text support for citations diff --git a/.changeset/funny-cows-sin.md b/.changeset/funny-cows-sin.md new file mode 100644 index 000000000000..7c0764a1d1c5 --- /dev/null +++ b/.changeset/funny-cows-sin.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat(streamObject): add enum support diff --git a/.changeset/funny-mayflies-yawn.md b/.changeset/funny-mayflies-yawn.md new file mode 100644 index 000000000000..61e6b324d60c --- /dev/null +++ b/.changeset/funny-mayflies-yawn.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove useChat keepLastMessageOnError diff --git a/.changeset/fuzzy-comics-listen.md b/.changeset/fuzzy-comics-listen.md new file mode 100644 index 000000000000..e37d3446c458 --- /dev/null +++ b/.changeset/fuzzy-comics-listen.md @@ -0,0 +1,5 @@ +--- +'ai': minor +--- + +feat (ai): add content to generateText result diff --git a/.changeset/fuzzy-lies-explain.md b/.changeset/fuzzy-lies-explain.md new file mode 100644 index 000000000000..492a35fde72d --- /dev/null +++ b/.changeset/fuzzy-lies-explain.md @@ -0,0 +1,24 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/perplexity': patch +'@ai-sdk/togetherai': patch +'@ai-sdk/anthropic': patch +'@ai-sdk/deepinfra': patch +'@ai-sdk/fireworks': patch +'@ai-sdk/cerebras': patch +'@ai-sdk/deepseek': patch +'@ai-sdk/gateway': patch +'@ai-sdk/mistral': patch +'@ai-sdk/cohere': patch +'@ai-sdk/google': patch +'@ai-sdk/openai': patch +'@ai-sdk/vercel': patch +'@ai-sdk/azure': patch +'@ai-sdk/groq': patch +'@ai-sdk/xai': patch +'@ai-sdk/provider': patch +--- + +feat: add raw chunk support diff --git a/.changeset/fuzzy-shoes-act.md b/.changeset/fuzzy-shoes-act.md new file mode 100644 index 000000000000..8ff9a149ac02 --- /dev/null +++ b/.changeset/fuzzy-shoes-act.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat 
(providers/openai): add gpt-4.1 models diff --git a/.changeset/gentle-gorillas-mate.md b/.changeset/gentle-gorillas-mate.md new file mode 100644 index 000000000000..48d00079cc1f --- /dev/null +++ b/.changeset/gentle-gorillas-mate.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): refactor usage (language model v2) diff --git a/.changeset/gentle-mayflies-call.md b/.changeset/gentle-mayflies-call.md new file mode 100644 index 000000000000..7c2d5116e401 --- /dev/null +++ b/.changeset/gentle-mayflies-call.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ui): add onFinish to createUIMessageStream diff --git a/.changeset/gentle-toys-smile.md b/.changeset/gentle-toys-smile.md new file mode 100644 index 000000000000..0d1edccb0a12 --- /dev/null +++ b/.changeset/gentle-toys-smile.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix(utils/detect-mimetype): add support for detecting id3 tags diff --git a/.changeset/gold-planes-cheer.md b/.changeset/gold-planes-cheer.md new file mode 100644 index 000000000000..31b2689bde0d --- /dev/null +++ b/.changeset/gold-planes-cheer.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +chore (provider-utils): use eventsource-parser library diff --git a/.changeset/good-students-sin.md b/.changeset/good-students-sin.md new file mode 100644 index 000000000000..fb20e5e1b2b9 --- /dev/null +++ b/.changeset/good-students-sin.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +feat(provider/anthropic): add PDF citation support with document sources for streamText diff --git a/.changeset/good-swans-heal.md b/.changeset/good-swans-heal.md new file mode 100644 index 000000000000..22d25378ac22 --- /dev/null +++ b/.changeset/good-swans-heal.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): improve consistency of generate text result, stream text result, and step result diff --git a/.changeset/great-hats-hide.md b/.changeset/great-hats-hide.md new file mode 100644 index 000000000000..1db301ee34c7 --- 
/dev/null +++ b/.changeset/great-hats-hide.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai): respect content order in toResponseMessages diff --git a/.changeset/great-mangos-scream.md b/.changeset/great-mangos-scream.md new file mode 100644 index 000000000000..d031f567c811 --- /dev/null +++ b/.changeset/great-mangos-scream.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore (ai): move maxSteps into UseChatOptions diff --git a/.changeset/great-poets-attack.md b/.changeset/great-poets-attack.md new file mode 100644 index 000000000000..8d82ef74b449 --- /dev/null +++ b/.changeset/great-poets-attack.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat (providers/openai): add o3 and o4-mini models diff --git a/.changeset/green-deers-scream.md b/.changeset/green-deers-scream.md new file mode 100644 index 000000000000..debc42602568 --- /dev/null +++ b/.changeset/green-deers-scream.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +--- + +chore (ui): remove useAssistant hook (**breaking change**) diff --git a/.changeset/green-dogs-kick.md b/.changeset/green-dogs-kick.md new file mode 100644 index 000000000000..451da3e2ffcf --- /dev/null +++ b/.changeset/green-dogs-kick.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): introduce GLOBAL_DEFAULT_PROVIDER diff --git a/.changeset/green-grapes-rhyme.md b/.changeset/green-grapes-rhyme.md new file mode 100644 index 000000000000..366e01f24360 --- /dev/null +++ b/.changeset/green-grapes-rhyme.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/openai': patch +'@ai-sdk/azure': patch +--- + +chore(providers/openai): update completion model to use providerOptions diff --git a/.changeset/green-pots-collect.md b/.changeset/green-pots-collect.md new file mode 100644 index 000000000000..0ca5667af5d7 --- /dev/null +++ b/.changeset/green-pots-collect.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +chore (provider): change getSupportedUrls to supportedUrls 
(language model v2) diff --git a/.changeset/happy-ads-happen.md b/.changeset/happy-ads-happen.md new file mode 100644 index 000000000000..73746138471d --- /dev/null +++ b/.changeset/happy-ads-happen.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): send reasoning to the client by default diff --git a/.changeset/happy-countries-dream.md b/.changeset/happy-countries-dream.md new file mode 100644 index 000000000000..8e441522e63f --- /dev/null +++ b/.changeset/happy-countries-dream.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ui/react): update messages immediately with the submitted user message diff --git a/.changeset/happy-kangaroos-roll.md b/.changeset/happy-kangaroos-roll.md new file mode 100644 index 000000000000..8b0e54911d9c --- /dev/null +++ b/.changeset/happy-kangaroos-roll.md @@ -0,0 +1,8 @@ +--- +'@ai-sdk/provider-utils': patch +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +feat: add transcription with experimental_transcribe diff --git a/.changeset/healthy-humans-burn.md b/.changeset/healthy-humans-burn.md new file mode 100644 index 000000000000..66b0e16546fc --- /dev/null +++ b/.changeset/healthy-humans-burn.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gateway': patch +--- + +feat (providers/gateway): share common gateway error transform logic diff --git a/.changeset/heavy-ducks-join.md b/.changeset/heavy-ducks-join.md new file mode 100644 index 000000000000..90c2c0608a3f --- /dev/null +++ b/.changeset/heavy-ducks-join.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove onResponse callback diff --git a/.changeset/heavy-ligers-lay.md b/.changeset/heavy-ligers-lay.md new file mode 100644 index 000000000000..b87f18bc7826 --- /dev/null +++ b/.changeset/heavy-ligers-lay.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/vue': patch +--- + +fix (ai-sdk/vue): fix status reactivity diff --git a/.changeset/heavy-pens-destroy.md b/.changeset/heavy-pens-destroy.md new file mode 100644 index 000000000000..ff2bc6427cf1 --- /dev/null +++ 
b/.changeset/heavy-pens-destroy.md @@ -0,0 +1,16 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/togetherai': patch +'@ai-sdk/deepinfra': patch +'@ai-sdk/fireworks': patch +'@ai-sdk/provider': patch +'@ai-sdk/mistral': patch +'@ai-sdk/cohere': patch +'@ai-sdk/google': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +chore(embedding-model): add v2 interface diff --git a/.changeset/hip-eagles-attend.md b/.changeset/hip-eagles-attend.md new file mode 100644 index 000000000000..78aff528dea9 --- /dev/null +++ b/.changeset/hip-eagles-attend.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): replace `Message` with `UIMessage` diff --git a/.changeset/hip-rocks-mix.md b/.changeset/hip-rocks-mix.md new file mode 100644 index 000000000000..6b1229425754 --- /dev/null +++ b/.changeset/hip-rocks-mix.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): support string model ids through gateway diff --git a/.changeset/hot-colts-hear.md b/.changeset/hot-colts-hear.md new file mode 100644 index 000000000000..aefec2c85b6d --- /dev/null +++ b/.changeset/hot-colts-hear.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): improve prompt validation error message diff --git a/.changeset/hot-singers-help.md b/.changeset/hot-singers-help.md new file mode 100644 index 000000000000..ad44f66ea4a1 --- /dev/null +++ b/.changeset/hot-singers-help.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename default provider global to AI_SDK_DEFAULT_PROVIDER diff --git a/.changeset/huge-cloths-burn.md b/.changeset/huge-cloths-burn.md new file mode 100644 index 000000000000..bdb13e564d46 --- /dev/null +++ b/.changeset/huge-cloths-burn.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +fix (provider/google): allow "OFF" for Google HarmBlockThreshold diff --git a/.changeset/hungry-bears-glow.md b/.changeset/hungry-bears-glow.md new file mode 100644 index 000000000000..facb46f1d788 --- /dev/null +++ 
b/.changeset/hungry-bears-glow.md @@ -0,0 +1,11 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/provider': patch +'@ai-sdk/mistral': patch +'@ai-sdk/cohere': patch +'@ai-sdk/google': patch +'@ai-sdk/openai': patch +--- + +feat(embedding-model-v2): add response body field diff --git a/.changeset/hungry-frogs-eat.md b/.changeset/hungry-frogs-eat.md new file mode 100644 index 000000000000..1c69101907e5 --- /dev/null +++ b/.changeset/hungry-frogs-eat.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/google-vertex': patch +'@ai-sdk/anthropic': patch +--- + +chore(providers/anthropic): switch to providerOptions diff --git a/.changeset/hungry-frogs-raise.md b/.changeset/hungry-frogs-raise.md new file mode 100644 index 000000000000..88f9fb1c9c83 --- /dev/null +++ b/.changeset/hungry-frogs-raise.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai-compatible': patch +--- + +feat(providers/xai): add reasoningEffort provider option diff --git a/.changeset/hungry-hotels-hunt.md b/.changeset/hungry-hotels-hunt.md new file mode 100644 index 000000000000..53175e9de7ef --- /dev/null +++ b/.changeset/hungry-hotels-hunt.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/provider': patch +'ai': patch +--- + +feat(embed-many): respect supportsParallelCalls & concurrency diff --git a/.changeset/hungry-pets-hear.md b/.changeset/hungry-pets-hear.md new file mode 100644 index 000000000000..a971cdca9127 --- /dev/null +++ b/.changeset/hungry-pets-hear.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: rename mimeType to mediaType diff --git a/.changeset/hungry-trains-compete.md b/.changeset/hungry-trains-compete.md new file mode 100644 index 000000000000..a37d0be31523 --- /dev/null +++ b/.changeset/hungry-trains-compete.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/react': patch +--- + +fix (react): integrate addToolResult into UseChatHelpers type without intersection diff --git a/.changeset/hungry-zebras-applaud.md b/.changeset/hungry-zebras-applaud.md new file 
mode 100644 index 000000000000..f86fc3d5e9fc --- /dev/null +++ b/.changeset/hungry-zebras-applaud.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +remove deprecated `experimental_wrapLanguageModel` diff --git a/.changeset/itchy-bats-breathe.md b/.changeset/itchy-bats-breathe.md new file mode 100644 index 000000000000..b6f4c7c370a6 --- /dev/null +++ b/.changeset/itchy-bats-breathe.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove content from ui messages diff --git a/.changeset/itchy-cars-relax.md b/.changeset/itchy-cars-relax.md new file mode 100644 index 000000000000..3920252c482b --- /dev/null +++ b/.changeset/itchy-cars-relax.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/provider': major +'ai': major +--- + +chore: refactor file towards source pattern (spec) diff --git a/.changeset/itchy-deers-jog.md b/.changeset/itchy-deers-jog.md new file mode 100644 index 000000000000..d438067231c3 --- /dev/null +++ b/.changeset/itchy-deers-jog.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: remove logprobs diff --git a/.changeset/itchy-pumas-wave.md b/.changeset/itchy-pumas-wave.md new file mode 100644 index 000000000000..307071061ee7 --- /dev/null +++ b/.changeset/itchy-pumas-wave.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat(provider/openai): add serviceTier option for flex processing diff --git a/.changeset/khaki-bears-drop.md b/.changeset/khaki-bears-drop.md new file mode 100644 index 000000000000..2fece2f7f833 --- /dev/null +++ b/.changeset/khaki-bears-drop.md @@ -0,0 +1,8 @@ +--- +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'@ai-sdk/azure': patch +'ai': patch +--- + +chore(providers/openai): enable structuredOutputs by default & switch to provider option diff --git a/.changeset/khaki-sheep-sparkle.md b/.changeset/khaki-sheep-sparkle.md new file mode 100644 index 000000000000..9ef44b00128c --- /dev/null +++ b/.changeset/khaki-sheep-sparkle.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/anthropic': patch +'@ai-sdk/provider': patch +--- + 
+feat(anthropic): add server-side web search support diff --git a/.changeset/khaki-tomatoes-think.md b/.changeset/khaki-tomatoes-think.md new file mode 100644 index 000000000000..e8c0e18d6e98 --- /dev/null +++ b/.changeset/khaki-tomatoes-think.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai): do not send id with start unless specified diff --git a/.changeset/large-peas-eat.md b/.changeset/large-peas-eat.md new file mode 100644 index 000000000000..81075bfef3e1 --- /dev/null +++ b/.changeset/large-peas-eat.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove `data` and `allowEmptySubmit` from `ChatRequestOptions` diff --git a/.changeset/large-ties-own.md b/.changeset/large-ties-own.md new file mode 100644 index 000000000000..28988788a7e9 --- /dev/null +++ b/.changeset/large-ties-own.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix(react-native): support experimental_attachments without FileList global diff --git a/.changeset/late-brooms-suffer.md b/.changeset/late-brooms-suffer.md new file mode 100644 index 000000000000..1f6a948aa1f2 --- /dev/null +++ b/.changeset/late-brooms-suffer.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore(ai/generateObject): simplify function signature diff --git a/.changeset/late-foxes-battle.md b/.changeset/late-foxes-battle.md new file mode 100644 index 000000000000..21d56afcb38b --- /dev/null +++ b/.changeset/late-foxes-battle.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +refactor (provider-utils): copy relevant code from `secure-json-parse` into codebase diff --git a/.changeset/lazy-ducks-cheat.md b/.changeset/lazy-ducks-cheat.md new file mode 100644 index 000000000000..563bddf4bb73 --- /dev/null +++ b/.changeset/lazy-ducks-cheat.md @@ -0,0 +1,11 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/provider-utils': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/anthropic': patch +'@ai-sdk/google': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +fix(packages): export node10 compatible types diff 
--git a/.changeset/lemon-actors-invite.md b/.changeset/lemon-actors-invite.md new file mode 100644 index 000000000000..4a726ea2a3e1 --- /dev/null +++ b/.changeset/lemon-actors-invite.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): replace useChat attachments with file ui parts diff --git a/.changeset/lemon-terms-hug.md b/.changeset/lemon-terms-hug.md new file mode 100644 index 000000000000..3585ea8b54d9 --- /dev/null +++ b/.changeset/lemon-terms-hug.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix(ai): remove jsondiffpatch dependency diff --git a/.changeset/lemon-yaks-move.md b/.changeset/lemon-yaks-move.md new file mode 100644 index 000000000000..4412d4b4ea00 --- /dev/null +++ b/.changeset/lemon-yaks-move.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/mistral': patch +--- + +feat(mistral): added magistral reasoning models diff --git a/.changeset/light-books-poke.md b/.changeset/light-books-poke.md new file mode 100644 index 000000000000..6ecff2f6ba52 --- /dev/null +++ b/.changeset/light-books-poke.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): flatten ui message stream parts diff --git a/.changeset/light-rules-film.md b/.changeset/light-rules-film.md new file mode 100644 index 000000000000..72c71c7b7e3f --- /dev/null +++ b/.changeset/light-rules-film.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai/core): refactor `toResponseMessages` to filter out empty string/content diff --git a/.changeset/little-bobcats-jog.md b/.changeset/little-bobcats-jog.md new file mode 100644 index 000000000000..109a1797589d --- /dev/null +++ b/.changeset/little-bobcats-jog.md @@ -0,0 +1,5 @@ +--- +'ai': minor +--- + +feat (ai): add content to streamText result diff --git a/.changeset/little-carrots-speak.md b/.changeset/little-carrots-speak.md new file mode 100644 index 000000000000..0332b14e2064 --- /dev/null +++ b/.changeset/little-carrots-speak.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/vercel': patch +--- + +feat (providers/vercel): initial vercel provider diff --git 
a/.changeset/little-tips-occur.md b/.changeset/little-tips-occur.md new file mode 100644 index 000000000000..31fec00b6f34 --- /dev/null +++ b/.changeset/little-tips-occur.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): separate TextStreamChatTransport diff --git a/.changeset/little-zebras-suffer.md b/.changeset/little-zebras-suffer.md new file mode 100644 index 000000000000..2087758f0b68 --- /dev/null +++ b/.changeset/little-zebras-suffer.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix: propagate openai transcription fixes diff --git a/.changeset/lovely-garlics-promise.md b/.changeset/lovely-garlics-promise.md new file mode 100644 index 000000000000..c2cf7ac2c91c --- /dev/null +++ b/.changeset/lovely-garlics-promise.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gateway': patch +--- + +feat (providers/gateway): initial gateway provider diff --git a/.changeset/many-beans-exercise.md b/.changeset/many-beans-exercise.md new file mode 100644 index 000000000000..6523a5b2f27b --- /dev/null +++ b/.changeset/many-beans-exercise.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat(embedding-model-v2/embedMany): add response body field diff --git a/.changeset/many-toes-glow.md b/.changeset/many-toes-glow.md new file mode 100644 index 000000000000..2f63db0cead3 --- /dev/null +++ b/.changeset/many-toes-glow.md @@ -0,0 +1,14 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/togetherai': patch +'@ai-sdk/deepinfra': patch +'@ai-sdk/fireworks': patch +'@ai-sdk/replicate': patch +'@ai-sdk/luma': patch +'@ai-sdk/fal': patch +'ai': patch +--- + +fix (image-model): `specificationVersion: v1` -> `v2` diff --git a/.changeset/mean-files-talk.md b/.changeset/mean-files-talk.md new file mode 100644 index 000000000000..3579b921f5aa --- /dev/null +++ b/.changeset/mean-files-talk.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): add data ui part schemas diff --git a/.changeset/mean-monkeys-sip.md 
b/.changeset/mean-monkeys-sip.md new file mode 100644 index 000000000000..6c78e545fe25 --- /dev/null +++ b/.changeset/mean-monkeys-sip.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat (providers/openai): add support for reasoning summaries diff --git a/.changeset/metal-insects-tease.md b/.changeset/metal-insects-tease.md new file mode 100644 index 000000000000..8311ca729e3c --- /dev/null +++ b/.changeset/metal-insects-tease.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: refactor reasoning parts (spec) diff --git a/.changeset/modern-kings-smoke.md b/.changeset/modern-kings-smoke.md new file mode 100644 index 000000000000..2e64edd76ed3 --- /dev/null +++ b/.changeset/modern-kings-smoke.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/lmnt': patch +--- + +feat(providers/lmnt): add speech diff --git a/.changeset/moody-rings-remember.md b/.changeset/moody-rings-remember.md new file mode 100644 index 000000000000..c1ea4807ae7a --- /dev/null +++ b/.changeset/moody-rings-remember.md @@ -0,0 +1,18 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/togetherai': patch +'@ai-sdk/deepinfra': patch +'@ai-sdk/fireworks': patch +'@ai-sdk/replicate': patch +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'@ai-sdk/azure': patch +'@ai-sdk/luma': patch +'@ai-sdk/fal': patch +'@ai-sdk/xai': patch +'ai': patch +--- + +refactor (image-model): rename `ImageModelV1` to `ImageModelV2` diff --git a/.changeset/moody-yaks-love.md b/.changeset/moody-yaks-love.md new file mode 100644 index 000000000000..13661d7d9fa4 --- /dev/null +++ b/.changeset/moody-yaks-love.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +chore(providers/google): update embedding model to use providerOptions diff --git a/.changeset/nasty-lobsters-shave.md b/.changeset/nasty-lobsters-shave.md new file mode 100644 index 000000000000..1b7ba955bed7 --- /dev/null +++ b/.changeset/nasty-lobsters-shave.md @@ -0,0 +1,5 @@ +--- +'ai': 
major +--- + +chore (ai): remove redundant `mimeType` property diff --git a/.changeset/nasty-spiders-sparkle.md b/.changeset/nasty-spiders-sparkle.md new file mode 100644 index 000000000000..7245e14bdfbf --- /dev/null +++ b/.changeset/nasty-spiders-sparkle.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +chore (ai): change transcription model mimeType to mediaType diff --git a/.changeset/nasty-trains-beg.md b/.changeset/nasty-trains-beg.md new file mode 100644 index 000000000000..07cbbef43bcd --- /dev/null +++ b/.changeset/nasty-trains-beg.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove ui message toolInvocations property diff --git a/.changeset/neat-pillows-occur.md b/.changeset/neat-pillows-occur.md new file mode 100644 index 000000000000..9a2712ed8dfb --- /dev/null +++ b/.changeset/neat-pillows-occur.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +chore (provider): extract shared provider options and metadata (spec) diff --git a/.changeset/nervous-maps-fix.md b/.changeset/nervous-maps-fix.md new file mode 100644 index 000000000000..ba88d7e078e0 --- /dev/null +++ b/.changeset/nervous-maps-fix.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': major +--- + +chore (provider-utils): rename TestServerCall.requestBody to requestBodyJson diff --git a/.changeset/new-pens-remain.md b/.changeset/new-pens-remain.md new file mode 100644 index 000000000000..71deb261d0cf --- /dev/null +++ b/.changeset/new-pens-remain.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix: avoid job executor deadlock when adding tool result diff --git a/.changeset/new-vans-obey.md b/.changeset/new-vans-obey.md new file mode 100644 index 000000000000..2c4aaddf9559 --- /dev/null +++ b/.changeset/new-vans-obey.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +chore (provider): extract LanguageModelV2File diff --git a/.changeset/nice-tips-walk.md b/.changeset/nice-tips-walk.md new file mode 100644 index 
000000000000..7a276da9a766 --- /dev/null +++ b/.changeset/nice-tips-walk.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: restructure reasoning support diff --git a/.changeset/nine-jars-hammer.md b/.changeset/nine-jars-hammer.md new file mode 100644 index 000000000000..e578c3b13b6a --- /dev/null +++ b/.changeset/nine-jars-hammer.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove deprecated experimental_providerMetadata diff --git a/.changeset/nine-pillows-hug.md b/.changeset/nine-pillows-hug.md new file mode 100644 index 000000000000..bccb1f0a0e6f --- /dev/null +++ b/.changeset/nine-pillows-hug.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/react': patch +--- + +feat (ui/react): support resuming an ongoing stream diff --git a/.changeset/nine-rivers-compete.md b/.changeset/nine-rivers-compete.md new file mode 100644 index 000000000000..b516d01ff6f1 --- /dev/null +++ b/.changeset/nine-rivers-compete.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/google-vertex': patch +'@ai-sdk/google': patch +--- + +chore(providers/google): switch to providerOptions diff --git a/.changeset/ninety-cameras-wonder.md b/.changeset/ninety-cameras-wonder.md new file mode 100644 index 000000000000..c7c31940af71 --- /dev/null +++ b/.changeset/ninety-cameras-wonder.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ui): rename experimental_resume to resumeStream diff --git a/.changeset/odd-tables-cross.md b/.changeset/odd-tables-cross.md new file mode 100644 index 000000000000..bae251f1cf43 --- /dev/null +++ b/.changeset/odd-tables-cross.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +fix(providers/google): accept nullish in safetyRatings diff --git a/.changeset/odd-vans-suffer.md b/.changeset/odd-vans-suffer.md new file mode 100644 index 000000000000..2f6a430c9e9b --- /dev/null +++ b/.changeset/odd-vans-suffer.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/xai': patch +--- + +chore (providers/xai): update grok-3 model aliases diff --git a/.changeset/old-moons-kiss.md 
b/.changeset/old-moons-kiss.md new file mode 100644 index 000000000000..8faa48e335e5 --- /dev/null +++ b/.changeset/old-moons-kiss.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +remove deprecated `CoreTool*` types diff --git a/.changeset/olive-candles-compare.md b/.changeset/olive-candles-compare.md new file mode 100644 index 000000000000..b0971eec9273 --- /dev/null +++ b/.changeset/olive-candles-compare.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +feat: add speech with experimental_generateSpeech diff --git a/.changeset/olive-ducks-carry.md b/.changeset/olive-ducks-carry.md new file mode 100644 index 000000000000..89a57586c535 --- /dev/null +++ b/.changeset/olive-ducks-carry.md @@ -0,0 +1,9 @@ +--- +'@ai-sdk/provider': patch +'@ai-sdk/mistral': patch +'@ai-sdk/openai': patch +'@ai-sdk/azure': patch +'ai': patch +--- + +chore(embedding-models): remove remaining settings diff --git a/.changeset/olive-wombats-pretend.md b/.changeset/olive-wombats-pretend.md new file mode 100644 index 000000000000..c8698a34a28b --- /dev/null +++ b/.changeset/olive-wombats-pretend.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +fix (provider/anthropic): return stop finish reason for json output with tool diff --git a/.changeset/orange-bags-stare.md b/.changeset/orange-bags-stare.md new file mode 100644 index 000000000000..6de6e7565df4 --- /dev/null +++ b/.changeset/orange-bags-stare.md @@ -0,0 +1,9 @@ +--- +'ai': patch +--- + +### Fix use with Google APIs + zod v4's `.literal()` schema + +Before [zod@3.25.49](https://github.com/colinhacks/zod/releases/tag/v3.25.49), requests to Google's APIs failed due to a missing `type` in the provided schema. The problem has been resolved for the `ai` SDK by bumping our `zod` peer dependencies to `^3.25.49`. 
+ +pull request: https://github.com/vercel/ai/pull/6609 diff --git a/.changeset/pink-deers-switch.md b/.changeset/pink-deers-switch.md new file mode 100644 index 000000000000..84d524aeb623 --- /dev/null +++ b/.changeset/pink-deers-switch.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/google-vertex': patch +'@ai-sdk/google': patch +--- + +feat: add provider option schemas for vertex imagegen and google genai diff --git a/.changeset/pink-mangos-tickle.md b/.changeset/pink-mangos-tickle.md new file mode 100644 index 000000000000..988f511bf4c7 --- /dev/null +++ b/.changeset/pink-mangos-tickle.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +chore(providers/openai): re-introduce logprobs as providerMetadata diff --git a/.changeset/plenty-bears-run.md b/.changeset/plenty-bears-run.md new file mode 100644 index 000000000000..42ed27e2f01d --- /dev/null +++ b/.changeset/plenty-bears-run.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ui): data stream protocol v2 with SSEs diff --git a/.changeset/plenty-dingos-bow.md b/.changeset/plenty-dingos-bow.md new file mode 100644 index 000000000000..64dd364e6314 --- /dev/null +++ b/.changeset/plenty-dingos-bow.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): allow sync prepareStep diff --git a/.changeset/plenty-radios-travel.md b/.changeset/plenty-radios-travel.md new file mode 100644 index 000000000000..2355d851978f --- /dev/null +++ b/.changeset/plenty-radios-travel.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): refactor and use chatstore in svelte diff --git a/.changeset/poor-bees-do.md b/.changeset/poor-bees-do.md new file mode 100644 index 000000000000..c059d28637ee --- /dev/null +++ b/.changeset/poor-bees-do.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): improved error messages when using gateway diff --git a/.changeset/poor-bobcats-sort.md b/.changeset/poor-bobcats-sort.md new file mode 100644 index 000000000000..3b87a965ebc2 --- /dev/null +++ b/.changeset/poor-bobcats-sort.md @@ 
-0,0 +1,47 @@ +--- +'@ai-sdk/azure': major +'@ai-sdk/deepinfra': major +'@ai-sdk/fal': major +'@ai-sdk/fireworks': major +'@ai-sdk/google-vertex': major +'@ai-sdk/luma': major +'@ai-sdk/openai-compatible': major +'@ai-sdk/openai': major +'@ai-sdk/replicate': major +'@ai-sdk/togetherai': major +'@ai-sdk/xai': major +'ai': major +--- + +### Move Image Model Settings into generate options + +Image Models no longer have settings. Instead, `maxImagesPerCall` can be passed directly to `generateImage()`. All other image settings can be passed to `providerOptions[provider]`. + +Before + +```js +await generateImage({ + model: luma.image('photon-flash-1', { + maxImagesPerCall: 5, + pollIntervalMillis: 500, + }), + prompt, + n: 10, +}); +``` + +After + +```js +await generateImage({ + model: luma.image('photon-flash-1'), + prompt, + n: 10, + maxImagesPerCall: 5, + providerOptions: { + luma: { pollIntervalMillis: 500 }, + }, +}); +``` + +Pull Request: https://github.com/vercel/ai/pull/6180 diff --git a/.changeset/poor-kids-lick.md b/.changeset/poor-kids-lick.md new file mode 100644 index 000000000000..9b4cbe08be93 --- /dev/null +++ b/.changeset/poor-kids-lick.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove sendExtraMessageFields diff --git a/.changeset/popular-plums-begin.md b/.changeset/popular-plums-begin.md new file mode 100644 index 000000000000..f3d9f6b143d4 --- /dev/null +++ b/.changeset/popular-plums-begin.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): simplify default provider setup diff --git a/.changeset/pre.json b/.changeset/pre.json new file mode 100644 index 000000000000..47b1be6b06bc --- /dev/null +++ b/.changeset/pre.json @@ -0,0 +1,378 @@ +{ + "mode": "pre", + "tag": "alpha", + "initialVersions": { + "@example/ai-core": "0.0.0", + "@example/express": "0.0.0", + "@example/fastify": "0.0.0", + "@example/hono": "0.0.0", + "@example/mcp": "0.0.0", + "@example/nest": "0.0.0", + "@example/next-fastapi": "0.0.0", + 
"@example/next-google-vertex": "0.0.0", + "@example/next-langchain": "0.0.0", + "@example/next-openai": "0.0.0", + "@example/next-openai-kasada-bot-protection": "0.0.0", + "@example/next-openai-pages": "0.0.0", + "@example/next-openai-telemetry": "0.0.0", + "@example/next-openai-telemetry-sentry": "0.0.0", + "@example/next-openai-rate-limits": "0.0.0", + "@example/node-http-server": "0.0.0", + "@example/nuxt-openai": "0.0.0", + "@example/sveltekit-openai": "0.0.0", + "ai": "5.0.0-canary.24", + "@ai-sdk/amazon-bedrock": "3.0.0-canary.19", + "@ai-sdk/anthropic": "2.0.0-canary.19", + "@ai-sdk/assemblyai": "0.0.1-canary.7", + "@ai-sdk/azure": "2.0.0-canary.21", + "@ai-sdk/cerebras": "1.0.0-canary.19", + "@ai-sdk/codemod": "2.0.0-canary.0", + "@ai-sdk/cohere": "2.0.0-canary.20", + "@ai-sdk/deepgram": "1.0.0-canary.12", + "@ai-sdk/deepinfra": "1.0.0-canary.19", + "@ai-sdk/deepseek": "1.0.0-canary.19", + "@ai-sdk/elevenlabs": "1.0.0-canary.12", + "@ai-sdk/fal": "1.0.0-canary.20", + "@ai-sdk/fireworks": "1.0.0-canary.19", + "@ai-sdk/gateway": "0.0.0", + "@ai-sdk/gladia": "1.0.0-canary.9", + "@ai-sdk/google": "2.0.0-canary.20", + "@ai-sdk/google-vertex": "3.0.0-canary.20", + "@ai-sdk/groq": "2.0.0-canary.20", + "@ai-sdk/hume": "1.0.0-canary.12", + "@ai-sdk/langchain": "1.0.0-canary.12", + "@ai-sdk/llamaindex": "1.0.0-canary.12", + "@ai-sdk/lmnt": "1.0.0-canary.12", + "@ai-sdk/luma": "1.0.0-canary.19", + "@ai-sdk/mistral": "2.0.0-canary.19", + "@ai-sdk/openai": "2.0.0-canary.20", + "@ai-sdk/openai-compatible": "1.0.0-canary.19", + "@ai-sdk/perplexity": "2.0.0-canary.19", + "@ai-sdk/provider": "2.0.0-canary.14", + "@ai-sdk/provider-utils": "3.0.0-canary.19", + "@ai-sdk/react": "2.0.0-canary.23", + "@ai-sdk/replicate": "1.0.0-canary.19", + "@ai-sdk/revai": "1.0.0-canary.12", + "@ai-sdk/rsc": "1.0.0-canary.22", + "ai-core-e2e-next-server": "0.0.0", + "@ai-sdk/svelte": "3.0.0-canary.23", + "@ai-sdk/togetherai": "1.0.0-canary.19", + "@ai-sdk/valibot": "1.0.0-canary.21", + 
"@ai-sdk/vercel": "0.0.0", + "@ai-sdk/vue": "2.0.0-canary.23", + "@ai-sdk/xai": "2.0.0-canary.19", + "analyze-downloads": "0.0.0", + "eslint-config-vercel-ai": "0.0.0", + "generate-llms-txt": "0.0.0", + "@vercel/ai-tsconfig": "0.0.0", + "@example/next": "0.0.0" + }, + "changesets": [ + "afraid-moles-cross", + "angry-crabs-develop", + "angry-kings-dance", + "angry-plants-sin", + "angry-poems-learn", + "angry-timers-drive", + "beige-ligers-kneel", + "beige-penguins-greet", + "beige-socks-stare", + "big-impalas-grin", + "big-panthers-judge", + "brave-numbers-drive", + "brown-eagles-tickle", + "brown-poems-boil", + "calm-boats-complain", + "calm-dragons-drive", + "chatty-ladybugs-nail", + "chatty-steaks-search", + "chilled-clocks-brush", + "chilled-queens-remember", + "chilly-chairs-press", + "chilly-teachers-brush", + "chilly-tips-know", + "clean-ants-brake", + "clean-numbers-cover", + "clever-coats-invite", + "clever-games-report", + "cold-bags-move", + "cool-buckets-shout", + "cool-bulldogs-fix", + "cool-gifts-film", + "cool-shrimps-kick", + "cuddly-eels-perform", + "cuddly-icons-kick", + "cuddly-kangaroos-double", + "curly-peaches-clap", + "curvy-lobsters-share", + "dirty-eggs-breathe", + "dull-candles-trade", + "dull-points-mate", + "eight-emus-push", + "eight-months-sip", + "eighty-flowers-design", + "eighty-pugs-sip", + "eighty-seals-search", + "eleven-lobsters-rescue", + "eleven-pets-clean", + "empty-fireants-learn", + "empty-flowers-sniff", + "empty-pets-jump", + "empty-walls-rest", + "fair-bikes-hear", + "fair-cobras-tan", + "fair-cups-travel", + "fair-swans-kneel", + "famous-eggs-camp", + "famous-fans-provide", + "famous-peas-arrive", + "famous-shrimps-fail", + "famous-ties-train", + "fast-students-turn", + "fast-toys-dream", + "few-jobs-mate", + "few-kangaroos-remember", + "few-pianos-pay", + "fifty-camels-visit", + "fifty-shrimps-kick", + "five-ravens-hammer", + "fix-env-mutation", + "flat-plums-bake", + "fluffy-pets-pump", + "forty-kangaroos-pull", + 
"fresh-forks-punch", + "fresh-otters-chew", + "fresh-swans-march", + "friendly-otters-sneeze", + "funny-cows-sin", + "funny-mayflies-yawn", + "fuzzy-comics-listen", + "fuzzy-lies-explain", + "fuzzy-shoes-act", + "gentle-gorillas-mate", + "gentle-mayflies-call", + "gentle-toys-smile", + "gold-planes-cheer", + "good-students-sin", + "good-swans-heal", + "great-hats-hide", + "great-mangos-scream", + "great-poets-attack", + "green-deers-scream", + "green-dogs-kick", + "green-grapes-rhyme", + "green-pots-collect", + "happy-ads-happen", + "happy-countries-dream", + "happy-kangaroos-roll", + "healthy-humans-burn", + "heavy-ducks-join", + "heavy-ligers-lay", + "heavy-pens-destroy", + "hip-eagles-attend", + "hip-rocks-mix", + "hot-colts-hear", + "hot-singers-help", + "huge-cloths-burn", + "hungry-bears-glow", + "hungry-frogs-eat", + "hungry-frogs-raise", + "hungry-hotels-hunt", + "hungry-pets-hear", + "hungry-trains-compete", + "hungry-zebras-applaud", + "itchy-bats-breathe", + "itchy-cars-relax", + "itchy-deers-jog", + "itchy-pumas-wave", + "khaki-bears-drop", + "khaki-sheep-sparkle", + "khaki-tomatoes-think", + "large-peas-eat", + "large-ties-own", + "late-brooms-suffer", + "late-foxes-battle", + "lazy-ducks-cheat", + "lemon-actors-invite", + "lemon-terms-hug", + "lemon-yaks-move", + "light-books-poke", + "light-rules-film", + "little-bobcats-jog", + "little-carrots-speak", + "little-tips-occur", + "little-zebras-suffer", + "lovely-garlics-promise", + "many-beans-exercise", + "many-toes-glow", + "mean-files-talk", + "mean-monkeys-sip", + "metal-insects-tease", + "modern-kings-smoke", + "moody-rings-remember", + "moody-yaks-love", + "nasty-lobsters-shave", + "nasty-spiders-sparkle", + "nasty-trains-beg", + "neat-pillows-occur", + "nervous-maps-fix", + "new-pens-remain", + "new-vans-obey", + "nice-tips-walk", + "nine-jars-hammer", + "nine-pillows-hug", + "nine-rivers-compete", + "ninety-cameras-wonder", + "odd-tables-cross", + "odd-vans-suffer", + "old-moons-kiss", + 
"olive-candles-compare", + "olive-ducks-carry", + "olive-wombats-pretend", + "orange-bags-stare", + "pink-deers-switch", + "pink-mangos-tickle", + "plenty-bears-run", + "plenty-dingos-bow", + "plenty-radios-travel", + "poor-bees-do", + "poor-bobcats-sort", + "poor-kids-lick", + "popular-plums-begin", + "pretty-bikes-appear", + "pretty-doors-promise", + "pretty-jars-reflect", + "pretty-plants-watch", + "pretty-pugs-eat", + "proud-buckets-guess", + "proud-cows-bathe", + "proud-dancers-doubt", + "purple-rocks-cover", + "quick-toys-help", + "quiet-glasses-double", + "rare-foxes-build", + "real-apes-lick", + "real-fireants-smell", + "red-frogs-cheer", + "red-worms-help", + "rich-days-call", + "rotten-boats-doubt", + "rotten-peaches-doubt", + "rotten-tomatoes-smoke", + "rotten-walls-provide", + "rude-badgers-roll", + "rude-bugs-run", + "rude-rivers-hide", + "selfish-rice-own", + "selfish-wasps-applaud", + "serious-clouds-cheer", + "serious-numbers-teach", + "serious-taxis-invent", + "serious-trains-raise", + "seven-beans-push", + "seven-dancers-crash", + "seven-fans-speak", + "seven-hornets-peel", + "seven-pens-itch", + "seven-tools-type", + "shaggy-experts-warn", + "shaggy-singers-promise", + "sharp-apes-tickle", + "sharp-mangos-relate", + "sharp-ties-kneel", + "shiny-dolphins-lie", + "shiny-parents-know", + "shy-lamps-visit", + "silent-nails-taste", + "silent-paws-decide", + "silver-vans-march", + "six-garlics-sin", + "six-moose-know", + "six-olives-rest", + "slimy-chefs-play", + "slow-donuts-study", + "slow-laws-end", + "slow-pants-buy", + "slow-windows-ring", + "smart-keys-check", + "smart-swans-drive", + "smooth-carpets-bathe", + "smooth-mirrors-kneel", + "sour-bananas-remain", + "sour-mails-cheer", + "sour-radios-boil", + "sour-rockets-greet", + "sour-trains-remember", + "spicy-mangos-brush", + "spicy-shoes-matter", + "spotty-swans-know", + "stale-cherries-heal", + "stale-tools-exercise", + "strange-apricots-enjoy", + "strange-camels-decide", + 
"strange-flies-remember", + "strong-windows-wave", + "stupid-pots-laugh", + "sweet-lobsters-type", + "sweet-turtles-kiss", + "swift-countries-applaud", + "swift-geckos-joke", + "swift-ghosts-itch", + "swift-needles-sniff", + "swift-turtles-rhyme", + "tall-garlics-sit", + "tall-rice-flash", + "tasty-starfishes-swim", + "ten-ligers-turn", + "ten-students-yell", + "tender-buses-glow", + "tender-comics-rescue", + "tender-lizards-switch", + "tender-tables-trade", + "thick-chairs-remain", + "thick-melons-talk", + "thick-parents-grab", + "thin-items-knock", + "thin-numbers-shave", + "three-jars-fix", + "three-pans-move", + "tiny-deers-kick", + "tough-mugs-fail", + "tough-suns-eat", + "tricky-hats-fly", + "tricky-lions-deliver", + "tricky-ravens-kick", + "tricky-snakes-leave", + "tricky-zebras-cover", + "twelve-baboons-sing", + "twelve-kids-travel", + "twelve-pianos-destroy", + "twelve-stingrays-behave", + "twelve-waves-stare", + "two-otters-divide", + "two-otters-whisper", + "two-roses-think", + "unlucky-bikes-fry", + "unlucky-bobcats-wash", + "unlucky-cherries-rescue", + "unlucky-kiwis-build", + "unlucky-toes-laugh", + "violet-taxis-work", + "warm-eagles-play", + "weak-bikes-warn", + "weak-moles-nail", + "wet-toys-burn", + "wet-trainers-vanish", + "wicked-flowers-study", + "wicked-snakes-march", + "wild-candles-judge", + "wild-cats-work", + "wild-pugs-burn", + "wise-gorillas-act", + "witty-candles-pretend", + "yellow-chefs-kick", + "yellow-eels-sort", + "yellow-ligers-brake", + "young-dingos-march" + ] +} diff --git a/.changeset/pretty-bikes-appear.md b/.changeset/pretty-bikes-appear.md new file mode 100644 index 000000000000..cc1d861f351b --- /dev/null +++ b/.changeset/pretty-bikes-appear.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai/mcp): better support for zero-argument MCP tools diff --git a/.changeset/pretty-doors-promise.md b/.changeset/pretty-doors-promise.md new file mode 100644 index 000000000000..7afc2ad395c4 --- /dev/null +++ 
b/.changeset/pretty-doors-promise.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai): send `start` part in correct position in stream (streamText) diff --git a/.changeset/pretty-jars-reflect.md b/.changeset/pretty-jars-reflect.md new file mode 100644 index 000000000000..8539e2b162c8 --- /dev/null +++ b/.changeset/pretty-jars-reflect.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: refactor source parts (spec) diff --git a/.changeset/pretty-plants-watch.md b/.changeset/pretty-plants-watch.md new file mode 100644 index 000000000000..3b6bf58406b0 --- /dev/null +++ b/.changeset/pretty-plants-watch.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/react': patch +--- + +fix (react): structuredClone message in replaceMessage diff --git a/.changeset/pretty-pugs-eat.md b/.changeset/pretty-pugs-eat.md new file mode 100644 index 000000000000..c0013d4f4277 --- /dev/null +++ b/.changeset/pretty-pugs-eat.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): add args callbacks to tools diff --git a/.changeset/proud-buckets-guess.md b/.changeset/proud-buckets-guess.md new file mode 100644 index 000000000000..e7852d7c39ec --- /dev/null +++ b/.changeset/proud-buckets-guess.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google': patch +--- + +fix (provider/google): prevent error when thinking signature is used diff --git a/.changeset/proud-cows-bathe.md b/.changeset/proud-cows-bathe.md new file mode 100644 index 000000000000..2a5c803b84e8 --- /dev/null +++ b/.changeset/proud-cows-bathe.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): change file to parts to use urls instead of data diff --git a/.changeset/proud-dancers-doubt.md b/.changeset/proud-dancers-doubt.md new file mode 100644 index 000000000000..d08485653707 --- /dev/null +++ b/.changeset/proud-dancers-doubt.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): support changing the system prompt in prepareSteps diff --git a/.changeset/purple-rocks-cover.md b/.changeset/purple-rocks-cover.md new file mode 100644 index 
000000000000..14fe0bba0d85 --- /dev/null +++ b/.changeset/purple-rocks-cover.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/openai': patch +'@ai-sdk/azure': patch +--- + +chore(providers/openai): remove & enable strict compatibility by default diff --git a/.changeset/quick-toys-help.md b/.changeset/quick-toys-help.md new file mode 100644 index 000000000000..a49be328e0bb --- /dev/null +++ b/.changeset/quick-toys-help.md @@ -0,0 +1,31 @@ +--- +'@ai-sdk/provider': patch +--- + +chore (provider): allow both binary and base64 file content (spec) + +Before + +```ts +import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils'; + +// Had to manually convert binary data to base64 +const fileData = new Uint8Array([0, 1, 2, 3]); +const filePart = { + type: 'file', + mediaType: 'application/pdf', + data: convertUint8ArrayToBase64(fileData), // Required conversion +}; +``` + +After + +```ts +// Can use binary data directly +const fileData = new Uint8Array([0, 1, 2, 3]); +const filePart = { + type: 'file', + mediaType: 'application/pdf', + data: fileData, // Direct Uint8Array support +}; +``` diff --git a/.changeset/quiet-glasses-double.md b/.changeset/quiet-glasses-double.md new file mode 100644 index 000000000000..dab9b6247e0c --- /dev/null +++ b/.changeset/quiet-glasses-double.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gladia': patch +--- + +feat(providers/gladia): add transcribe diff --git a/.changeset/rare-foxes-build.md b/.changeset/rare-foxes-build.md new file mode 100644 index 000000000000..0935fd8f0d63 --- /dev/null +++ b/.changeset/rare-foxes-build.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): stable sendStart/sendFinish options diff --git a/.changeset/real-apes-lick.md b/.changeset/real-apes-lick.md new file mode 100644 index 000000000000..d6f96f4b0f9d --- /dev/null +++ b/.changeset/real-apes-lick.md @@ -0,0 +1,5 @@ +--- +'ai': minor +--- + +feat (ai): add filename to file ui parts diff --git a/.changeset/real-fireants-smell.md b/.changeset/real-fireants-smell.md new file 
mode 100644 index 000000000000..dc4567c3b9fe --- /dev/null +++ b/.changeset/real-fireants-smell.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename DataStreamToSSETransformStream to JsonToSseTransformStream diff --git a/.changeset/red-frogs-cheer.md b/.changeset/red-frogs-cheer.md new file mode 100644 index 000000000000..472a13db4ac6 --- /dev/null +++ b/.changeset/red-frogs-cheer.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): streamText/generateText: totalUsage contains usage for all steps. usage is for a single step. diff --git a/.changeset/red-worms-help.md b/.changeset/red-worms-help.md new file mode 100644 index 000000000000..52ced5b9aa86 --- /dev/null +++ b/.changeset/red-worms-help.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.13 diff --git a/.changeset/rich-days-call.md b/.changeset/rich-days-call.md new file mode 100644 index 000000000000..cfc430b48251 --- /dev/null +++ b/.changeset/rich-days-call.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/groq': patch +--- + +feat(providers/groq): add transcribe diff --git a/.changeset/rotten-boats-doubt.md b/.changeset/rotten-boats-doubt.md new file mode 100644 index 000000000000..b70753156ea6 --- /dev/null +++ b/.changeset/rotten-boats-doubt.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/react': patch +--- + +chore (ai/react): add experimental throttle back to useChat diff --git a/.changeset/rotten-peaches-doubt.md b/.changeset/rotten-peaches-doubt.md new file mode 100644 index 000000000000..5ce51b6db75d --- /dev/null +++ b/.changeset/rotten-peaches-doubt.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: refactor tool call and tool call delta parts (spec) diff --git a/.changeset/rotten-tomatoes-smoke.md b/.changeset/rotten-tomatoes-smoke.md new file mode 100644 index 000000000000..fb1a8f53d846 --- /dev/null +++ b/.changeset/rotten-tomatoes-smoke.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ui): replace chat store concept with chat instances diff --git 
a/.changeset/rotten-walls-provide.md b/.changeset/rotten-walls-provide.md new file mode 100644 index 000000000000..9ee7093a34e0 --- /dev/null +++ b/.changeset/rotten-walls-provide.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: refactor text parts (spec) diff --git a/.changeset/rude-badgers-roll.md b/.changeset/rude-badgers-roll.md new file mode 100644 index 000000000000..cd641fc28a9c --- /dev/null +++ b/.changeset/rude-badgers-roll.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/assemblyai': patch +--- + +feat(providers/assemblyai): add transcribe diff --git a/.changeset/rude-bugs-run.md b/.changeset/rude-bugs-run.md new file mode 100644 index 000000000000..616982e8789a --- /dev/null +++ b/.changeset/rude-bugs-run.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename continueUntil to stopWhen. Rename maxSteps stop condition to stepCountIs. diff --git a/.changeset/rude-rivers-hide.md b/.changeset/rude-rivers-hide.md new file mode 100644 index 000000000000..0e5e2140d466 --- /dev/null +++ b/.changeset/rude-rivers-hide.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove steps from tool invocation ui parts diff --git a/.changeset/selfish-rice-own.md b/.changeset/selfish-rice-own.md new file mode 100644 index 000000000000..44f827260d50 --- /dev/null +++ b/.changeset/selfish-rice-own.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): remove image parts diff --git a/.changeset/selfish-wasps-applaud.md b/.changeset/selfish-wasps-applaud.md new file mode 100644 index 000000000000..94ba2bb9e716 --- /dev/null +++ b/.changeset/selfish-wasps-applaud.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/hume': patch +--- + +feat(providers/hume): add speech diff --git a/.changeset/serious-clouds-cheer.md b/.changeset/serious-clouds-cheer.md new file mode 100644 index 000000000000..1f4992f1cab6 --- /dev/null +++ b/.changeset/serious-clouds-cheer.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.3 diff --git 
a/.changeset/serious-numbers-teach.md b/.changeset/serious-numbers-teach.md new file mode 100644 index 000000000000..d0918cff2396 --- /dev/null +++ b/.changeset/serious-numbers-teach.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore (ai): use JSONValue definition from provider diff --git a/.changeset/serious-taxis-invent.md b/.changeset/serious-taxis-invent.md new file mode 100644 index 000000000000..923cf1103735 --- /dev/null +++ b/.changeset/serious-taxis-invent.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: refactor file parts (spec) diff --git a/.changeset/serious-trains-raise.md b/.changeset/serious-trains-raise.md new file mode 100644 index 000000000000..ed91552701ad --- /dev/null +++ b/.changeset/serious-trains-raise.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gateway': patch +--- + +feat (providers/gateway): add createGateway shorthand alias for createGatewayProvider diff --git a/.changeset/seven-beans-push.md b/.changeset/seven-beans-push.md new file mode 100644 index 000000000000..70c3a5720800 --- /dev/null +++ b/.changeset/seven-beans-push.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai): fix experimental sendStart/sendFinish options in streamText diff --git a/.changeset/seven-dancers-crash.md b/.changeset/seven-dancers-crash.md new file mode 100644 index 000000000000..41c925b7fb30 --- /dev/null +++ b/.changeset/seven-dancers-crash.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename id to chatId (in post request, resume request, and useChat) diff --git a/.changeset/seven-fans-speak.md b/.changeset/seven-fans-speak.md new file mode 100644 index 000000000000..a71a4bff2e44 --- /dev/null +++ b/.changeset/seven-fans-speak.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ui): extended regenerate support diff --git a/.changeset/seven-hornets-peel.md b/.changeset/seven-hornets-peel.md new file mode 100644 index 000000000000..fe6392857426 --- /dev/null +++ b/.changeset/seven-hornets-peel.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/xai': minor 
+--- + +add live search diff --git a/.changeset/seven-pens-itch.md b/.changeset/seven-pens-itch.md new file mode 100644 index 000000000000..671981d33d90 --- /dev/null +++ b/.changeset/seven-pens-itch.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +chore (utils): remove unused test helpers diff --git a/.changeset/seven-tools-type.md b/.changeset/seven-tools-type.md new file mode 100644 index 000000000000..10efb8cbcc1a --- /dev/null +++ b/.changeset/seven-tools-type.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): inject message id in createUIMessageStream diff --git a/.changeset/shaggy-experts-warn.md b/.changeset/shaggy-experts-warn.md new file mode 100644 index 000000000000..59c7d9cf1b13 --- /dev/null +++ b/.changeset/shaggy-experts-warn.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +release alpha.5 diff --git a/.changeset/shaggy-singers-promise.md b/.changeset/shaggy-singers-promise.md new file mode 100644 index 000000000000..575aa29cca30 --- /dev/null +++ b/.changeset/shaggy-singers-promise.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +fix(provider-utils): fix SSE parser bug (CRLF) diff --git a/.changeset/sharp-apes-tickle.md b/.changeset/sharp-apes-tickle.md new file mode 100644 index 000000000000..0669dae30670 --- /dev/null +++ b/.changeset/sharp-apes-tickle.md @@ -0,0 +1,8 @@ +--- +'@ai-sdk/svelte': major +'@ai-sdk/react': major +'@ai-sdk/vue': major +'ai': major +--- + +feat (ui): use UI_MESSAGE generic diff --git a/.changeset/sharp-mangos-relate.md b/.changeset/sharp-mangos-relate.md new file mode 100644 index 000000000000..b47a7862517e --- /dev/null +++ b/.changeset/sharp-mangos-relate.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/provider': patch +--- + +feat: `ImageModelV2#maxImagesPerCall` can be set to a function that returns a `number` or `undefined`, optionally as a promise + +pull request: https://github.com/vercel/ai/pull/6343 diff --git a/.changeset/sharp-ties-kneel.md 
b/.changeset/sharp-ties-kneel.md new file mode 100644 index 000000000000..7b264caa7d6f --- /dev/null +++ b/.changeset/sharp-ties-kneel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +release alpha.4 diff --git a/.changeset/shiny-dolphins-lie.md b/.changeset/shiny-dolphins-lie.md new file mode 100644 index 000000000000..a00d42e62ee2 --- /dev/null +++ b/.changeset/shiny-dolphins-lie.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): always stream tool calls diff --git a/.changeset/shiny-parents-know.md b/.changeset/shiny-parents-know.md new file mode 100644 index 000000000000..401f97fc9e26 --- /dev/null +++ b/.changeset/shiny-parents-know.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/valibot': patch +--- + +chore (valibot): update to valibot 1.1 diff --git a/.changeset/shy-lamps-visit.md b/.changeset/shy-lamps-visit.md new file mode 100644 index 000000000000..10e05fcd77c4 --- /dev/null +++ b/.changeset/shy-lamps-visit.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/provider-utils': patch +'@ai-sdk/valibot': patch +'ai': patch +--- + +chore(provider-utils): move over jsonSchema diff --git a/.changeset/silent-nails-taste.md b/.changeset/silent-nails-taste.md new file mode 100644 index 000000000000..6f0965942318 --- /dev/null +++ b/.changeset/silent-nails-taste.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/google-vertex': patch +'@ai-sdk/anthropic': patch +--- + +fix (provider/google-vertex): fix anthropic support for image urls in messages diff --git a/.changeset/silent-paws-decide.md b/.changeset/silent-paws-decide.md new file mode 100644 index 000000000000..05925dce4187 --- /dev/null +++ b/.changeset/silent-paws-decide.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): use console.error as default error handler for streamText and streamObject diff --git a/.changeset/silver-vans-march.md b/.changeset/silver-vans-march.md new file mode 100644 index 000000000000..4d29ba030147 --- /dev/null +++ b/.changeset/silver-vans-march.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +feat (provider): 
support reasoning tokens, cached input tokens, total token in usage information diff --git a/.changeset/six-garlics-sin.md b/.changeset/six-garlics-sin.md new file mode 100644 index 000000000000..6009d86fb667 --- /dev/null +++ b/.changeset/six-garlics-sin.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename DataStream* to UIMessage* diff --git a/.changeset/six-moose-know.md b/.changeset/six-moose-know.md new file mode 100644 index 000000000000..f3f1dd1c1a98 --- /dev/null +++ b/.changeset/six-moose-know.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix(ai/core): properly handle custom separator in provider registry diff --git a/.changeset/six-olives-rest.md b/.changeset/six-olives-rest.md new file mode 100644 index 000000000000..4105dbab3be5 --- /dev/null +++ b/.changeset/six-olives-rest.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +feat (providers/anthropic): add claude v4 models diff --git a/.changeset/slimy-chefs-play.md b/.changeset/slimy-chefs-play.md new file mode 100644 index 000000000000..0aa0cf412484 --- /dev/null +++ b/.changeset/slimy-chefs-play.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/fal': patch +--- + +fix (providers/fal): improve model compatibility diff --git a/.changeset/slow-donuts-study.md b/.changeset/slow-donuts-study.md new file mode 100644 index 000000000000..cad39ea6cc30 --- /dev/null +++ b/.changeset/slow-donuts-study.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai): merge data ui stream parts correctly diff --git a/.changeset/slow-laws-end.md b/.changeset/slow-laws-end.md new file mode 100644 index 000000000000..49b5460286b4 --- /dev/null +++ b/.changeset/slow-laws-end.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): add ui data parts diff --git a/.changeset/slow-pants-buy.md b/.changeset/slow-pants-buy.md new file mode 100644 index 000000000000..c0a1240eb89f --- /dev/null +++ b/.changeset/slow-pants-buy.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/amazon-bedrock': patch +--- + +chore(providers/bedrock): update embedding 
model to use providerOptions diff --git a/.changeset/slow-windows-ring.md b/.changeset/slow-windows-ring.md new file mode 100644 index 000000000000..8a67285e8956 --- /dev/null +++ b/.changeset/slow-windows-ring.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gateway': patch +--- + +chore (providers/gateway): update chat model ids diff --git a/.changeset/smart-keys-check.md b/.changeset/smart-keys-check.md new file mode 100644 index 000000000000..0d283f73acd5 --- /dev/null +++ b/.changeset/smart-keys-check.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): add prepareSteps to streamText diff --git a/.changeset/smart-swans-drive.md b/.changeset/smart-swans-drive.md new file mode 100644 index 000000000000..ec8649ef9131 --- /dev/null +++ b/.changeset/smart-swans-drive.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +Add support for URL-based PDF documents in the Anthropic provider diff --git a/.changeset/smooth-carpets-bathe.md b/.changeset/smooth-carpets-bathe.md new file mode 100644 index 000000000000..47c7c60ef038 --- /dev/null +++ b/.changeset/smooth-carpets-bathe.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/gateway': patch +--- + +feat (providers/gateway): add gateway error types with error detail diff --git a/.changeset/smooth-mirrors-kneel.md b/.changeset/smooth-mirrors-kneel.md new file mode 100644 index 000000000000..c90af6f5bd58 --- /dev/null +++ b/.changeset/smooth-mirrors-kneel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (core): send buffered text in smooth stream when stream parts change diff --git a/.changeset/sour-bananas-remain.md b/.changeset/sour-bananas-remain.md new file mode 100644 index 000000000000..851e02685e9f --- /dev/null +++ b/.changeset/sour-bananas-remain.md @@ -0,0 +1,25 @@ +--- +'@ai-sdk/provider': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +feat (provider): add providerMetadata to ImageModelV2 interface (#5977) + +The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models. 
+ +```js +const prompt = 'Santa Claus driving a Cadillac'; + +const { providerMetadata } = await experimental_generateImage({ + model: openai.image('dall-e-3'), + prompt, +}); + +const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt; + +console.log({ + prompt, + revisedPrompt, +}); +``` diff --git a/.changeset/sour-mails-cheer.md b/.changeset/sour-mails-cheer.md new file mode 100644 index 000000000000..ddd71463a5c2 --- /dev/null +++ b/.changeset/sour-mails-cheer.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore (ai/mcp): add `assertCapability` method to experimental MCP client diff --git a/.changeset/sour-radios-boil.md b/.changeset/sour-radios-boil.md new file mode 100644 index 000000000000..ab3f69d7f417 --- /dev/null +++ b/.changeset/sour-radios-boil.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): restructure prepareRequest diff --git a/.changeset/sour-rockets-greet.md b/.changeset/sour-rockets-greet.md new file mode 100644 index 000000000000..c740711d9d94 --- /dev/null +++ b/.changeset/sour-rockets-greet.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': patch +--- + +chore (provider-utils): switch to standard-schema diff --git a/.changeset/sour-trains-remember.md b/.changeset/sour-trains-remember.md new file mode 100644 index 000000000000..8dd512234a74 --- /dev/null +++ b/.changeset/sour-trains-remember.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/vue': major +--- + +chore (ui/vue): replace useChat with new Chat diff --git a/.changeset/spicy-mangos-brush.md b/.changeset/spicy-mangos-brush.md new file mode 100644 index 000000000000..3261ef731df2 --- /dev/null +++ b/.changeset/spicy-mangos-brush.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: remove object generation mode diff --git a/.changeset/spicy-shoes-matter.md b/.changeset/spicy-shoes-matter.md new file mode 100644 index 000000000000..798d63239e61 --- /dev/null +++ b/.changeset/spicy-shoes-matter.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + 
+feat(provider/openai): add o3 & o4-mini with developer systemMessageMode diff --git a/.changeset/spotty-swans-know.md b/.changeset/spotty-swans-know.md new file mode 100644 index 000000000000..bdd5540eaa32 --- /dev/null +++ b/.changeset/spotty-swans-know.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/langchain': patch +--- + +chore(providers/langchain): extract to separate package diff --git a/.changeset/stale-cherries-heal.md b/.changeset/stale-cherries-heal.md new file mode 100644 index 000000000000..455ea00812b8 --- /dev/null +++ b/.changeset/stale-cherries-heal.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/amazon-bedrock': patch +--- + +feat (provider/amazon-bedrock): add Claude 4 model ids (claude-sonnet-4-20250514-v1:0, claude-opus-4-20250514-v1:0) diff --git a/.changeset/stale-tools-exercise.md b/.changeset/stale-tools-exercise.md new file mode 100644 index 000000000000..1ea014cb5845 --- /dev/null +++ b/.changeset/stale-tools-exercise.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.7 diff --git a/.changeset/strange-apricots-enjoy.md b/.changeset/strange-apricots-enjoy.md new file mode 100644 index 000000000000..81b4ab3ef68b --- /dev/null +++ b/.changeset/strange-apricots-enjoy.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/groq': patch +--- + +chore(providers/groq): convert to providerOptions diff --git a/.changeset/strange-camels-decide.md b/.changeset/strange-camels-decide.md new file mode 100644 index 000000000000..db8aa386bf10 --- /dev/null +++ b/.changeset/strange-camels-decide.md @@ -0,0 +1,12 @@ +--- +'@ai-sdk/openai-compatible': patch +'@ai-sdk/google-vertex': patch +'@ai-sdk/provider': patch +'@ai-sdk/mistral': patch +'@ai-sdk/cohere': patch +'@ai-sdk/google': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +chore(embedding-model-v2): rename rawResponse to response diff --git a/.changeset/strange-flies-remember.md b/.changeset/strange-flies-remember.md new file mode 100644 index 000000000000..42892c8f8571 --- /dev/null +++ 
b/.changeset/strange-flies-remember.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/fal': patch +--- + +feat (@ai-sdk/fal): support new Flux Kontext models diff --git a/.changeset/strong-windows-wave.md b/.changeset/strong-windows-wave.md new file mode 100644 index 000000000000..2ae220e4ebd3 --- /dev/null +++ b/.changeset/strong-windows-wave.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/react': patch +--- + +feat (ui/react): add resume flag to useChat diff --git a/.changeset/stupid-pots-laugh.md b/.changeset/stupid-pots-laugh.md new file mode 100644 index 000000000000..b6b3e2ce1d67 --- /dev/null +++ b/.changeset/stupid-pots-laugh.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/xai': patch +--- + +Add native XAI chat language model implementation diff --git a/.changeset/sweet-lobsters-type.md b/.changeset/sweet-lobsters-type.md new file mode 100644 index 000000000000..907ac9d17b28 --- /dev/null +++ b/.changeset/sweet-lobsters-type.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/provider-utils': major +'ai': major +--- + +feat (ui): UI message metadata diff --git a/.changeset/sweet-turtles-kiss.md b/.changeset/sweet-turtles-kiss.md new file mode 100644 index 000000000000..a84f84055899 --- /dev/null +++ b/.changeset/sweet-turtles-kiss.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/elevenlabs': patch +--- + +feat (provider/elevenlabs): add transcription provider diff --git a/.changeset/swift-countries-applaud.md b/.changeset/swift-countries-applaud.md new file mode 100644 index 000000000000..2b3520f54f37 --- /dev/null +++ b/.changeset/swift-countries-applaud.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.2 diff --git a/.changeset/swift-geckos-joke.md b/.changeset/swift-geckos-joke.md new file mode 100644 index 000000000000..a8550220c24a --- /dev/null +++ b/.changeset/swift-geckos-joke.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix(providers/openai): logprobs for stream alongside completion model diff --git a/.changeset/swift-ghosts-itch.md b/.changeset/swift-ghosts-itch.md new file mode 100644 
index 000000000000..c5d3d677347a --- /dev/null +++ b/.changeset/swift-ghosts-itch.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): remove prompt type from language model v2 spec diff --git a/.changeset/swift-needles-sniff.md b/.changeset/swift-needles-sniff.md new file mode 100644 index 000000000000..0e752c90fa93 --- /dev/null +++ b/.changeset/swift-needles-sniff.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +fix(anthropic): resolve web search API validation errors with partial location + provider output diff --git a/.changeset/swift-turtles-rhyme.md b/.changeset/swift-turtles-rhyme.md new file mode 100644 index 000000000000..4c63da9c2f49 --- /dev/null +++ b/.changeset/swift-turtles-rhyme.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/fal': patch +--- + +feat (fal): Set `.providerMetaData` for image model responses diff --git a/.changeset/tall-garlics-sit.md b/.changeset/tall-garlics-sit.md new file mode 100644 index 000000000000..27bb80a7363d --- /dev/null +++ b/.changeset/tall-garlics-sit.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ui): support message replacement in chat via messageId param on sendMessage diff --git a/.changeset/tall-rice-flash.md b/.changeset/tall-rice-flash.md new file mode 100644 index 000000000000..20c49b454ede --- /dev/null +++ b/.changeset/tall-rice-flash.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/groq': patch +--- + +feat (provider/groq): add llama 4 model diff --git a/.changeset/tasty-starfishes-swim.md b/.changeset/tasty-starfishes-swim.md new file mode 100644 index 000000000000..071c92479481 --- /dev/null +++ b/.changeset/tasty-starfishes-swim.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/vue': major +--- + +chore (ai): refactor and use chatstore in vue diff --git a/.changeset/ten-ligers-turn.md b/.changeset/ten-ligers-turn.md new file mode 100644 index 000000000000..ac4da3fe43ae --- /dev/null +++ b/.changeset/ten-ligers-turn.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +fix (ai): update source url stream part diff --git 
a/.changeset/ten-students-yell.md b/.changeset/ten-students-yell.md new file mode 100644 index 000000000000..42387a1c120e --- /dev/null +++ b/.changeset/ten-students-yell.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): add experimental prepareStep callback to generateText diff --git a/.changeset/tender-buses-glow.md b/.changeset/tender-buses-glow.md new file mode 100644 index 000000000000..adea7e261eac --- /dev/null +++ b/.changeset/tender-buses-glow.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix (provider/openai): increase transcription model resilience diff --git a/.changeset/tender-comics-rescue.md b/.changeset/tender-comics-rescue.md new file mode 100644 index 000000000000..15d172e641ff --- /dev/null +++ b/.changeset/tender-comics-rescue.md @@ -0,0 +1,11 @@ +--- +'ai': major +--- + +remove StreamTextResult.mergeIntoDataStream method +rename DataStreamOptions.getErrorMessage to onError +add pipeTextStreamToResponse function +add createTextStreamResponse function +change createDataStreamResponse function to accept a DataStream and not a DataStreamWriter +change pipeDataStreamToResponse function to accept a DataStream and not a DataStreamWriter +change pipeDataStreamToResponse function to have a single parameter diff --git a/.changeset/tender-lizards-switch.md b/.changeset/tender-lizards-switch.md new file mode 100644 index 000000000000..d5e6fc957814 --- /dev/null +++ b/.changeset/tender-lizards-switch.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ui): rename RequestOptions to CompletionRequestOptions diff --git a/.changeset/tender-tables-trade.md b/.changeset/tender-tables-trade.md new file mode 100644 index 000000000000..38c2c8a5ca5d --- /dev/null +++ b/.changeset/tender-tables-trade.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +feat(embedding-model-v2): add providerOptions diff --git a/.changeset/thick-chairs-remain.md b/.changeset/thick-chairs-remain.md new file mode 100644 index 000000000000..9e0bc14bbcaf --- /dev/null 
+++ b/.changeset/thick-chairs-remain.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): remove mode diff --git a/.changeset/thick-melons-talk.md b/.changeset/thick-melons-talk.md new file mode 100644 index 000000000000..4ba9f49e0e41 --- /dev/null +++ b/.changeset/thick-melons-talk.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/amazon-bedrock': patch +--- + +fix(provider/amazon-bedrock): use consistent document names for prompt cache effectiveness diff --git a/.changeset/thick-parents-grab.md b/.changeset/thick-parents-grab.md new file mode 100644 index 000000000000..8e31e4887514 --- /dev/null +++ b/.changeset/thick-parents-grab.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): replace maxSteps with continueUntil (generateText) diff --git a/.changeset/thin-items-knock.md b/.changeset/thin-items-knock.md new file mode 100644 index 000000000000..d32bf613f853 --- /dev/null +++ b/.changeset/thin-items-knock.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +chore(openai): remove simulateStreaming diff --git a/.changeset/thin-numbers-shave.md b/.changeset/thin-numbers-shave.md new file mode 100644 index 000000000000..e57732de3101 --- /dev/null +++ b/.changeset/thin-numbers-shave.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (core): improve error handling in streamText's consumeStream method diff --git a/.changeset/three-jars-fix.md b/.changeset/three-jars-fix.md new file mode 100644 index 000000000000..28a66f6006dc --- /dev/null +++ b/.changeset/three-jars-fix.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix (openai): structure output for responses model diff --git a/.changeset/three-pans-move.md b/.changeset/three-pans-move.md new file mode 100644 index 000000000000..0672e5368688 --- /dev/null +++ b/.changeset/three-pans-move.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider-utils': major +--- + +chore (provider-utils): return IdGenerator interface diff --git a/.changeset/tiny-deers-kick.md b/.changeset/tiny-deers-kick.md new file mode 
100644 index 000000000000..5e75717f9f74 --- /dev/null +++ b/.changeset/tiny-deers-kick.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore(ui-utils): merge into ai package diff --git a/.changeset/tough-mugs-fail.md b/.changeset/tough-mugs-fail.md new file mode 100644 index 000000000000..830857dd2d96 --- /dev/null +++ b/.changeset/tough-mugs-fail.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename reasoning UI parts 'reasoning' property to 'text' diff --git a/.changeset/tough-suns-eat.md b/.changeset/tough-suns-eat.md new file mode 100644 index 000000000000..47fc189de1ef --- /dev/null +++ b/.changeset/tough-suns-eat.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore: return content array from doGenerate (spec) diff --git a/.changeset/tricky-hats-fly.md b/.changeset/tricky-hats-fly.md new file mode 100644 index 000000000000..0ee4c7a355d1 --- /dev/null +++ b/.changeset/tricky-hats-fly.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +Remove `Experimental_LanguageModelV2Middleware` type diff --git a/.changeset/tricky-lions-deliver.md b/.changeset/tricky-lions-deliver.md new file mode 100644 index 000000000000..5198bc9c1d65 --- /dev/null +++ b/.changeset/tricky-lions-deliver.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): add consumeSseStream option to UI message stream responses diff --git a/.changeset/tricky-ravens-kick.md b/.changeset/tricky-ravens-kick.md new file mode 100644 index 000000000000..e5f01eda5bcf --- /dev/null +++ b/.changeset/tricky-ravens-kick.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): restructure chat transports diff --git a/.changeset/tricky-snakes-leave.md b/.changeset/tricky-snakes-leave.md new file mode 100644 index 000000000000..58740fffb2ff --- /dev/null +++ b/.changeset/tricky-snakes-leave.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +feat: streamText onChunk raw chunk support diff --git a/.changeset/tricky-zebras-cover.md b/.changeset/tricky-zebras-cover.md new file mode 
100644 index 000000000000..5d18d9c127ad --- /dev/null +++ b/.changeset/tricky-zebras-cover.md @@ -0,0 +1,10 @@ +--- +'ai': patch +--- + +Removed deprecated `options.throwErrorForEmptyVectors` from `cosineSimilarity()`. Since `throwErrorForEmptyVectors` was the only option the entire `options` argument was removed. + +```diff +- cosineSimilarity(vector1, vector2, options) ++cosineSimilarity(vector1, vector2) +``` diff --git a/.changeset/twelve-baboons-sing.md b/.changeset/twelve-baboons-sing.md new file mode 100644 index 000000000000..ff5424b08a46 --- /dev/null +++ b/.changeset/twelve-baboons-sing.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/google-vertex': patch +--- + +Add reasoning token output support for gemini models via Vertex AI Provider diff --git a/.changeset/twelve-kids-travel.md b/.changeset/twelve-kids-travel.md new file mode 100644 index 000000000000..9238be102309 --- /dev/null +++ b/.changeset/twelve-kids-travel.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore: remove ai/react diff --git a/.changeset/twelve-pianos-destroy.md b/.changeset/twelve-pianos-destroy.md new file mode 100644 index 000000000000..d924406aa5df --- /dev/null +++ b/.changeset/twelve-pianos-destroy.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): allow using provider default temperature by specifying null diff --git a/.changeset/twelve-stingrays-behave.md b/.changeset/twelve-stingrays-behave.md new file mode 100644 index 000000000000..f0d0ef3ed866 --- /dev/null +++ b/.changeset/twelve-stingrays-behave.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.11 diff --git a/.changeset/twelve-waves-stare.md b/.changeset/twelve-waves-stare.md new file mode 100644 index 000000000000..27f205d253f7 --- /dev/null +++ b/.changeset/twelve-waves-stare.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove deprecated useChat isLoading helper diff --git a/.changeset/two-otters-divide.md b/.changeset/two-otters-divide.md new file mode 100644 index 
000000000000..22368a9b3dd0 --- /dev/null +++ b/.changeset/two-otters-divide.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): refactor header preparation diff --git a/.changeset/two-otters-whisper.md b/.changeset/two-otters-whisper.md new file mode 100644 index 000000000000..cceeb4b35547 --- /dev/null +++ b/.changeset/two-otters-whisper.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +feat (ai): support model message array in prompt diff --git a/.changeset/two-roses-think.md b/.changeset/two-roses-think.md new file mode 100644 index 000000000000..550b2b282e8b --- /dev/null +++ b/.changeset/two-roses-think.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove StreamData and mergeStreams diff --git a/.changeset/unlucky-bikes-fry.md b/.changeset/unlucky-bikes-fry.md new file mode 100644 index 000000000000..eee5e4f4ec64 --- /dev/null +++ b/.changeset/unlucky-bikes-fry.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +feat (ai): replace maxSteps with continueUntil (streamText) diff --git a/.changeset/unlucky-bobcats-wash.md b/.changeset/unlucky-bobcats-wash.md new file mode 100644 index 000000000000..bd0a834bff9a --- /dev/null +++ b/.changeset/unlucky-bobcats-wash.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +chore(openai): remove legacy function calling diff --git a/.changeset/unlucky-cherries-rescue.md b/.changeset/unlucky-cherries-rescue.md new file mode 100644 index 000000000000..f827080b9203 --- /dev/null +++ b/.changeset/unlucky-cherries-rescue.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix(providers/openai): zod parse error with function diff --git a/.changeset/unlucky-kiwis-build.md b/.changeset/unlucky-kiwis-build.md new file mode 100644 index 000000000000..9d92e8abf9b9 --- /dev/null +++ b/.changeset/unlucky-kiwis-build.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove ui message reasoning property diff --git a/.changeset/unlucky-toes-laugh.md b/.changeset/unlucky-toes-laugh.md new file mode 100644 index 
000000000000..ea34258098ab --- /dev/null +++ b/.changeset/unlucky-toes-laugh.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/cohere': patch +--- + +feat(cohere): add citations support for text documents diff --git a/.changeset/violet-taxis-work.md b/.changeset/violet-taxis-work.md new file mode 100644 index 000000000000..8844ebf787f8 --- /dev/null +++ b/.changeset/violet-taxis-work.md @@ -0,0 +1,6 @@ +--- +'@ai-sdk/provider': major +'ai': major +--- + +chore: rename maxTokens to maxOutputTokens diff --git a/.changeset/warm-eagles-play.md b/.changeset/warm-eagles-play.md new file mode 100644 index 000000000000..49c36d1c659e --- /dev/null +++ b/.changeset/warm-eagles-play.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +fix (ai/telemetry): Avoid JSON.stringify on Uint8Arrays for telemetry diff --git a/.changeset/weak-bikes-warn.md b/.changeset/weak-bikes-warn.md new file mode 100644 index 000000000000..b7e8ca4cc931 --- /dev/null +++ b/.changeset/weak-bikes-warn.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/anthropic': patch +--- + +add web search tool support diff --git a/.changeset/weak-moles-nail.md b/.changeset/weak-moles-nail.md new file mode 100644 index 000000000000..a3e0bc47ff3d --- /dev/null +++ b/.changeset/weak-moles-nail.md @@ -0,0 +1,9 @@ +--- +'@ai-sdk/amazon-bedrock': patch +'@ai-sdk/mistral': patch +'@ai-sdk/cohere': patch +'@ai-sdk/openai': patch +'@ai-sdk/groq': patch +--- + +fix(providers): always use optional instead of mix of nullish for providerOptions diff --git a/.changeset/wet-toys-burn.md b/.changeset/wet-toys-burn.md new file mode 100644 index 000000000000..865be770b03c --- /dev/null +++ b/.changeset/wet-toys-burn.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove ui message data property diff --git a/.changeset/wet-trainers-vanish.md b/.changeset/wet-trainers-vanish.md new file mode 100644 index 000000000000..0b67c8b371d1 --- /dev/null +++ b/.changeset/wet-trainers-vanish.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.9 diff --git 
a/.changeset/wicked-flowers-study.md b/.changeset/wicked-flowers-study.md new file mode 100644 index 000000000000..b5eed3887561 --- /dev/null +++ b/.changeset/wicked-flowers-study.md @@ -0,0 +1,9 @@ +--- +'@ai-sdk/anthropic': patch +'@ai-sdk/provider': patch +'@ai-sdk/google': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +feat(tool-calling): don't require the user to have to pass parameters diff --git a/.changeset/wicked-snakes-march.md b/.changeset/wicked-snakes-march.md new file mode 100644 index 000000000000..9cce9100fa6c --- /dev/null +++ b/.changeset/wicked-snakes-march.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +Fix PDF file parts when passed as a string url or Uint8Array diff --git a/.changeset/wild-candles-judge.md b/.changeset/wild-candles-judge.md new file mode 100644 index 000000000000..df33fc279eb2 --- /dev/null +++ b/.changeset/wild-candles-judge.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': major +--- + +chore (provider): rename providerMetadata inputs to providerOptions diff --git a/.changeset/wild-cats-work.md b/.changeset/wild-cats-work.md new file mode 100644 index 000000000000..74c9e2c842fa --- /dev/null +++ b/.changeset/wild-cats-work.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/cohere': patch +--- + +chore(providers/cohere): convert to providerOptions diff --git a/.changeset/wild-pugs-burn.md b/.changeset/wild-pugs-burn.md new file mode 100644 index 000000000000..0de378518e79 --- /dev/null +++ b/.changeset/wild-pugs-burn.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): remove experimental continueSteps diff --git a/.changeset/wise-gorillas-act.md b/.changeset/wise-gorillas-act.md new file mode 100644 index 000000000000..003dfdc1cf36 --- /dev/null +++ b/.changeset/wise-gorillas-act.md @@ -0,0 +1,5 @@ +--- +'ai': patch +--- + +chore (ai): stable prepareStep diff --git a/.changeset/witty-candles-pretend.md b/.changeset/witty-candles-pretend.md new file mode 100644 index 000000000000..17dc025c1b27 --- /dev/null +++ 
b/.changeset/witty-candles-pretend.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename reasoning to reasoningText, rename reasoningDetails to reasoning (streamText, generateText) diff --git a/.changeset/yellow-chefs-kick.md b/.changeset/yellow-chefs-kick.md new file mode 100644 index 000000000000..c14db6aff7b0 --- /dev/null +++ b/.changeset/yellow-chefs-kick.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +feat (providers/openai): support gpt-image-1 image generation diff --git a/.changeset/yellow-eels-sort.md b/.changeset/yellow-eels-sort.md new file mode 100644 index 000000000000..a68ff96be9f9 --- /dev/null +++ b/.changeset/yellow-eels-sort.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/provider': patch +--- + +release alpha.10 diff --git a/.changeset/yellow-ligers-brake.md b/.changeset/yellow-ligers-brake.md new file mode 100644 index 000000000000..10813af7dd60 --- /dev/null +++ b/.changeset/yellow-ligers-brake.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/xai': patch +--- + +feat (providers/xai): add grok-3 models diff --git a/.changeset/young-dingos-march.md b/.changeset/young-dingos-march.md new file mode 100644 index 000000000000..342d097a0f86 --- /dev/null +++ b/.changeset/young-dingos-march.md @@ -0,0 +1,5 @@ +--- +'ai': major +--- + +chore (ai): rename CoreMessage to ModelMessage diff --git a/.github/ISSUE_TEMPLATE/1.bug_report.yml b/.github/ISSUE_TEMPLATE/1.bug_report.yml deleted file mode 100644 index 6a27f1d21c61..000000000000 --- a/.github/ISSUE_TEMPLATE/1.bug_report.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Bug report -description: Report a bug with the AI SDK. -labels: ['bug'] -body: - - type: markdown - attributes: - value: | - This template is to report bugs. If you need help with your own project, feel free to [start a new thread in our discussions](https://github.com/vercel/ai/discussions). 
- - type: textarea - attributes: - label: Description - description: A detailed description of the issue that you are encountering with the AI SDK, and how other people can reproduce it. This includes helpful information such as the API you are using, the framework and AI provider. - placeholder: | - Reproduction steps... - validations: - required: true - - type: textarea - attributes: - label: Code example - description: Provide an example code snippet that has the problem. - placeholder: | - import { openai } from '@ai-sdk/openai'; - import { streamText } from 'ai'; - ... - - type: input - id: provider - attributes: - label: AI provider - description: The AI provider (e.g. `@ai-sdk/openai`) that you are using, and its version (e.g. `1.0.0`). - placeholder: | - @ai-sdk/openai v1.0.0 - - type: textarea - attributes: - label: Additional context - description: | - Any extra information that might help us investigate. diff --git a/.github/ISSUE_TEMPLATE/1.support_request.yml b/.github/ISSUE_TEMPLATE/1.support_request.yml new file mode 100644 index 000000000000..0ea8b98fd42d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1.support_request.yml @@ -0,0 +1,27 @@ +name: Support Request +description: Report a bug, feature request or other issue with the AI SDK. +labels: ['support'] +body: + - type: markdown + attributes: + value: | + This template is ask for help regarding an issue that could be a bug or a feature request. + - type: textarea + attributes: + label: Description + description: A detailed description. Please include relevant information such as reproduction steps, code examples, and any other information that might help us understand the issue. + placeholder: | + Reproduction steps, code examples, background, etc... + validations: + required: true + - type: textarea + attributes: + label: AI SDK Version + description: Which version of the AI SDK are you using? 
+ placeholder: | + Examples: + - ai: 4.1.2 + - @ai-sdk/react: 2.1.0 + - @ai-sdk/openai: 0.5.2 + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/2.feature_request.yml b/.github/ISSUE_TEMPLATE/2.feature_request.yml deleted file mode 100644 index 2c7355ea3698..000000000000 --- a/.github/ISSUE_TEMPLATE/2.feature_request.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Feature Request -description: Propose a new feature for the AI SDK. -labels: ['enhancement'] -body: - - type: markdown - attributes: - value: | - This template is to propose new features for the AI SDK. If you need help with your own project, feel free to [start a new thread in our discussions](https://github.com/vercel/ai/discussions). - - type: textarea - attributes: - label: Feature Description - description: A detailed description of the feature you are proposing for the SDK. - placeholder: | - Feature description... - validations: - required: true - - type: textarea - attributes: - label: Use Cases - description: Provide use cases where this feature would be beneficial. - placeholder: | - Use case... - - type: textarea - attributes: - label: Additional context - description: | - Any extra information that might help us understand your feature request. - placeholder: | - Additional context... 
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000000..8e26c04c99a3 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,51 @@ + + +## Background + + + +## Summary + + + +## Verification + + + +## Tasks + + + +- [ ] Tests have been added / updated (for bug fixes / features) +- [ ] Documentation has been added / updated (for bug fixes / features) +- [ ] A _patch_ changeset for relevant packages has been added (for bug fixes / features - run `pnpm changeset` in the project root) +- [ ] Formatting issues have been fixed (run `pnpm prettier-fix` in the project root) + +## Future Work + + + +## Related Issues + + diff --git a/.github/scripts/cleanup-examples-changesets.mjs b/.github/scripts/cleanup-examples-changesets.mjs index f728fb35a78e..0d132eefc6f5 100644 --- a/.github/scripts/cleanup-examples-changesets.mjs +++ b/.github/scripts/cleanup-examples-changesets.mjs @@ -46,5 +46,5 @@ for (const app of readdirSync(fileURLToPath(examplesUrl))) { // next test server cleanup( '.', - new URL('../../packages/ai/tests/e2e/next-server', import.meta.url), + new URL('../../packages/rsc/tests/e2e/next-server', import.meta.url), ); diff --git a/.github/workflows/actions/verify-changesets/index.js b/.github/workflows/actions/verify-changesets/index.js new file mode 100644 index 000000000000..fa21a3fb171b --- /dev/null +++ b/.github/workflows/actions/verify-changesets/index.js @@ -0,0 +1,137 @@ +import fs from 'node:fs/promises'; + +const BYPASS_LABELS = ['minor', 'major']; + +// check if current file is the entry point +if (import.meta.url.endsWith(process.argv[1])) { + // https://docs.github.com/en/webhooks/webhook-events-and-payloads#pull_request + const pullRequestEvent = JSON.parse( + await fs.readFile(process.env.GITHUB_EVENT_PATH, 'utf-8'), + ); + + try { + const message = await verifyChangesets( + pullRequestEvent, + process.env, + fs.readFile, + ); + await fs.writeFile( + 
process.env.GITHUB_STEP_SUMMARY, + `## Changeset verification passed ✅\n\n${message || ''}`, + ); + } catch (error) { + // write error to summary + console.error(error.message); + await fs.writeFile( + process.env.GITHUB_STEP_SUMMARY, + `## Changeset verification failed ❌ + +${error.message}`, + ); + + if (error.path) { + await fs.appendFile( + process.env.GITHUB_STEP_SUMMARY, + `\n\nFile: \`${error.path}\``, + ); + } + + if (error.content) { + await fs.appendFile( + process.env.GITHUB_STEP_SUMMARY, + `\n\n\`\`\`yaml\n${error.content}\n\`\`\``, + ); + } + + process.exit(1); + } +} + +export async function verifyChangesets( + event, + env = process.env, + readFile = fs.readFile, +) { + // Skip check if pull request has "minor-release" label + const byPassLabel = event.pull_request.labels.find(label => + BYPASS_LABELS.includes(label.name), + ); + if (byPassLabel) { + return `Skipping changeset verification - "${byPassLabel.name}" label found`; + } + + // Iterate through all changed .changeset/*.md files + for (const path of env.CHANGED_FILES.trim().split(' ')) { + // ignore README.md file + if (path === '.changeset/README.md') continue; + + // Check if the file is a .changeset file + if (!/^\.changeset\/[a-z-]+\.md/.test(path)) { + throw Object.assign(new Error(`Invalid file - not a .changeset file`), { + path, + }); + } + + // find frontmatter + const content = await readFile(`../../../../${path}`, 'utf-8'); + const result = content.match(/---\n([\s\S]+?)\n---/); + if (!result) { + throw Object.assign( + new Error(`Invalid .changeset file - no frontmatter found`), + { + path, + content, + }, + ); + } + + const [frontmatter] = result; + + // Find version bump by package. 
`frontmatter` looks like this: + // + // ```yaml + // 'ai': patch + // '@ai-sdk/provider': patch + // ``` + const lines = frontmatter.split('\n').slice(1, -1); + const versionBumps = {}; + for (const line of lines) { + const [packageName, versionBump] = line.split(':').map(s => s.trim()); + if (!packageName || !versionBump) { + throw Object.assign( + new Error(`Invalid .changeset file - invalid frontmatter`, { + path, + content, + }), + ); + } + + // Check if packageName is already set + if (versionBumps[packageName]) { + throw Object.assign( + new Error( + `Invalid .changeset file - duplicate package name "${packageName}"`, + ), + { path, content }, + ); + } + + versionBumps[packageName] = versionBump; + } + + // check if any of the version bumps are not "patch" + const invalidVersionBumps = Object.entries(versionBumps).filter( + ([, versionBump]) => versionBump !== 'patch', + ); + + if (invalidVersionBumps.length > 0) { + throw Object.assign( + new Error( + `Invalid .changeset file - invalid version bump (only "patch" is allowed, see https://ai-sdk.dev/docs/migration-guides/versioning). 
To bypass, add one of the following labels: ${BYPASS_LABELS.join(', ')}`, + ), + + { path, content }, + ); + } + } +} diff --git a/.github/workflows/actions/verify-changesets/package.json b/.github/workflows/actions/verify-changesets/package.json new file mode 100644 index 000000000000..bff806df20e3 --- /dev/null +++ b/.github/workflows/actions/verify-changesets/package.json @@ -0,0 +1,8 @@ +{ + "name": "verify-changesets-action", + "private": true, + "type": "module", + "scripts": { + "test": "node --test test.js" + } +} diff --git a/.github/workflows/actions/verify-changesets/test.js b/.github/workflows/actions/verify-changesets/test.js new file mode 100644 index 000000000000..0c4e024f38ef --- /dev/null +++ b/.github/workflows/actions/verify-changesets/test.js @@ -0,0 +1,193 @@ +import assert from 'node:assert'; +import { mock, test } from 'node:test'; + +import { verifyChangesets } from './index.js'; + +test('happy path', async () => { + const event = { + pull_request: { + labels: [], + }, + }; + const env = { + CHANGED_FILES: '.changeset/some-happy-path.md', + }; + + const readFile = mock.fn(async path => { + return `---\nai: patch\n@ai-sdk/provider: patch\n---\n## Test changeset`; + }); + + await verifyChangesets(event, env, readFile); + + assert.strictEqual(readFile.mock.callCount(), 1); + assert.deepStrictEqual(readFile.mock.calls[0].arguments, [ + '../../../../.changeset/some-happy-path.md', + 'utf-8', + ]); +}); + +test('ignores .changeset/README.md', async () => { + const event = { + pull_request: { + labels: [], + }, + }; + const env = { + CHANGED_FILES: '.changeset/README.md', + }; + + const readFile = mock.fn(() => {}); + + await verifyChangesets(event, env, readFile); + + assert.strictEqual(readFile.mock.callCount(), 0); +}); + +test('invalid file - not a .changeset file', async () => { + const event = { + pull_request: { + labels: [], + }, + }; + const env = { + CHANGED_FILES: '.changeset/not-a-changeset-file.txt', + }; + + const readFile = 
mock.fn(() => {}); + + await assert.rejects( + () => verifyChangesets(event, env, readFile), + Object.assign(new Error('Invalid file - not a .changeset file'), { + path: '.changeset/not-a-changeset-file.txt', + }), + ); + + assert.strictEqual(readFile.mock.callCount(), 0); +}); + +test('invalid .changeset file - no frontmatter', async () => { + const event = { + pull_request: { + labels: [], + }, + }; + const env = { + CHANGED_FILES: '.changeset/invalid-changeset-file.md', + }; + + const readFile = mock.fn(async path => { + return 'frontmatter missing'; + }); + await assert.rejects( + () => verifyChangesets(event, env, readFile), + Object.assign(new Error('Invalid .changeset file - no frontmatter found'), { + path: '.changeset/invalid-changeset-file.md', + content: 'frontmatter missing', + }), + ); + assert.strictEqual(readFile.mock.callCount(), 1); + assert.deepStrictEqual(readFile.mock.calls[0].arguments, [ + '../../../../.changeset/invalid-changeset-file.md', + 'utf-8', + ]); +}); + +test('minor update', async () => { + const event = { + pull_request: { + labels: [], + }, + }; + const env = { + CHANGED_FILES: '.changeset/patch-update.md .changeset/minor-update.md', + }; + + const readFile = mock.fn(async path => { + if (path.endsWith('patch-update.md')) { + return `---\nai: patch\n---\n## Test changeset`; + } + + return `---\n@ai-sdk/provider: minor\n---\n## Test changeset`; + }); + + await assert.rejects( + () => verifyChangesets(event, env, readFile), + Object.assign( + new Error( + `Invalid .changeset file - invalid version bump (only "patch" is allowed, see https://ai-sdk.dev/docs/migration-guides/versioning). 
To bypass, add one of the following labels: minor, major`, + ), + { + path: '.changeset/minor-update.md', + content: '---\n@ai-sdk/provider: minor\n---\n## Test changeset', + }, + ), + ); + + assert.strictEqual(readFile.mock.callCount(), 2); + assert.deepStrictEqual(readFile.mock.calls[0].arguments, [ + '../../../../.changeset/patch-update.md', + 'utf-8', + ]); + assert.deepStrictEqual(readFile.mock.calls[1].arguments, [ + '../../../../.changeset/minor-update.md', + 'utf-8', + ]); +}); + +test('minor update - with "minor" label', async () => { + const event = { + pull_request: { + labels: [ + { + name: 'minor', + }, + ], + }, + }; + const env = { + CHANGED_FILES: '.changeset/patch-update.md .changeset/minor-update.md', + }; + + const readFile = mock.fn(async path => { + if (path.endsWith('patch-update.md')) { + return `---\nai: patch\n---\n## Test changeset`; + } + + return `---\n@ai-sdk/provider: minor\n---\n## Test changeset`; + }); + + const message = await verifyChangesets(event, env, readFile); + assert.strictEqual( + message, + 'Skipping changeset verification - "minor" label found', + ); +}); + +test('major update - with "major" label', async () => { + const event = { + pull_request: { + labels: [ + { + name: 'major', + }, + ], + }, + }; + const env = { + CHANGED_FILES: '.changeset/patch-update.md .changeset/major-update.md', + }; + + const readFile = mock.fn(async path => { + if (path.endsWith('patch-update.md')) { + return `---\nai: patch\n---\n## Test changeset`; + } + + return `---\n@ai-sdk/provider: major\n---\n## Test changeset`; + }); + + const message = await verifyChangesets(event, env, readFile); + assert.strictEqual( + message, + 'Skipping changeset verification - "major" label found', + ); +}); diff --git a/.github/workflows/assign-team-pull-request.yml b/.github/workflows/assign-team-pull-request.yml new file mode 100644 index 000000000000..ac500348e9a0 --- /dev/null +++ b/.github/workflows/assign-team-pull-request.yml @@ -0,0 +1,22 @@ +name: 
Assign Team Pull Requests to Author + +on: + pull_request: + types: [opened] + +permissions: + pull-requests: write + +jobs: + assign: + runs-on: ubuntu-latest + # Only assign pull requests by team members, ignore pull requests from forks + if: github.event.pull_request.head.repo.full_name == github.repository + steps: + - uses: actions/checkout@v4 + - name: Assign pull request to author + run: gh pr edit $PULL_REQUEST_URL --add-assignee $AUTHOR_LOGIN + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PULL_REQUEST_URL: ${{ github.event.pull_request.html_url }} + AUTHOR_LOGIN: ${{ github.event.pull_request.user.login }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7de754472594..89c274e80b81 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,11 +2,107 @@ name: CI on: push: - branches: [main] + branches: [main, v5] pull_request: - branches: [main] + branches: [main, v5] jobs: + build-examples: + name: 'Build Examples' + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.11.0 + + - name: Use Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Build Examples + run: pnpm run build:examples + + prettier: + name: 'Prettier' + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.11.0 + + - name: Use Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Run Prettier check + run: pnpm run prettier-check + + eslint: + name: 'ESLint' + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.11.0 + + - name: 
Use Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Run ESLint check + run: pnpm run lint + + types: + name: 'TypeScript' + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.11.0 + + - name: Use Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Run TypeScript type check + run: pnpm run type-check:full + test: name: 'Test' runs-on: ubuntu-latest @@ -23,7 +119,7 @@ jobs: - name: Setup pnpm uses: pnpm/action-setup@v4 with: - version: 9.12.3 + version: 10.11.0 - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v4 diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml deleted file mode 100644 index 42a185321759..000000000000 --- a/.github/workflows/quality.yml +++ /dev/null @@ -1,80 +0,0 @@ -name: Quality - -on: - push: - branches: [main] - pull_request: - branches: [main] - -jobs: - prettier: - name: 'Prettier' - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup pnpm - uses: pnpm/action-setup@v4 - with: - version: 9.12.3 - - - name: Use Node.js 22 - uses: actions/setup-node@v4 - with: - node-version: 22 - cache: 'pnpm' - - - name: Install dependencies - run: pnpm install --frozen-lockfile - - - name: Run Prettier check - run: pnpm run prettier-check - - eslint: - name: 'ESLint' - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup pnpm - uses: pnpm/action-setup@v4 - with: - version: 9.12.3 - - - name: Use Node.js 22 - uses: actions/setup-node@v4 - with: - node-version: 22 - cache: 'pnpm' - - - name: Install dependencies - run: pnpm install --frozen-lockfile - - - name: Run ESLint check - run: pnpm run lint 
- - types: - name: 'TypeScript' - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup pnpm - uses: pnpm/action-setup@v4 - with: - version: 9.12.3 - - - name: Use Node.js 22 - uses: actions/setup-node@v4 - with: - node-version: 22 - cache: 'pnpm' - - - name: Install dependencies - run: pnpm install --frozen-lockfile - - - name: Run TypeScript type check - run: pnpm run type-check diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index e58a9e0bfd8f..97e198e40651 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -45,7 +45,7 @@ jobs: - name: Setup pnpm uses: pnpm/action-setup@v4 with: - version: 9.12.3 + version: 10.11.0 - name: Setup Node.js 22 uses: actions/setup-node@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 12eec2f442e2..ff61d6b02d8c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,6 +4,7 @@ on: push: branches: - main + - v5 paths: - '.changeset/**' - '.github/workflows/release.yml' @@ -25,7 +26,7 @@ jobs: - name: Setup pnpm uses: pnpm/action-setup@v4 with: - version: 9.12.3 + version: 10.11.0 - name: Setup Node.js 22 uses: actions/setup-node@v4 @@ -39,7 +40,6 @@ jobs: id: changesets uses: changesets/action@v1 with: - # This expects you to have a script called release which does a build for your packages and calls changeset publish version: pnpm ci:version publish: pnpm ci:release env: diff --git a/.github/workflows/verify-changesets.yml b/.github/workflows/verify-changesets.yml new file mode 100644 index 000000000000..4feca6aef7bb --- /dev/null +++ b/.github/workflows/verify-changesets.yml @@ -0,0 +1,38 @@ +# vercel/ai uses https://github.com/changesets/changesets for versioning and changelogs, +# but is not following semantic versioning. Instead, it uses `patch` for both fixes +# and features. 
It uses `minor` for "marketing releases", accompanied by a blog post and migration guide. +# This workflow verifies that all `.changeset/*.md` files use `patch` unless a `minor-release` label is present. +name: Verify Changesets + +on: + pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] + branches: + - main + paths: + - '.changeset/*.md' + +jobs: + verify-changesets: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: actions/setup-node@v4 + with: + node-version: 'lts/*' + - name: get all changed files from .changeset/*.md + id: changeset-files + run: | + echo "changed-files=$(git diff --diff-filter=dr --name-only $BASE_SHA -- '.changeset/*.md' | tr '\n' ' ')" >> $GITHUB_OUTPUT + env: + BASE_SHA: ${{ github.event.pull_request.base.sha }} + - name: Verify changesets + if: steps.changeset-files.outputs.changed-files != '' + working-directory: .github/workflows/actions/verify-changesets + run: | + node index.js + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CHANGED_FILES: ${{ steps.changeset-files.outputs.changed-files }} diff --git a/.gitignore b/.gitignore index 22b004a9f1da..eecbad96365f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,16 @@ .DS_Store -node_modules -.turbo -*.log +.cache +.env .next +.turbo dist dist-ssr -*.local -.env -.cache -server/dist +examples/*/build +node_modules public/dist -.turbo +server/dist test-results +tsconfig.vitest-temp.json +*.log +*.local +*.tsbuildinfo diff --git a/.npmrc b/.npmrc index 2a53e07c0db1..13562b7874fa 100644 --- a/.npmrc +++ b/.npmrc @@ -1,2 +1,4 @@ auto-install-peers = true -link-workspace-packages = true \ No newline at end of file +link-workspace-packages = true +public-hoist-pattern[]=*eslint* +public-hoist-pattern[]=*prettier* \ No newline at end of file diff --git a/.prettierignore b/.prettierignore index c74a64043bc3..ee60870ab39e 100644 --- a/.prettierignore +++ b/.prettierignore @@ -3,6 +3,5 @@ node_modules dist .svelte-kit 
-.solid _nuxt __testfixtures__ diff --git a/CHANGELOG.md b/CHANGELOG.md index 11f54b62a9ff..86eecc10d977 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ You can find the changelogs for the individual packages in their respective `CHA - [@ai-sdk/deepseek](./packages/deepseek/CHANGELOG.md) - [@ai-sdk/fal](./packages/fal/CHANGELOG.md) - [@ai-sdk/fireworks](./packages/fireworks/CHANGELOG.md) +- [@ai-sdk/gateway](./packages/gateway/CHANGELOG.md) - [@ai-sdk/google](./packages/google/CHANGELOG.md) - [@ai-sdk/google-vertex](./packages/google-vertex/CHANGELOG.md) - [@ai-sdk/groq](./packages/groq/CHANGELOG.md) @@ -24,6 +25,7 @@ You can find the changelogs for the individual packages in their respective `CHA - [@ai-sdk/openai-compatible](./packages/openai-compatible/CHANGELOG.md) - [@ai-sdk/perplexity](./packages/perplexity/CHANGELOG.md) - [@ai-sdk/togetherai](./packages/togetherai/CHANGELOG.md) +- [@ai-sdk/vercel](./packages/vercel/CHANGELOG.md) - [@ai-sdk/xai](./packages/xai/CHANGELOG.md) ### UI integrations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d9644053c4a2..2b5daebb9a97 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,7 +32,7 @@ We welcome your contributions to our code and documentation. Here's how you can ### Environment Setup -AI SDK development requires PNPM v9 (lockfile version) or higher and Node v20 or higher. +AI SDK development requires PNPM v9 (lockfile version) or higher and Node v22. ### Setting Up the Repository Locally @@ -40,8 +40,8 @@ To set up the repository on your local machine, follow these steps: 1. **Fork the Repository**: Make a copy of the repository to your GitHub account. 2. **Clone the Repository**: Clone the repository to your local machine, e.g. using `git clone`. -3. **Install Node**: If you haven't already, install Node v20. -4. **Install pnpm**: If you haven't already, install pnpm v9. You can do this by running `npm install -g pnpm@9` if you're using npm. 
Alternatively, if you're using Homebrew (Mac), you can run `brew install pnpm`. For more see [the pnpm site](https://pnpm.io/installation). +3. **Install Node**: If you haven't already, install Node v22. +4. **Install pnpm**: If you haven't already, install pnpm v10. You can do this by running `npm install -g pnpm@10` if you're using npm. Alternatively, if you're using Homebrew (Mac), you can run `brew install pnpm`. For more see [the pnpm site](https://pnpm.io/installation). 5. **Install Dependencies**: Navigate to the project directory and run `pnpm install` to install all necessary dependencies. 6. **Build the Project**: Run `pnpm build` in the root to build all packages. @@ -65,15 +65,30 @@ To test the package that you're working on, run `pnpm test` in the package folde You do not need to rebuild your package to test it (only dependencies need to be built). Some packages like `ai` also have more details tests and watch mode, see their `package.json` for more information. +#### Adding package dependencies + +Please run `pnpm update-references` in workspace root to update the `references` section in the `tsconfig.json` file. + ### Submitting Pull Requests We greatly appreciate your pull requests. Here are the steps to submit them: 1. **Create a New Branch**: Initiate your changes in a fresh branch. It's recommended to name the branch in a manner that signifies the changes you're implementing. -2. **Commit Your Changes**: Ensure your commits are succinct and clear, detailing what modifications have been made and the reasons behind them. -3. **Push the Changes to Your GitHub Repository**: After committing your changes, push them to your GitHub repository. -4. **Open a Pull Request**: Propose your changes for review. Furnish a lucid title and description of your contributions. Make sure to link any relevant issues your PR resolves. -5. **Respond to Feedback**: Stay receptive to and address any feedback or alteration requests from the project maintainers. +2. 
**Add a patch changeset**: If you're updating any packages and want to ensure they're released, add a **patch** changeset to your branch by running `pnpm changeset` in the workspace root. + + - **Please do not use minor or major changesets**, we'll let you know when you need to use a different changeset type than patch. + - You don't need to select any of the `examples/*` packages, as they are not released. + +3. **Commit Your Changes**: Ensure your commits are succinct and clear, detailing what modifications have been made and the reasons behind them. We don't require a specific commit message format, but please be descriptive. +4. **Fix prettier issues**: Run `pnpm prettier-fix` to fix any formatting issues in your code. +5. **Push the Changes to Your GitHub Repository**: After committing your changes, push them to your GitHub repository. +6. **Open a Pull Request**: Propose your changes for review. Furnish a lucid title and description of your contributions. Make sure to link any relevant issues your PR resolves. We use the following PR title format: + + - `fix(package-name): description` or + - `feat(package-name): description` or + - `chore(package-name): description` etc. + +7. **Respond to Feedback**: Stay receptive to and address any feedback or alteration requests from the project maintainers. 
### Fixing Prettier Issues diff --git a/README.md b/README.md index f90be20a312e..ad93ed3ce3ec 120000 --- a/README.md +++ b/README.md @@ -1 +1 @@ -packages/ai/README.md \ No newline at end of file +packages/ai/README.md diff --git a/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx b/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx index c0106db9ba1c..ce6824c86140 100644 --- a/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx +++ b/content/cookbook/01-next/11-generate-text-with-chat-prompt.mdx @@ -31,12 +31,12 @@ Let's start by creating a simple chat interface with an input field that sends t ```tsx filename='app/page.tsx' 'use client'; -import { CoreMessage } from 'ai'; +import { ModelMessage } from 'ai'; import { useState } from 'react'; export default function Page() { const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); + const [messages, setMessages] = useState([]); return (
@@ -90,11 +90,11 @@ export default function Page() { Next, let's create the `/api/chat` endpoint that generates the assistant's response based on the conversation history. ```typescript filename='app/api/chat/route.ts' -import { CoreMessage, generateText } from 'ai'; +import { ModelMessage, generateText } from 'ai'; import { openai } from '@ai-sdk/openai'; export async function POST(req: Request) { - const { messages }: { messages: CoreMessage[] } = await req.json(); + const { messages }: { messages: ModelMessage[] } = await req.json(); const { response } = await generateText({ model: openai('gpt-4'), diff --git a/content/cookbook/01-next/12-generate-image-with-chat-prompt.mdx b/content/cookbook/01-next/12-generate-image-with-chat-prompt.mdx index 7cf9a4f51d53..91ec5f8ba54f 100644 --- a/content/cookbook/01-next/12-generate-image-with-chat-prompt.mdx +++ b/content/cookbook/01-next/12-generate-image-with-chat-prompt.mdx @@ -52,7 +52,7 @@ export async function POST(request: Request) { }), }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/120-stream-assistant-response.mdx b/content/cookbook/01-next/120-stream-assistant-response.mdx deleted file mode 100644 index 9dc2613d8e3e..000000000000 --- a/content/cookbook/01-next/120-stream-assistant-response.mdx +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Stream Assistant Response -description: Learn how to stream OpenAI Assistant's response using the AI SDK and Next.js -tags: ['next', 'streaming', 'assistant'] ---- - -# Stream Assistant Response - -## Client - -Let's create a simple chat interface that allows users to send messages to the assistant and receive responses. You will integrate the `useAssistant` hook from `@ai-sdk/react` to stream the messages and status. 
- -```tsx filename='app/page.tsx' -'use client'; - -import { Message, useAssistant } from '@ai-sdk/react'; - -export default function Page() { - const { status, messages, input, submitMessage, handleInputChange } = - useAssistant({ api: '/api/assistant' }); - - return ( -
-
status: {status}
- -
- {messages.map((message: Message) => ( -
-
{`${message.role}: `}
-
{message.content}
-
- ))} -
- -
- -
-
- ); -} -``` - -## Server - -Next, you will create an API route for `api/assistant` to handle the assistant's messages and responses. You will use the `AssistantResponse` function from `ai` to stream the assistant's responses back to the `useAssistant` hook on the client. - -```tsx filename='app/api/assistant/route.ts' -import OpenAI from 'openai'; -import { AssistantResponse } from 'ai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export async function POST(req: Request) { - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream }) => { - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? - (() => { - throw new Error('ASSISTANT_ID environment is not set'); - })(), - }); - - await forwardStream(runStream); - }, - ); -} -``` - ---- - - diff --git a/content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx b/content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx deleted file mode 100644 index a0134cc9195a..000000000000 --- a/content/cookbook/01-next/121-stream-assistant-response-with-tools.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: Stream Assistant Response with Tools -description: Learn how to stream OpenAI Assistant's response using the AI SDK and Next.js -tags: ['next', 'streaming', 'assistant'] ---- - -# Stream Assistant Response with Tools - -Let's create a simple chat interface that allows users to send messages to the assistant and receive responses and give it the ability to use tools. You will integrate the `useAssistant` hook from `@ai-sdk/react` to stream the messages and status. 
- -You will need to provide the list of tools on the OpenAI [Assistant Dashboard](https://platform.openai.com/assistants). You can use the following schema to create a tool to convert celsius to fahrenheit. - -```json -{ - "name": "celsiusToFahrenheit", - "description": "convert celsius to fahrenheit.", - "parameters": { - "type": "object", - "properties": { - "value": { - "type": "number", - "description": "the value in celsius." - } - }, - "required": ["value"] - } -} -``` - -## Client - -Let's create a simple chat interface that allows users to send messages to the assistant and receive responses. You will integrate the `useAssistant` hook from `@ai-sdk/react` to stream the messages and status. - -```tsx filename='app/page.tsx' -'use client'; - -import { Message, useAssistant } from '@ai-sdk/react'; - -export default function Page() { - const { status, messages, input, submitMessage, handleInputChange } = - useAssistant({ api: '/api/assistant' }); - - return ( -
-
status: {status}
- -
- {messages.map((message: Message) => ( -
-
{`${message.role}: `}
-
{message.content}
-
- ))} -
- -
- -
-
- ); -} -``` - -## Server - -Next, you will create an API route for `api/assistant` to handle the assistant's messages and responses. You will use the `AssistantResponse` function from `ai` to stream the assistant's responses back to the `useAssistant` hook on the client. - -```tsx filename='app/api/assistant/route.ts' -import { AssistantResponse } from 'ai'; -import OpenAI from 'openai'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY || '', -}); - -export async function POST(req: Request) { - const input: { - threadId: string | null; - message: string; - } = await req.json(); - - const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; - - const createdMessage = await openai.beta.threads.messages.create(threadId, { - role: 'user', - content: input.message, - }); - - return AssistantResponse( - { threadId, messageId: createdMessage.id }, - async ({ forwardStream }) => { - const runStream = openai.beta.threads.runs.stream(threadId, { - assistant_id: - process.env.ASSISTANT_ID ?? 
- (() => { - throw new Error('ASSISTANT_ID is not set'); - })(), - }); - - let runResult = await forwardStream(runStream); - - while ( - runResult?.status === 'requires_action' && - runResult.required_action?.type === 'submit_tool_outputs' - ) { - const tool_outputs = - runResult.required_action.submit_tool_outputs.tool_calls.map( - (toolCall: any) => { - const parameters = JSON.parse(toolCall.function.arguments); - - switch (toolCall.function.name) { - case 'celsiusToFahrenheit': - const celsius = parseFloat(parameters.value); - const fahrenheit = celsius * (9 / 5) + 32; - - return { - tool_call_id: toolCall.id, - output: `${celsius}°C is ${fahrenheit.toFixed(2)}°F`, - }; - - default: - throw new Error( - `Unknown tool call function: ${toolCall.function.name}`, - ); - } - }, - ); - - runResult = await forwardStream( - openai.beta.threads.runs.submitToolOutputsStream( - threadId, - runResult.id, - { tool_outputs }, - ), - ); - } - }, - ); -} -``` - ---- - - diff --git a/content/cookbook/01-next/122-caching-middleware.mdx b/content/cookbook/01-next/122-caching-middleware.mdx index 0208cfef7180..28e50cb7d8e8 100644 --- a/content/cookbook/01-next/122-caching-middleware.mdx +++ b/content/cookbook/01-next/122-caching-middleware.mdx @@ -66,7 +66,7 @@ You can control the initial delay and delay between chunks by adjusting the `ini import { Redis } from '@upstash/redis'; import { type LanguageModelV1, - type LanguageModelV1Middleware, + type LanguageModelV2Middleware, type LanguageModelV1StreamPart, simulateReadableStream, } from 'ai'; @@ -76,7 +76,7 @@ const redis = new Redis({ token: process.env.KV_TOKEN, }); -export const cacheMiddleware: LanguageModelV1Middleware = { +export const cacheMiddleware: LanguageModelV2Middleware = { wrapGenerate: async ({ doGenerate, params }) => { const cacheKey = JSON.stringify(params); @@ -122,7 +122,6 @@ export const cacheMiddleware: LanguageModelV1Middleware = { chunkDelayInMs: 10, chunks: formattedChunks, }), - rawCall: { rawPrompt: 
null, rawSettings: {} }, }; } @@ -192,6 +191,6 @@ export async function POST(req: Request) { }), }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/20-stream-text.mdx b/content/cookbook/01-next/20-stream-text.mdx index aad1ca4e2206..fcfdfe4d605a 100644 --- a/content/cookbook/01-next/20-stream-text.mdx +++ b/content/cookbook/01-next/20-stream-text.mdx @@ -59,7 +59,7 @@ export async function POST(req: Request) { prompt, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/21-stream-text-with-chat-prompt.mdx b/content/cookbook/01-next/21-stream-text-with-chat-prompt.mdx index a4ffdb4e235f..c3f2bd9c1b09 100644 --- a/content/cookbook/01-next/21-stream-text-with-chat-prompt.mdx +++ b/content/cookbook/01-next/21-stream-text-with-chat-prompt.mdx @@ -74,7 +74,7 @@ export async function POST(req: Request) { messages, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/22-stream-text-with-image-prompt.mdx b/content/cookbook/01-next/22-stream-text-with-image-prompt.mdx index 3282dcf807b7..077f58b9ea3b 100644 --- a/content/cookbook/01-next/22-stream-text-with-image-prompt.mdx +++ b/content/cookbook/01-next/22-stream-text-with-image-prompt.mdx @@ -30,7 +30,7 @@ export async function POST(req: Request) { // Call the language model const result = streamText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), messages: [ ...initialMessages, { @@ -44,7 +44,7 @@ export async function POST(req: Request) { }); // Respond with the stream - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/23-chat-with-pdf.mdx b/content/cookbook/01-next/23-chat-with-pdf.mdx index 15deb7a3bf9f..b8a2c486edca 100644 --- a/content/cookbook/01-next/23-chat-with-pdf.mdx +++ 
b/content/cookbook/01-next/23-chat-with-pdf.mdx @@ -10,8 +10,7 @@ Some language models like Anthropic's Claude Sonnet 3.5 and Google's Gemini 2.0 This example requires a provider that supports PDFs, such as Anthropic's - Claude Sonnet 3.5 or Google's Gemini 2.0. Note OpenAI's GPT-4o does not - currently support PDFs. Check the [provider + Claude 3.7, Google's Gemini 2.5, or OpenAI's GPT-4.1. Check the [provider documentation](/providers/ai-sdk-providers) for up-to-date support information. @@ -36,7 +35,7 @@ export async function POST(req: Request) { messages, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/24-stream-text-multistep.mdx b/content/cookbook/01-next/24-stream-text-multistep.mdx index fec93204aa78..ff1f3244466a 100644 --- a/content/cookbook/01-next/24-stream-text-multistep.mdx +++ b/content/cookbook/01-next/24-stream-text-multistep.mdx @@ -27,7 +27,7 @@ export async function POST(req: Request) { execute: async dataStream => { // step 1 example: forced tool call const result1 = streamText({ - model: openai('gpt-4o-mini', { structuredOutputs: true }), + model: openai('gpt-4o-mini'), system: 'Extract the user goal from the conversation.', messages, toolChoice: 'required', // force the model to call a tool @@ -54,7 +54,10 @@ export async function POST(req: Request) { system: 'You are a helpful assistant with a different system prompt. Repeat the extract user goal in your answer.', // continue the workflow stream with the messages from the previous step: - messages: [...messages, ...(await result1.response).messages], + messages: [ + ...convertToModelMessages(messages), + ...(await result1.response).messages, + ], }); // forward the 2nd result to the client (incl. 
the finish event): diff --git a/content/cookbook/01-next/25-markdown-chatbot-with-memoization.mdx b/content/cookbook/01-next/25-markdown-chatbot-with-memoization.mdx index eb6be83dfbc6..91e45f554080 100644 --- a/content/cookbook/01-next/25-markdown-chatbot-with-memoization.mdx +++ b/content/cookbook/01-next/25-markdown-chatbot-with-memoization.mdx @@ -32,7 +32,7 @@ export async function POST(req: Request) { messages, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx b/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx index 34c3eba7a223..4bdcbe607296 100644 --- a/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx +++ b/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx @@ -88,13 +88,13 @@ export async function POST(request: Request) { { type: 'file', data: await file.arrayBuffer(), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, ], schema: z.object({ - summary: z.string().describe('A 50 word sumamry of the PDF.'), + summary: z.string().describe('A 50 word summary of the PDF.'), }), }); diff --git a/content/cookbook/01-next/40-stream-object.mdx b/content/cookbook/01-next/40-stream-object.mdx index 2bb6e52d3e9e..a85dd8203493 100644 --- a/content/cookbook/01-next/40-stream-object.mdx +++ b/content/cookbook/01-next/40-stream-object.mdx @@ -112,7 +112,7 @@ export async function POST(req: Request) { const context = await req.json(); const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: notificationSchema, prompt: `Generate 3 notifications for a messages app in this context:` + context, @@ -246,7 +246,7 @@ export async function POST(req: Request) { const context = await req.json(); const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), output: 'array', schema: notificationSchema, prompt: @@ -321,7 +321,7 @@ 
export async function POST(req: Request) { const context = await req.json(); const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), output: 'no-schema', prompt: `Generate 3 notifications for a messages app in this context:` + context, diff --git a/content/cookbook/01-next/70-call-tools.mdx b/content/cookbook/01-next/70-call-tools.mdx index 573e4180fcd3..13c9d5bb918e 100644 --- a/content/cookbook/01-next/70-call-tools.mdx +++ b/content/cookbook/01-next/70-call-tools.mdx @@ -112,7 +112,7 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/71-call-tools-in-parallel.mdx b/content/cookbook/01-next/71-call-tools-in-parallel.mdx index 90473363ddfd..4da64b243497 100644 --- a/content/cookbook/01-next/71-call-tools-in-parallel.mdx +++ b/content/cookbook/01-next/71-call-tools-in-parallel.mdx @@ -112,7 +112,7 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/72-call-tools-multiple-steps.mdx b/content/cookbook/01-next/72-call-tools-multiple-steps.mdx index a749237c9951..ca62cea3e6b1 100644 --- a/content/cookbook/01-next/72-call-tools-multiple-steps.mdx +++ b/content/cookbook/01-next/72-call-tools-multiple-steps.mdx @@ -107,6 +107,6 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/01-next/73-mcp-tools.mdx b/content/cookbook/01-next/73-mcp-tools.mdx index 36a58173a173..13570ea2fe7b 100644 --- a/content/cookbook/01-next/73-mcp-tools.mdx +++ b/content/cookbook/01-next/73-mcp-tools.mdx @@ -12,10 +12,15 @@ The AI SDK supports Model Context Protocol (MCP) tools by offering a lightweight Let's create a route handler for `/api/completion` that will generate text based on 
the input prompt and MCP tools that can be called at any time during a generation. The route will call the `streamText` function from the `ai` module, which will then generate text based on the input prompt and stream it to the client. +To use the `StreamableHTTPClientTransport`, you will need to install the official Typescript SDK for Model Context Protocol: + + + ```ts filename="app/api/completion/route.ts" import { experimental_createMCPClient, streamText } from 'ai'; import { Experimental_StdioMCPTransport } from 'ai/mcp-stdio'; import { openai } from '@ai-sdk/openai'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp'; export async function POST(req: Request) { const { prompt }: { prompt: string } = await req.json(); @@ -38,17 +43,17 @@ export async function POST(req: Request) { }, }); - // Similarly to the stdio example, you can pass in your own custom transport as long as it implements the `MCPTransport` interface: - const transport = new MyCustomTransport({ - // ... - }); - const customTransportClient = await experimental_createMCPClient({ + // Similarly to the stdio example, you can pass in your own custom transport as long as it implements the `MCPTransport` interface (e.g. 
`StreamableHTTPClientTransport`): + const transport = new StreamableHTTPClientTransport( + new URL('http://localhost:3000/mcp'), + ); + const customClient = await experimental_createMCPClient({ transport, }); const toolSetOne = await stdioClient.tools(); const toolSetTwo = await sseClient.tools(); - const toolSetThree = await customTransportClient.tools(); + const toolSetThree = await customClient.tools(); const tools = { ...toolSetOne, ...toolSetTwo, @@ -63,7 +68,15 @@ export async function POST(req: Request) { onFinish: async () => { await stdioClient.close(); await sseClient.close(); - await customTransportClient.close(); + await customClient.close(); + }, + // Closing clients onError is optional + // - Closing: Immediately frees resources, prevents hanging connections + // - Not closing: Keeps connection open for retries + onError: async error => { + await stdioClient.close(); + await sseClient.close(); + await customClient.close(); }, }); diff --git a/content/cookbook/01-next/75-human-in-the-loop.mdx b/content/cookbook/01-next/75-human-in-the-loop.mdx index 4faba3d440b2..a390623239ab 100644 --- a/content/cookbook/01-next/75-human-in-the-loop.mdx +++ b/content/cookbook/01-next/75-human-in-the-loop.mdx @@ -333,9 +333,10 @@ The solution above is low-level and not very friendly to use in a production env ### Create Utility Functions ```ts filename="utils.ts" -import { formatDataStreamPart, Message } from '@ai-sdk/ui-utils'; import { - convertToCoreMessages, + formatDataStreamPart, + Message, + convertToModelMessages, DataStreamWriter, ToolExecutionOptions, ToolSet, @@ -418,7 +419,7 @@ export async function processToolCalls< const toolInstance = executeFunctions[toolName]; if (toolInstance) { result = await toolInstance(toolInvocation.args, { - messages: convertToCoreMessages(messages), + messages: convertToModelMessages(messages), toolCallId: toolInvocation.toolCallId, }); } else { diff --git a/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx 
b/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx index 0c270f36c782..7d4b35fcdb3f 100644 --- a/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx +++ b/content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx @@ -8,7 +8,7 @@ tags: ['next', 'chat'] `experimental_prepareRequestBody` is an experimental feature and only - available in React, Solid and Vue. + available in React, Svelte and Vue. By default, `useChat` sends all messages as well as information from the request to the server. @@ -80,7 +80,7 @@ export async function POST(req: Request) { // Call the language model const result = streamText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), messages: [...history, { role: 'user', content: text }] onFinish({ text }) { // e.g. save the message and the response to storage @@ -88,6 +88,6 @@ export async function POST(req: Request) { }) // Respond with the stream - return result.toDataStreamResponse() + return result.toUIMessageStreamResponse() } ``` diff --git a/content/cookbook/01-next/90-render-visual-interface-in-chat.mdx b/content/cookbook/01-next/90-render-visual-interface-in-chat.mdx index 0642bf6f2836..9b5076cce606 100644 --- a/content/cookbook/01-next/90-render-visual-interface-in-chat.mdx +++ b/content/cookbook/01-next/90-render-visual-interface-in-chat.mdx @@ -193,7 +193,7 @@ export default async function POST(request: Request) { const { messages } = await request.json(); const result = streamText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), messages, tools: { // server-side tool with execute function: @@ -232,6 +232,6 @@ export default async function POST(request: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/cookbook/05-node/11-generate-text-with-chat-prompt.mdx b/content/cookbook/05-node/11-generate-text-with-chat-prompt.mdx index cb63d329f148..98a9959d5d11 100644 --- 
a/content/cookbook/05-node/11-generate-text-with-chat-prompt.mdx +++ b/content/cookbook/05-node/11-generate-text-with-chat-prompt.mdx @@ -16,7 +16,7 @@ import { openai } from '@ai-sdk/openai'; const result = await generateText({ model: openai('gpt-3.5-turbo'), - maxTokens: 1024, + maxOutputTokens: 1024, system: 'You are a helpful chatbot.', messages: [ { diff --git a/content/cookbook/05-node/12-generate-text-with-image-prompt.mdx b/content/cookbook/05-node/12-generate-text-with-image-prompt.mdx index 9076dc3e726f..b21d1b9048c3 100644 --- a/content/cookbook/05-node/12-generate-text-with-image-prompt.mdx +++ b/content/cookbook/05-node/12-generate-text-with-image-prompt.mdx @@ -15,8 +15,8 @@ import { generateText } from 'ai'; import { openai } from '@ai-sdk/openai'; const result = await generateText({ - model: openai('gpt-4-turbo'), - maxTokens: 512, + model: openai('gpt-4.1'), + maxOutputTokens: 512, messages: [ { role: 'user', @@ -47,8 +47,8 @@ import { openai } from '@ai-sdk/openai'; import fs from 'fs'; const result = await generateText({ - model: openai('gpt-4-turbo'), - maxTokens: 512, + model: openai('gpt-4.1'), + maxOutputTokens: 512, messages: [ { role: 'user', diff --git a/content/cookbook/05-node/20-stream-text.mdx b/content/cookbook/05-node/20-stream-text.mdx index aeb7616c887d..db073746af74 100644 --- a/content/cookbook/05-node/20-stream-text.mdx +++ b/content/cookbook/05-node/20-stream-text.mdx @@ -31,7 +31,7 @@ import { openai } from '@ai-sdk/openai'; const result = streamText({ model: openai('gpt-3.5-turbo'), - maxTokens: 512, + maxOutputTokens: 512, temperature: 0.3, maxRetries: 5, prompt: 'Invent a new holiday and describe its traditions.', @@ -50,7 +50,7 @@ import { openai } from '@ai-sdk/openai'; const result = streamText({ model: openai('gpt-3.5-turbo'), - maxTokens: 512, + maxOutputTokens: 512, temperature: 0.3, maxRetries: 5, prompt: 'Invent a new holiday and describe its traditions.', diff --git 
a/content/cookbook/05-node/21-stream-text-with-chat-prompt.mdx b/content/cookbook/05-node/21-stream-text-with-chat-prompt.mdx index 48213968166c..e99e45d445b0 100644 --- a/content/cookbook/05-node/21-stream-text-with-chat-prompt.mdx +++ b/content/cookbook/05-node/21-stream-text-with-chat-prompt.mdx @@ -17,7 +17,7 @@ import { openai } from '@ai-sdk/openai'; const result = streamText({ model: openai('gpt-3.5-turbo'), - maxTokens: 1024, + maxOutputTokens: 1024, system: 'You are a helpful chatbot.', messages: [ { diff --git a/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx b/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx index 5d735b004b23..f540e5749c34 100644 --- a/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx +++ b/content/cookbook/05-node/23-stream-text-with-file-prompt.mdx @@ -28,7 +28,7 @@ async function main() { { type: 'file', data: fs.readFileSync('./data/ai.pdf'), - mimeType: 'application/pdf', + mediaType: 'application/pdf', }, ], }, diff --git a/content/cookbook/05-node/30-generate-object.mdx b/content/cookbook/05-node/30-generate-object.mdx index c0cd25fd15de..d7fe1cffce88 100644 --- a/content/cookbook/05-node/30-generate-object.mdx +++ b/content/cookbook/05-node/30-generate-object.mdx @@ -16,7 +16,7 @@ import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; const result = await generateObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: z.object({ recipe: z.object({ name: z.string(), diff --git a/content/cookbook/05-node/40-stream-object.mdx b/content/cookbook/05-node/40-stream-object.mdx index e6508bfb8240..f178deed0fa0 100644 --- a/content/cookbook/05-node/40-stream-object.mdx +++ b/content/cookbook/05-node/40-stream-object.mdx @@ -19,7 +19,7 @@ import { streamObject } from 'ai'; import { z } from 'zod'; const { partialObjectStream } = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: z.object({ recipe: z.object({ name: z.string(), diff --git 
a/content/cookbook/05-node/41-stream-object-with-image-prompt.mdx b/content/cookbook/05-node/41-stream-object-with-image-prompt.mdx index 0ef6fb137e28..a7524638b7e4 100644 --- a/content/cookbook/05-node/41-stream-object-with-image-prompt.mdx +++ b/content/cookbook/05-node/41-stream-object-with-image-prompt.mdx @@ -20,8 +20,8 @@ dotenv.config(); async function main() { const { partialObjectStream } = streamObject({ - model: openai('gpt-4-turbo'), - maxTokens: 512, + model: openai('gpt-4.1'), + maxOutputTokens: 512, schema: z.object({ stamps: z.array( z.object({ @@ -70,8 +70,8 @@ dotenv.config(); async function main() { const { partialObjectStream } = streamObject({ - model: openai('gpt-4-turbo'), - maxTokens: 512, + model: openai('gpt-4.1'), + maxOutputTokens: 512, schema: z.object({ stamps: z.array( z.object({ diff --git a/content/cookbook/05-node/45-stream-object-record-token-usage.mdx b/content/cookbook/05-node/45-stream-object-record-token-usage.mdx index ec14c3b396da..cdb20160070d 100644 --- a/content/cookbook/05-node/45-stream-object-record-token-usage.mdx +++ b/content/cookbook/05-node/45-stream-object-record-token-usage.mdx @@ -20,7 +20,7 @@ import { streamObject } from 'ai'; import { z } from 'zod'; const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: z.object({ recipe: z.object({ name: z.string(), @@ -45,7 +45,7 @@ import { streamObject, TokenUsage } from 'ai'; import { z } from 'zod'; const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: z.object({ recipe: z.object({ name: z.string(), diff --git a/content/cookbook/05-node/46-stream-object-record-final-object.mdx b/content/cookbook/05-node/46-stream-object-record-final-object.mdx index fe40bae53238..5c08751bf6d7 100644 --- a/content/cookbook/05-node/46-stream-object-record-final-object.mdx +++ b/content/cookbook/05-node/46-stream-object-record-final-object.mdx @@ -22,7 +22,7 @@ import { streamObject } from 'ai'; import 
{ z } from 'zod'; const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: z.object({ recipe: z.object({ name: z.string(), @@ -54,7 +54,7 @@ import { streamObject } from 'ai'; import { z } from 'zod'; const result = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), schema: z.object({ recipe: z.object({ name: z.string(), diff --git a/content/cookbook/05-node/50-call-tools.mdx b/content/cookbook/05-node/50-call-tools.mdx index 2d361347b541..49a1148f5e28 100644 --- a/content/cookbook/05-node/50-call-tools.mdx +++ b/content/cookbook/05-node/50-call-tools.mdx @@ -15,7 +15,7 @@ import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; const result = await generateText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), tools: { weather: tool({ description: 'Get the weather in a location', @@ -51,7 +51,7 @@ dotenv.config(); async function main() { const result = await generateText({ model: openai('gpt-3.5-turbo'), - maxTokens: 512, + maxOutputTokens: 512, tools: { weather: tool({ description: 'Get the weather in a location', @@ -107,7 +107,7 @@ dotenv.config(); async function main() { const result = await generateText({ model: openai('gpt-3.5-turbo'), - maxTokens: 512, + maxOutputTokens: 512, tools: { weather: tool({ description: 'Get the weather in a location', diff --git a/content/cookbook/05-node/51-call-tools-in-parallel.mdx b/content/cookbook/05-node/51-call-tools-in-parallel.mdx index 960884645d7c..2c22b58003b7 100644 --- a/content/cookbook/05-node/51-call-tools-in-parallel.mdx +++ b/content/cookbook/05-node/51-call-tools-in-parallel.mdx @@ -1,5 +1,5 @@ --- -title: Call Tools in Parallels +title: Call Tools in Parallel description: Learn how to call tools in parallel using the AI SDK and Node tags: ['node', 'tool use'] --- @@ -15,7 +15,7 @@ import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; const result = await generateText({ - model: openai('gpt-4-turbo'), + model: 
openai('gpt-4.1'), tools: { weather: tool({ description: 'Get the weather in a location', diff --git a/content/cookbook/05-node/52-call-tools-with-image-prompt.mdx b/content/cookbook/05-node/52-call-tools-with-image-prompt.mdx index ff8ceb585715..f65ff345baad 100644 --- a/content/cookbook/05-node/52-call-tools-with-image-prompt.mdx +++ b/content/cookbook/05-node/52-call-tools-with-image-prompt.mdx @@ -14,7 +14,7 @@ import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; const result = await generateText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), messages: [ { role: 'user', diff --git a/content/cookbook/05-node/53-call-tools-multiple-steps.mdx b/content/cookbook/05-node/53-call-tools-multiple-steps.mdx index 75858198a714..c4234b4fadaa 100644 --- a/content/cookbook/05-node/53-call-tools-multiple-steps.mdx +++ b/content/cookbook/05-node/53-call-tools-multiple-steps.mdx @@ -19,7 +19,7 @@ import { openai } from '@ai-sdk/openai'; import { z } from 'zod'; const { text } = await generateText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), maxSteps: 5, tools: { weather: tool({ diff --git a/content/cookbook/05-node/80-local-caching-middleware.mdx b/content/cookbook/05-node/80-local-caching-middleware.mdx new file mode 100644 index 000000000000..2d95a6efb562 --- /dev/null +++ b/content/cookbook/05-node/80-local-caching-middleware.mdx @@ -0,0 +1,241 @@ +--- +title: Local Caching Middleware +description: Learn how to create a caching middleware for local development. +tags: ['streaming', 'caching', 'middleware'] +--- + +# Local Caching Middleware + +When developing AI applications, you'll often find yourself repeatedly making the same API calls during development. This can lead to increased costs and slower development cycles. A caching middleware allows you to store responses locally and reuse them when the same inputs are provided. + +This approach is particularly useful in two scenarios: + +1. 
**Iterating on UI/UX** - When you're focused on styling and user experience, you don't want to regenerate AI responses for every code change. +2. **Working on evals** - When developing evals, you need to repeatedly test the same prompts, but don't need new generations each time. + +## Implementation + +In this implementation, you create a JSON file to store responses. When a request is made, you first check if you have already seen this exact request. If you have, you return the cached response immediately (as a one-off generation or chunks of tokens). If not, you trigger the generation, save the response, and return it. + + + Make sure to add the path of your local cache to your `.gitignore` so you do + not commit it. + + +### How it works + +For regular generations, you store and retrieve complete responses. Instead, the streaming implementation captures each token as it arrives, stores the full sequence, and on cache hits uses the SDK's `simulateReadableStream` utility to recreate the token-by-token streaming experience at a controlled speed (defaults to 10ms between chunks). + +This approach gives you the best of both worlds: + +- Instant responses for repeated queries +- Preserved streaming behavior for UI development + +The middleware handles all transformations needed to make cached responses indistinguishable from fresh ones, including normalizing tool calls and fixing timestamp formats. 
+import { + type LanguageModelV1, + type LanguageModelV2Middleware, + type LanguageModelV1Prompt, + type LanguageModelV1StreamPart, + simulateReadableStream, + wrapLanguageModel, +} from 'ai';
const cached = getCachedResult(cacheKey) as Awaited< + ReturnType<LanguageModelV1['doGenerate']> + > | null;
transform(chunk, controller) { + fullResponse.push(chunk); + controller.enqueue(chunk); + }, + flush() { + // Store the full response in the cache after streaming is complete + updateCache(cacheKey, fullResponse); + }, + }); + + return { + stream: stream.pipeThrough(transformStream), + ...rest, + }; + }, +}; +``` + +## Using the Middleware + +The middleware can be easily integrated into your existing AI SDK setup: + +```ts highlight="4,8" +import { openai } from '@ai-sdk/openai'; +import { streamText } from 'ai'; +import 'dotenv/config'; +import { cached } from '../middleware/your-cache-middleware'; + +async function main() { + const result = streamText({ + model: cached(openai('gpt-4o')), + maxOutputTokens: 512, + temperature: 0.3, + maxRetries: 5, + prompt: 'Invent a new holiday and describe its traditions.', + }); + + for await (const textPart of result.textStream) { + process.stdout.write(textPart); + } + + console.log(); + console.log('Token usage:', await result.usage); + console.log('Finish reason:', await result.finishReason); +} + +main().catch(console.error); +``` + +## Considerations + +When using this caching middleware, keep these points in mind: + +1. **Development Only** - This approach is intended for local development, not production environments +2. **Cache Invalidation** - You'll need to clear the cache (delete the cache file) when you want fresh responses +3. **Multi-Step Flows** - When using `maxSteps`, be aware that caching occurs at the individual language model response level, not across the entire execution flow. This means that while the model's generation is cached, the tool call is not and will run on each generation. 
diff --git a/content/cookbook/20-rsc/120-stream-assistant-response.mdx b/content/cookbook/20-rsc/120-stream-assistant-response.mdx deleted file mode 100644 index b686df19c591..000000000000 --- a/content/cookbook/20-rsc/120-stream-assistant-response.mdx +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Stream Assistant Response -description: Learn how to generate text using the AI SDK and React Server Components. -tags: ['rsc', 'streaming', 'assistant'] ---- - -# Stream Assistant Responses - -In this example, you'll learn how to stream responses from OpenAI's [Assistant API](https://platform.openai.com/docs/assistants/overview) using `ai/rsc`. - -## Client - -In your client component, you will create a simple chat interface that allows users to send messages to the assistant and receive responses. The assistant's responses will be streamed in two parts: the status of the current run and the text content of the messages. - -```tsx filename='app/page.tsx' -'use client'; - -import { useState } from 'react'; -import { ClientMessage } from './actions'; -import { useActions } from 'ai/rsc'; - -export default function Home() { - const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); - const { submitMessage } = useActions(); - - const handleSubmission = async () => { - setMessages(currentMessages => [ - ...currentMessages, - { - id: '123', - status: 'user.message.created', - text: input, - gui: null, - }, - ]); - - const response = await submitMessage(input); - setMessages(currentMessages => [...currentMessages, response]); - setInput(''); - }; - - return ( -
-
- setInput(event.target.value)} - placeholder="Ask a question" - onKeyDown={event => { - if (event.key === 'Enter') { - handleSubmission(); - } - }} - /> - -
- -
-
- {messages.map(message => ( -
-
-
{message.status}
-
-
{message.text}
-
- ))} -
-
-
- ); -} -``` - -```tsx filename='app/message.tsx' -'use client'; - -import { StreamableValue, useStreamableValue } from 'ai/rsc'; - -export function Message({ textStream }: { textStream: StreamableValue }) { - const [text] = useStreamableValue(textStream); - - return
{text}
; -} -``` - -## Server - -In your server action, you will create a function called `submitMessage` that adds the user's message to the thread. The function will create a new thread if one does not exist and add the user's message to the thread. If a thread already exists, the function will add the user's message to the existing thread. The function will then create a run and stream the assistant's response to the client. Furthermore, the run queue is used to manage multiple runs in the same thread during the lifetime of the server action. - -```tsx filename='app/actions.tsx' -'use server'; - -import { generateId } from 'ai'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; -import { OpenAI } from 'openai'; -import { ReactNode } from 'react'; -import { Message } from './message'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY, -}); - -export interface ClientMessage { - id: string; - status: ReactNode; - text: ReactNode; -} - -const ASSISTANT_ID = 'asst_xxxx'; -let THREAD_ID = ''; -let RUN_ID = ''; - -export async function submitMessage(question: string): Promise { - const statusUIStream = createStreamableUI('thread.init'); - - const textStream = createStreamableValue(''); - const textUIStream = createStreamableUI( - , - ); - - const runQueue = []; - - (async () => { - if (THREAD_ID) { - await openai.beta.threads.messages.create(THREAD_ID, { - role: 'user', - content: question, - }); - - const run = await openai.beta.threads.runs.create(THREAD_ID, { - assistant_id: ASSISTANT_ID, - stream: true, - }); - - runQueue.push({ id: generateId(), run }); - } else { - const run = await openai.beta.threads.createAndRun({ - assistant_id: ASSISTANT_ID, - stream: true, - thread: { - messages: [{ role: 'user', content: question }], - }, - }); - - runQueue.push({ id: generateId(), run }); - } - - while (runQueue.length > 0) { - const latestRun = runQueue.shift(); - - if (latestRun) { - for await (const delta of latestRun.run) { - const { 
data, event } = delta; - - statusUIStream.update(event); - - if (event === 'thread.created') { - THREAD_ID = data.id; - } else if (event === 'thread.run.created') { - RUN_ID = data.id; - } else if (event === 'thread.message.delta') { - data.delta.content?.map(part => { - if (part.type === 'text') { - if (part.text) { - textStream.append(part.text.value as string); - } - } - }); - } else if (event === 'thread.run.failed') { - console.error(data); - } - } - } - } - - statusUIStream.done(); - textStream.done(); - })(); - - return { - id: generateId(), - status: statusUIStream.value, - text: textUIStream.value, - }; -} -``` - -```tsx filename="app/ai.ts" -import { createAI } from 'ai/rsc'; -import { submitMessage } from './actions'; - -export const AI = createAI({ - actions: { - submitMessage, - }, - initialAIState: [], - initialUIState: [], -}); -``` - -And finally, make sure to update your layout component to wrap the children with the `AI` component. - -```tsx filename="app/layout.tsx" -import { ReactNode } from 'react'; -import { AI } from './ai'; - -export default function Layout({ children }: { children: ReactNode }) { - return {children}; -} -``` diff --git a/content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx b/content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx deleted file mode 100644 index cd24a11d7b5d..000000000000 --- a/content/cookbook/20-rsc/121-stream-assistant-response-with-tools.mdx +++ /dev/null @@ -1,287 +0,0 @@ ---- -title: Stream Assistant Response with Tools -description: Learn how to generate text using the AI SDK and React Server Components. -tags: ['rsc', 'streaming', 'assistant'] ---- - -# Stream Assistant Responses - -In this example, you'll learn how to stream responses along with tool calls from OpenAI's [Assistant API](https://platform.openai.com/docs/assistants/overview) using `ai/rsc`. 
- -## Client - -In your client component, you will create a simple chat interface that allows users to send messages to the assistant and receive responses. The assistant's responses will be streamed in two parts: the status of the current run and the text content of the messages. - -```tsx filename='app/page.tsx' -'use client'; - -import { useState } from 'react'; -import { ClientMessage, submitMessage } from './actions'; -import { useActions } from 'ai/rsc'; - -export default function Home() { - const [input, setInput] = useState(''); - const [messages, setMessages] = useState([]); - const { submitMessage } = useActions(); - - const handleSubmission = async () => { - setMessages(currentMessages => [ - ...currentMessages, - { - id: '123', - status: 'user.message.created', - text: input, - gui: null, - }, - ]); - - const response = await submitMessage(input); - setMessages(currentMessages => [...currentMessages, response]); - setInput(''); - }; - - return ( -
-
- setInput(event.target.value)} - placeholder="Ask a question" - onKeyDown={event => { - if (event.key === 'Enter') { - handleSubmission(); - } - }} - /> - -
- -
-
- {messages.map(message => ( -
-
-
{message.status}
-
-
{message.gui}
-
{message.text}
-
- ))} -
-
-
- ); -} -``` - -```tsx filename='app/message.tsx' -'use client'; - -import { StreamableValue, useStreamableValue } from 'ai/rsc'; - -export function Message({ textStream }: { textStream: StreamableValue }) { - const [text] = useStreamableValue(textStream); - - return
{text}
; -} -``` - -## Server - -In your server action, you will create a function called `submitMessage` that adds the user's message to the thread. The function will create a new thread if one does not exist and add the user's message to the thread. If a thread already exists, the function will add the user's message to the existing thread. The function will then create a run and stream the assistant's response to the client. Furthermore, the run queue is used to manage multiple runs in the same thread during the lifetime of the server action. - -In case the assistant requires a tool call, the server action will handle the tool call and return the output to the assistant. In this example, the assistant requires a tool call to search for emails. The server action will search for emails based on the `query` and `has_attachments` parameters and return the output to the both the assistant and the client. - -```tsx filename='app/actions.tsx' -'use server'; - -import { generateId } from 'ai'; -import { createStreamableUI, createStreamableValue } from 'ai/rsc'; -import { OpenAI } from 'openai'; -import { ReactNode } from 'react'; -import { searchEmails } from './function'; -import { Message } from './message'; - -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY, -}); - -export interface ClientMessage { - id: string; - status: ReactNode; - text: ReactNode; - gui: ReactNode; -} - -const ASSISTANT_ID = 'asst_xxxx'; -let THREAD_ID = ''; -let RUN_ID = ''; - -export async function submitMessage(question: string): Promise { - const status = createStreamableUI('thread.init'); - const textStream = createStreamableValue(''); - const textUIStream = createStreamableUI( - , - ); - const gui = createStreamableUI(); - - const runQueue = []; - - (async () => { - if (THREAD_ID) { - await openai.beta.threads.messages.create(THREAD_ID, { - role: 'user', - content: question, - }); - - const run = await openai.beta.threads.runs.create(THREAD_ID, { - assistant_id: ASSISTANT_ID, - 
stream: true, - }); - - runQueue.push({ id: generateId(), run }); - } else { - const run = await openai.beta.threads.createAndRun({ - assistant_id: ASSISTANT_ID, - stream: true, - thread: { - messages: [{ role: 'user', content: question }], - }, - }); - - runQueue.push({ id: generateId(), run }); - } - - while (runQueue.length > 0) { - const latestRun = runQueue.shift(); - - if (latestRun) { - for await (const delta of latestRun.run) { - const { data, event } = delta; - - status.update(event); - - if (event === 'thread.created') { - THREAD_ID = data.id; - } else if (event === 'thread.run.created') { - RUN_ID = data.id; - } else if (event === 'thread.message.delta') { - data.delta.content?.map((part: any) => { - if (part.type === 'text') { - if (part.text) { - textStream.append(part.text.value); - } - } - }); - } else if (event === 'thread.run.requires_action') { - if (data.required_action) { - if (data.required_action.type === 'submit_tool_outputs') { - const { tool_calls } = data.required_action.submit_tool_outputs; - const tool_outputs = []; - - for (const tool_call of tool_calls) { - const { id: toolCallId, function: fn } = tool_call; - const { name, arguments: args } = fn; - - if (name === 'search_emails') { - const { query, has_attachments } = JSON.parse(args); - - gui.append( -
-
- Searching for emails: {query}, has_attachments: - {has_attachments ? 'true' : 'false'} -
-
, - ); - - await new Promise(resolve => setTimeout(resolve, 2000)); - - const fakeEmails = searchEmails({ query, has_attachments }); - - gui.append( -
- {fakeEmails.map(email => ( -
-
-
{email.subject}
-
-
{email.date}
-
- ))} -
, - ); - - tool_outputs.push({ - tool_call_id: toolCallId, - output: JSON.stringify(fakeEmails), - }); - } - } - - const nextRun: any = - await openai.beta.threads.runs.submitToolOutputs( - THREAD_ID, - RUN_ID, - { - tool_outputs, - stream: true, - }, - ); - - runQueue.push({ id: generateId(), run: nextRun }); - } - } - } else if (event === 'thread.run.failed') { - console.log(data); - } - } - } - } - - status.done(); - textUIStream.done(); - gui.done(); - })(); - - return { - id: generateId(), - status: status.value, - text: textUIStream.value, - gui: gui.value, - }; -} -``` - -```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; -import { submitMessage } from './actions'; - -export const AI = createAI({ - actions: { - submitMessage, - }, - initialAIState: [], - initialUIState: [], -}); -``` - -And finally, make sure to update your layout component to wrap the children with the `AI` component. - -```tsx filename="app/layout.tsx" -import { ReactNode } from 'react'; -import { AI } from './ai'; - -export default function Layout({ children }: { children: ReactNode }) { - return {children}; -} -``` diff --git a/content/cookbook/20-rsc/20-stream-text.mdx b/content/cookbook/20-rsc/20-stream-text.mdx index f72ca8dc8c96..c10b9fbf696b 100644 --- a/content/cookbook/20-rsc/20-stream-text.mdx +++ b/content/cookbook/20-rsc/20-stream-text.mdx @@ -20,14 +20,14 @@ Text generation can sometimes take a long time to complete, especially when you' ## Client -Let's create a simple React component that will call the `generate` function when a button is clicked. The `generate` function will call the `streamText` function, which will then generate text based on the input prompt. To consume the stream of text in the client, we will use the `readStreamableValue` function from the `ai/rsc` module. +Let's create a simple React component that will call the `generate` function when a button is clicked. 
The `generate` function will call the `streamText` function, which will then generate text based on the input prompt. To consume the stream of text in the client, we will use the `readStreamableValue` function from the `@ai-sdk/rsc` module. ```tsx filename="app/page.tsx" 'use client'; import { useState } from 'react'; import { generate } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -66,7 +66,7 @@ Using DevTools, we can see the text generation being streamed to the client in r import { streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export async function generate(input: string) { const stream = createStreamableValue(''); diff --git a/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx b/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx index 0f168eb718d1..bcacb5ee37ad 100644 --- a/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx +++ b/content/cookbook/20-rsc/21-stream-text-with-chat-prompt.mdx @@ -32,7 +32,7 @@ Let's create a simple conversation between a user and a model, and place a butto import { useState } from 'react'; import { Message, continueConversation } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -95,7 +95,7 @@ Now, let's implement the `continueConversation` function that will insert the us import { streamText } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; export interface Message { role: 'user' | 'assistant'; diff --git a/content/cookbook/20-rsc/30-generate-object.mdx 
b/content/cookbook/20-rsc/30-generate-object.mdx index f30023845c48..49ad25ffd230 100644 --- a/content/cookbook/20-rsc/30-generate-object.mdx +++ b/content/cookbook/20-rsc/30-generate-object.mdx @@ -93,7 +93,7 @@ export async function getNotifications(input: string) { 'use server'; const { object: notifications } = await generateObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), system: 'You generate three notifications for a messages app.', prompt: input, schema: z.object({ diff --git a/content/cookbook/20-rsc/40-stream-object.mdx b/content/cookbook/20-rsc/40-stream-object.mdx index ec70dd5a022a..ce3d8caf58eb 100644 --- a/content/cookbook/20-rsc/40-stream-object.mdx +++ b/content/cookbook/20-rsc/40-stream-object.mdx @@ -51,7 +51,7 @@ Let's create a simple React component that will call the `getNotifications` func import { useState } from 'react'; import { generate } from './actions'; -import { readStreamableValue } from 'ai/rsc'; +import { readStreamableValue } from '@ai-sdk/rsc'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; @@ -92,7 +92,7 @@ Now let's implement the `getNotifications` function. 
We'll use the `generateObje import { streamObject } from 'ai'; import { openai } from '@ai-sdk/openai'; -import { createStreamableValue } from 'ai/rsc'; +import { createStreamableValue } from '@ai-sdk/rsc'; import { z } from 'zod'; export async function generate(input: string) { @@ -102,7 +102,7 @@ export async function generate(input: string) { (async () => { const { partialObjectStream } = streamObject({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), system: 'You generate three notifications for a messages app.', prompt: input, schema: z.object({ diff --git a/content/cookbook/20-rsc/60-save-messages-to-database.mdx b/content/cookbook/20-rsc/60-save-messages-to-database.mdx index 357e45c8a0ed..766b67651a0e 100644 --- a/content/cookbook/20-rsc/60-save-messages-to-database.mdx +++ b/content/cookbook/20-rsc/60-save-messages-to-database.mdx @@ -41,7 +41,7 @@ export default function RootLayout({ import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -100,7 +100,7 @@ We will use the callback function to listen to state changes and save the conver ```tsx filename='app/actions.tsx' 'use server'; -import { getAIState, getMutableAIState, streamUI } from 'ai/rsc'; +import { getAIState, getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -176,7 +176,7 @@ export async function continueConversation( ``` ```ts filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/cookbook/20-rsc/61-restore-messages-from-database.mdx b/content/cookbook/20-rsc/61-restore-messages-from-database.mdx index 
962782f50a32..295e7273331a 100644 --- a/content/cookbook/20-rsc/61-restore-messages-from-database.mdx +++ b/content/cookbook/20-rsc/61-restore-messages-from-database.mdx @@ -39,7 +39,7 @@ export default function RootLayout({ import { useState, useEffect } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; export default function Home() { @@ -97,7 +97,7 @@ export default function Home() { The server-side implementation handles the restoration of messages and their transformation into the appropriate format for display. ```tsx filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; import { Stock } from '@ai-studio/components/stock'; import { generateId } from 'ai'; @@ -126,7 +126,7 @@ export const AI = createAI({ ```tsx filename='app/actions.tsx' 'use server'; -import { getAIState } from 'ai/rsc'; +import { getAIState } from '@ai-sdk/rsc'; export interface ServerMessage { role: 'user' | 'assistant' | 'function'; diff --git a/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx b/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx index cdfd36d6958f..8d2a3ae01fbe 100644 --- a/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx +++ b/content/cookbook/20-rsc/90-render-visual-interface-in-chat.mdx @@ -17,7 +17,7 @@ When we define multiple functions in [`tools`](/docs/reference/ai-sdk-core/gener import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -112,7 +112,7 @@ export async function Flight({ flightNumber }) { ```tsx filename='app/actions.tsx' 'use server'; -import { 
getMutableAIState, streamUI } from 'ai/rsc'; +import { getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -206,7 +206,7 @@ export async function continueConversation( ``` ```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx b/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx index dc9f3d374bf8..bfc5c75d2f6a 100644 --- a/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx +++ b/content/cookbook/20-rsc/91-stream-updates-to-visual-interfaces.mdx @@ -15,7 +15,7 @@ In our previous example we've been streaming react components from the server to import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -72,7 +72,7 @@ export default function Home() { ```tsx filename='app/actions.tsx' 'use server'; -import { getMutableAIState, streamUI } from 'ai/rsc'; +import { getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -137,7 +137,7 @@ export async function continueConversation( ``` ```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx b/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx index 09481680764d..864e70b8a997 100644 --- 
a/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx +++ b/content/cookbook/20-rsc/92-stream-ui-record-token-usage.mdx @@ -19,7 +19,7 @@ It is called when the stream is finished. import { useState } from 'react'; import { ClientMessage } from './actions'; -import { useActions, useUIState } from 'ai/rsc'; +import { useActions, useUIState } from '@ai-sdk/rsc'; import { generateId } from 'ai'; // Allow streaming responses up to 30 seconds @@ -76,7 +76,7 @@ export default function Home() { ```tsx filename='app/actions.tsx' highlight={"57-63"} 'use server'; -import { createAI, getMutableAIState, streamUI } from 'ai/rsc'; +import { createAI, getMutableAIState, streamUI } from '@ai-sdk/rsc'; import { openai } from '@ai-sdk/openai'; import { ReactNode } from 'react'; import { z } from 'zod'; @@ -148,7 +148,7 @@ export async function continueConversation( ``` ```typescript filename='app/ai.ts' -import { createAI } from 'ai/rsc'; +import { createAI } from '@ai-sdk/rsc'; import { ServerMessage, ClientMessage, continueConversation } from './actions'; export const AI = createAI({ diff --git a/content/docs/00-introduction/index.mdx b/content/docs/00-introduction/index.mdx new file mode 100644 index 000000000000..daa2ae80c2f8 --- /dev/null +++ b/content/docs/00-introduction/index.mdx @@ -0,0 +1,76 @@ +--- +title: AI SDK by Vercel +description: The AI SDK is the TypeScript toolkit for building AI applications and agents with React, Next.js, Vue, Svelte, Node.js, and more. +--- + +# AI SDK + +The AI SDK is the TypeScript toolkit designed to help developers build AI-powered applications and agents with React, Next.js, Vue, Svelte, Node.js, and more. + +## Why use the AI SDK? + +Integrating large language models (LLMs) into applications is complicated and heavily dependent on the specific model provider you use. + +The AI SDK standardizes integrating artificial intelligence (AI) models across [supported providers](/docs/foundations/providers-and-models). 
+- **[AI SDK UI](/docs/ai-sdk-ui):** A set of framework-agnostic hooks for quickly building chat and generative user interfaces.
Use the following prompt format: + +```prompt +Documentation: +{paste documentation here} +--- +Based on the above documentation, answer the following: +{your question} +``` diff --git a/content/docs/01-announcing-ai-sdk-5-alpha/index.mdx b/content/docs/01-announcing-ai-sdk-5-alpha/index.mdx new file mode 100644 index 000000000000..1270e8aca15e --- /dev/null +++ b/content/docs/01-announcing-ai-sdk-5-alpha/index.mdx @@ -0,0 +1,379 @@ +--- +title: AI SDK 5 Alpha +description: Get started with the Alpha version of AI SDK 5. +--- + +# Announcing AI SDK 5 Alpha + + + This is an early preview — AI SDK 5 is under active development. APIs may + change without notice. Pin to specific versions as breaking changes may occur + even in patch releases. + + +## Alpha Version Guidance + +The AI SDK 5 Alpha is intended for: + +- Exploration and early prototypes +- Green-field projects where you can experiment freely +- Development environments where you can tolerate breaking changes + +This Alpha release is **not recommended** for: + +- Production applications +- Projects that require stable APIs +- Existing applications that would need migration paths + +During this Alpha phase, we expect to make significant, potentially breaking changes to the API surface. We're sharing early to gather feedback and improve the SDK before stabilization. Your input is invaluable—please share your experiences through GitHub issues or discussions to help shape the final release. + + + We expect bugs in this Alpha release. To help us improve the SDK, please [file + bug reports on GitHub](https://github.com/vercel/ai/issues/new/choose). Your + reports directly contribute to making the final release more stable and + reliable. 
+ + +## Installation + +To install the AI SDK 5 - Alpha, run the following command: + +```bash +# replace with your provider and framework +npm install ai@alpha @ai-sdk/[your-provider]@alpha @ai-sdk/[your-framework]@alpha +``` + + + APIs may change without notice. Pin to specific versions as breaking changes + may occur even in patch releases. + + +## What's new in AI SDK 5? + +AI SDK 5 is a complete redesign of the AI SDK's protocol and architecture based on everything we've learned over the last two years of real-world usage. We've also modernized the UI and protocols that have remained largely unchanged since AI SDK v2/3, creating a strong foundation for the future. + +### Why AI SDK 5? + +When we originally designed the v1 protocol over a year ago, the standard interaction pattern with language models was simple: text in, text or tool call out. But today's LLMs go way beyond text and tool calls, generating reasoning, sources, images and more. Additionally, new use-cases like computer using agents introduce a fundamentally new approach to interacting with language models that made it near-impossible to support in a unified approach with our original architecture. + +We needed a protocol designed for this new reality. While this is a breaking change that we don't take lightly, it's provided an opportunity to rebuild the foundation and add powerful new features. + +While we've designed AI SDK 5 to be a substantial improvement over previous versions, we're still in active development. You might encounter bugs or unexpected behavior. We'd greatly appreciate your feedback and bug reports—they're essential to making this release better. Please share your experiences and suggestions with us through [GitHub issues](https://github.com/vercel/ai/issues/new/choose) or [GitHub discussions](https://github.com/vercel/ai/discussions). 
+ +## New Features + +- [**`LanguageModelV2`**](#languagemodelv2) - new redesigned architecture +- [**Message Overhaul**](#message-overhaul) - new `UIMessage` and `ModelMessage` types +- [**`ChatStore`**](#chatstore) - new `useChat` architecture +- [**Server-Sent Events (SSE)**](#server-sent-events-sse) - new standardized protocol for sending UI messages to the client +- [**Agentic Control**](#agentic-control) - new primitives for building agentic systems + +## `LanguageModelV2` + +`LanguageModelV2` represents a complete redesign of how the AI SDK communicates with language models, adapting to the increasingly complex outputs modern AI systems generate. The new `LanguageModelV2` treats all LLM outputs as content parts, enabling more consistent handling of text, images, reasoning, sources, and other response types. It now has: + +- **Content-First Design** - Rather than separating text, reasoning, and tool calls, everything is now represented as ordered content parts in a unified array +- **Improved Type Safety** - The new `LanguageModelV2` provides better TypeScript type guarantees, making it easier to work with different content types +- **Simplified Extensibility** - Adding support for new model capabilities no longer requires changes to the core structure + +## Message Overhaul + +AI SDK 5 introduces a completely redesigned message system with two message types that address the dual needs of what you render in your UI and what you send to the model. Context is crucial for effective language model generations, and these two message types serve distinct purposes: + +- **UIMessage** represents the complete conversation history for your interface, preserving all message parts (text, images, data), metadata (creation timestamps, generation times), and UI state—regardless of length. + +- **ModelMessage** is optimized for sending to language models, considering token input constraints. It strips away UI-specific metadata and irrelevant content. 
+ +With this change, you will be required to explicitly convert your `UIMessage`s to `ModelMessage`s before sending them to the model. + +```ts highlight="9" +import { openai } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const result = streamText({ + model: openai('gpt-4o'), + messages: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse(); +} +``` + + + This separation is essential as you cannot use a single message format for + both purposes. The state you save should always be the `UIMessage` format to + prevent information loss, with explicit conversion to `ModelMessage` when + communicating with language models. + + +The new message system has made possible several highly requested features: + +- **Type-safe Message Metadata** - add structured information per message +- **New Stream Writer** - stream any part type (reasoning, sources, etc.) retaining proper order +- **Data Parts** - stream type-safe arbitrary data parts for dynamic UI components + +### Message metadata + +Metadata allows you to attach structured information to individual messages, making it easier to track important details like response time, token usage, or model specifications. This information can enhance your UI with contextual data without embedding it in the message content itself. 
+ +To add metadata to a message, first define the metadata schema: + +```ts filename="app/api/chat/example-metadata-schema.ts" +export const exampleMetadataSchema = z.object({ + duration: z.number().optional(), + model: z.string().optional(), + totalTokens: z.number().optional(), +}); + +export type ExampleMetadata = z.infer; +``` + +Then add the metadata using the `message.metadata` property on the `toUIMessageStreamResponse()` utility: + +```ts filename="app/api/chat/route.ts" +import { openai } from '@ai-sdk/openai'; +import { convertToModelMessages, streamText, UIMessage } from 'ai'; +import { ExampleMetadata } from './example-metadata-schema'; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + + const startTime = Date.now(); + const result = streamText({ + model: openai('gpt-4o'), + prompt: convertToModelMessages(messages), + }); + + return result.toUIMessageStreamResponse({ + messageMetadata: ({ part }): ExampleMetadata | undefined => { + // send custom information to the client on start: + if (part.type === 'start') { + return { + model: 'gpt-4o', // initial model id + }; + } + + // send additional model information on finish-step: + if (part.type === 'finish-step') { + return { + model: part.response.modelId, // update with the actual model id + duration: Date.now() - startTime, + }; + } + + // when the message is finished, send additional information: + if (part.type === 'finish') { + return { + totalTokens: part.totalUsage.totalTokens, + }; + } + }, + }); +} +``` + +Finally, specify the message metadata schema on the client and then render the (type-safe) metadata in your UI: + +```tsx filename="app/page.tsx" +import { zodSchema } from '@ai-sdk/provider-utils'; +import { useChat } from '@ai-sdk/react'; +import { defaultChatStoreOptions } from 'ai'; +import { exampleMetadataSchema } from '@/api/chat/example-metadata-schema'; + +export default function Chat() { + const { messages } = useChat({ 
+ chatStore: defaultChatStoreOptions({ + api: '/api/use-chat', + messageMetadataSchema: zodSchema(exampleMetadataSchema), + }), + }); + + return ( +
+ {messages.map(message => { + const { metadata } = message; + return ( +
+ {metadata?.duration &&
Duration: {metadata.duration}ms
} + {metadata?.model &&
Model: {metadata.model}
} + {metadata?.totalTokens && ( +
Total tokens: {metadata.totalTokens}
+ )} +
+ ); + })} +
+ ); +} +``` + +### UI Message Stream + +The UI Message Stream enables streaming any content parts from the server to the client. With this stream, you can send structured data like custom sources from your RAG pipeline directly to your UI. The stream writer is simply a utility that makes it easy to write to this message stream. + +```ts +const stream = createUIMessageStream({ + execute: writer => { + // stream custom sources + writer.write({ + type: 'source', + value: { + type: 'source', + sourceType: 'url', + id: 'source-1', + url: 'https://example.com', + title: 'Example Source', + }, + }); + }, +}); +``` + +On the client, these will be added to the ordered `message.parts` array. + +### Data Parts + +The new stream writer also enables a type-safe way to stream arbitrary data from the server to the client and display it in your UI. + +You can create and stream custom data parts on the server: + +```tsx +// On the server +const stream = createUIMessageStream({ + execute: writer => { + // Initial update + writer.write({ + type: 'data-weather', // Custom type + id: toolCallId, // ID for updates + data: { city, status: 'loading' }, // Your data + }); + + // Later, update the same part + writer.write({ + type: 'data-weather', + id: toolCallId, + data: { city, weather, status: 'success' }, + }); + }, +}); +``` + +On the client, you can render these parts with full type safety: + +```tsx +{ + message.parts + .filter(part => part.type === 'data-weather') // type-safe + .map((part, index) => ( + + )); +} +``` + +Data parts appear in the `message.parts` array along with other content, maintaining the proper ordering of the conversation. You can update parts by referencing the same ID, enabling dynamic experiences like collaborative artifacts. + +## `ChatStore` + +AI SDK 5 introduces a new `useChat` architecture with `ChatStore` and `ChatTransport` components. 
These two core building blocks make state management and API integration more flexible, allowing you to compose reactive UI bindings, share chat state across multiple instances, and swap out your backend protocol without rewriting application logic. + +The `ChatStore` is responsible for: + +- **Managing multiple chats** – access and switch between conversations seamlessly. +- **Processing response streams** – handle streams from the server and synchronize state (e.g. when there are concurrent client-side tool results). +- **Caching and synchronizing** – share state (messages, status, errors) between `useChat` hooks. + +You can create a basic ChatStore with the helper function: + +```ts +import { defaultChatStoreOptions } from 'ai'; + +const chatStore = defaultChatStoreOptions({ + api: '/api/chat', // your chat endpoint + maxSteps: 5, // optional: limit LLM calls in tool chains + chats: {}, // optional: preload previous chat sessions +}); + +import { useChat } from '@ai-sdk/react'; +const { messages, input, handleSubmit } = useChat({ chatStore }); +``` + +## Server-Sent Events (SSE) + +AI SDK 5 now uses Server-Sent Events (SSE) instead of a custom streaming protocol. SSE is a common web standard for sending data from servers to browsers. This switch has several advantages: + +- **Works everywhere** - Uses technology that works in all major browsers and platforms +- **Easier to troubleshoot** - You can see the data stream in browser developer tools +- **Simple to build upon** - Adding new features is more straightforward +- **More stable** - Built on proven technology that many developers already use + +## Agentic Control + +AI SDK 5 introduces new features for building agents that help you control model behavior more precisely. + +### prepareStep + +The `prepareStep` function gives you fine-grained control over each step in a multi-step agent. 
It's called before a step starts and allows you to: + +- Dynamically change the model used for specific steps +- Force specific tool selections for particular steps +- Limit which tools are available during specific steps +- Examine the context of previous steps before proceeding + +```ts +const result = await generateText({ + // ... + experimental_prepareStep: async ({ model, stepNumber, maxSteps, steps }) => { + if (stepNumber === 0) { + return { + // use a different model for this step: + model: modelForThisParticularStep, + // force a tool choice for this step: + toolChoice: { type: 'tool', toolName: 'tool1' }, + // limit the tools that are available for this step: + experimental_activeTools: ['tool1'], + }; + } + // when nothing is returned, the default settings are used + }, +}); +``` + +This makes it easier to build AI systems that adapt their capabilities based on the current context and task requirements. + +### `stopWhen` + +The `stopWhen` parameter lets you define stopping conditions for your agent. Instead of running indefinitely, you can specify exactly when the agent should terminate based on various conditions: + +- Reaching a maximum number of steps +- Calling a specific tool +- Satisfying any custom condition you define + +```ts +const result = generateText({ + // ... + // stop loop at 5 steps + stopWhen: stepCountIs(5), +}); + +const result = generateText({ + // ... + // stop loop when weather tool called + stopWhen: hasToolCall('weather'), +}); + +const result = generateText({ + // ... + // stop loop at your own custom condition + stopWhen: maxTotalTokens(20000), +}); +``` + +These agentic controls form the foundation for building more reliable, controllable AI systems that can tackle complex problems while remaining within well-defined constraints. 
diff --git a/content/docs/01-introduction/index.mdx b/content/docs/01-introduction/index.mdx deleted file mode 100644 index b53909739d58..000000000000 --- a/content/docs/01-introduction/index.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: AI SDK by Vercel -description: The AI SDK is the TypeScript toolkit for building AI applications and agents with React, Next.js, Vue, Svelte, Node.js, and more. ---- - -# AI SDK - -The AI SDK is the TypeScript toolkit designed to help developers build AI-powered applications and agents with React, Next.js, Vue, Svelte, Node.js, and more. - -## Why use the AI SDK? - -Integrating large language models (LLMs) into applications is complicated and heavily dependent on the specific model provider you use. - -- **[AI SDK Core](/docs/ai-sdk-core):** A unified API for generating text, structured objects, tool calls, and building agents with LLMs. -- **[AI SDK UI](/docs/ai-sdk-ui):** A set of framework-agnostic hooks for quickly building chat and generative user interface. - -## Model Providers - -The AI SDK supports [multiple model providers](/providers). - - - -## Templates - -We've built some [templates](https://vercel.com/templates?type=ai) that include AI SDK integrations for different use cases, providers, and frameworks. You can use these templates to get started with your AI-powered application. - -### Starter Kits - - - -### Feature Exploration - - - -### Frameworks - - - -### Generative UI - - - -### Security - - - -## Join our Community - -If you have questions about anything related to the AI SDK, you're always welcome to ask our community on [GitHub Discussions](https://github.com/vercel/ai/discussions). - -## `llms.txt` - -You can access the entire AI SDK documentation in Markdown format at [sdk.vercel.ai/llms.txt](/llms.txt). This can be used to ask any LLM (assuming it has a big enough context window) questions about the AI SDK based on the most up-to-date documentation. 
- -### Example Usage - -For instance, to prompt an LLM with questions about the AI SDK: - -1. Copy the documentation contents from [sdk.vercel.ai/llms.txt](/llms.txt) -2. Use the following prompt format: - -```prompt -Documentation: -{paste documentation here} ---- -Based on the above documentation, answer the following: -{your question} -``` diff --git a/content/docs/02-foundations/02-providers-and-models.mdx b/content/docs/02-foundations/02-providers-and-models.mdx index a1902d339608..6a47974d8252 100644 --- a/content/docs/02-foundations/02-providers-and-models.mdx +++ b/content/docs/02-foundations/02-providers-and-models.mdx @@ -40,6 +40,14 @@ The AI SDK comes with a wide range of providers that you can use to interact wit - [Cerebras Provider](/providers/ai-sdk-providers/cerebras) (`@ai-sdk/cerebras`) - [Groq Provider](/providers/ai-sdk-providers/groq) (`@ai-sdk/groq`) - [Perplexity Provider](/providers/ai-sdk-providers/perplexity) (`@ai-sdk/perplexity`) +- [ElevenLabs Provider](/providers/ai-sdk-providers/elevenlabs) (`@ai-sdk/elevenlabs`) +- [LMNT Provider](/providers/ai-sdk-providers/lmnt) (`@ai-sdk/lmnt`) +- [Hume Provider](/providers/ai-sdk-providers/hume) (`@ai-sdk/hume`) +- [Rev.ai Provider](/providers/ai-sdk-providers/revai) (`@ai-sdk/revai`) +- [Deepgram Provider](/providers/ai-sdk-providers/deepgram) (`@ai-sdk/deepgram`) +- [Gladia Provider](/providers/ai-sdk-providers/gladia) (`@ai-sdk/gladia`) +- [LMNT Provider](/providers/ai-sdk-providers/lmnt) (`@ai-sdk/lmnt`) +- [AssemblyAI Provider](/providers/ai-sdk-providers/assemblyai) (`@ai-sdk/assemblyai`) You can also use the [OpenAI Compatible provider](/providers/openai-compatible-providers) with OpenAI-compatible APIs: @@ -56,13 +64,17 @@ The open-source community has created the following providers: - [Portkey Provider](/providers/community-providers/portkey) (`@portkey-ai/vercel-provider`) - [Cloudflare Workers AI Provider](/providers/community-providers/cloudflare-workers-ai) (`workers-ai-provider`) 
- [OpenRouter Provider](/providers/community-providers/openrouter) (`@openrouter/ai-sdk-provider`) +- [Requesty Provider](/providers/community-providers/requesty) (`@requesty/ai-sdk`) - [Crosshatch Provider](/providers/community-providers/crosshatch) (`@crosshatch/ai-provider`) - [Mixedbread Provider](/providers/community-providers/mixedbread) (`mixedbread-ai-provider`) - [Voyage AI Provider](/providers/community-providers/voyage-ai) (`voyage-ai-provider`) - [Mem0 Provider](/providers/community-providers/mem0)(`@mem0/vercel-ai-provider`) +- [Letta Provider](/providers/community-providers/letta)(`@letta-ai/vercel-ai-sdk-provider`) - [Spark Provider](/providers/community-providers/spark) (`spark-ai-provider`) - [AnthropicVertex Provider](/providers/community-providers/anthropic-vertex-ai) (`anthropic-vertex-ai`) - [LangDB Provider](/providers/community-providers/langdb) (`@langdb/vercel-provider`) +- [Dify Provider](/providers/community-providers/dify) (`dify-ai-provider`) +- [Sarvam Provider](/providers/community-providers/sarvam) (`sarvam-ai-provider`) ## Self-Hosted Models @@ -79,43 +91,55 @@ Additionally, any self-hosted provider that supports the OpenAI specification ca The AI providers support different language models with various capabilities. 
Here are the capabilities of popular models: -| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming | -| ------------------------------------------------------------------------ | ---------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | | -| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | | -| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | -| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | | -| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | 
`gemini-2.0-flash-exp` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | | -| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | | -| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | | -| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | | -| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-70b` | | | | | -| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | | -| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | | +| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming | +| ------------------------------------------------------------------------ | ------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | --- | -------------------------------------------- | --------- | ------------------- | ------------------- | ------------------- | ------------------- | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3-fast` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3-mini` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3-mini-fast` | | | | | +| [xAI 
Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | | +| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | | +| [Vercel](/providers/ai-sdk-providers/vercel) | `v0-1.0-md` | | | | | | [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-nano` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o3` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o4-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | | +| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-4-opus-20250514` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-4-sonnet-20250514` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | | +| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | 
`mistral-large-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | | +| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | | +| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | | +| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | | +| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | | +| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-70b` | | | | | +| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `meta-llama/llama-4-scout-17b-16e-instruct` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | | +| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | | This table is not exhaustive. 
Additional models can be found in the provider diff --git a/content/docs/02-foundations/03-prompts.mdx b/content/docs/02-foundations/03-prompts.mdx index a7ad75f94d8a..3811ba3ff642 100644 --- a/content/docs/02-foundations/03-prompts.mdx +++ b/content/docs/02-foundations/03-prompts.mdx @@ -73,7 +73,7 @@ You can use the `messages` property to set message prompts. Each message has a `role` and a `content` property. The content can either be text (for user and assistant messages), or an array of relevant parts (data) for that message type. ```ts highlight="3-7" -const result = await streamUI({ +const result = await generateText({ model: yourModel, messages: [ { role: 'user', content: 'Hi!' }, @@ -92,6 +92,84 @@ Instead of sending a text in the `content` property, you can send an array of pa models](./providers-and-models#model-capabilities). +### Provider Options + +You can pass through additional provider-specific metadata to enable provider-specific functionality at 3 levels. + +#### Function Call Level + +Functions like [`streamText`](/docs/reference/ai-sdk-core/stream-text#provider-options) or [`generateText`](/docs/reference/ai-sdk-core/generate-text#provider-options) accept a `providerOptions` property. + +Adding provider options at the function call level should be used when you do not need granular control over where the provider options are applied. 
+ +```ts +const { text } = await generateText({ + model: azure('your-deployment-name'), + providerOptions: { + openai: { + reasoningEffort: 'low', + }, + }, +}); +``` + +#### Message Level + +For granular control over applying provider options at the message level, you can pass `providerOptions` to the message object: + +```ts +const messages = [ + { + role: 'system', + content: 'Cached system message', + providerOptions: { + // Sets a cache control breakpoint on the system message + anthropic: { cacheControl: { type: 'ephemeral' } }, + }, + }, +]; +``` + +#### Message Part Level + +Certain provider-specific options require configuration at the message part level: + +```ts +const messages = [ + { + role: 'user', + content: [ + { + type: 'text', + text: 'Describe the image in detail.', + providerOptions: { + openai: { imageDetail: 'low' }, + }, + }, + { + type: 'image', + image: + 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/comic-cat.png?raw=true', + // Sets image detail configuration for image part: + providerOptions: { + openai: { imageDetail: 'low' }, + }, + }, + ], + }, +]; +``` + + + AI SDK UI hooks like [`useChat`](/docs/reference/ai-sdk-ui/use-chat) return + arrays of `UIMessage` objects, which do not support provider options. We + recommend using the + [`convertToModelMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages) + function to convert `UIMessage` objects to + [`ModelMessage`](/docs/reference/ai-sdk-core/model-message) objects before + applying or appending message(s) or message parts with `providerOptions`. + + ### User Messages #### Text Parts @@ -235,7 +313,7 @@ const result = await generateText({ { type: 'text', text: 'What is the file about?' 
}, { type: 'file', - mimeType: 'application/pdf', + mediaType: 'application/pdf', data: fs.readFileSync('./data/example.pdf'), filename: 'example.pdf', // optional, not used by all providers }, @@ -260,7 +338,7 @@ const result = await generateText({ { type: 'text', text: 'What is the audio saying?' }, { type: 'file', - mimeType: 'audio/mpeg', + mediaType: 'audio/mpeg', data: fs.readFileSync('./data/galileo.mp3'), }, ], @@ -341,7 +419,7 @@ const result = await generateText({ content: [ { type: 'file', - mimeType: 'image/png', + mediaType: 'image/png', data: fs.readFileSync('./data/roquefort.jpg'), }, ], @@ -448,7 +526,7 @@ const result = await generateText({ { type: 'image', data: fs.readFileSync('./data/roquefort-nutrition-data.png'), - mimeType: 'image/png', + mediaType: 'image/png', }, ], }, diff --git a/content/docs/02-foundations/04-tools.mdx b/content/docs/02-foundations/04-tools.mdx index ac8a2acf3338..0855b118bc27 100644 --- a/content/docs/02-foundations/04-tools.mdx +++ b/content/docs/02-foundations/04-tools.mdx @@ -93,7 +93,10 @@ There are several providers that offer pre-built tools as **toolkits** that you - **[agentic](https://github.com/transitive-bullshit/agentic)** - A collection of 20+ tools. Most tools connect to access external APIs such as [Exa](https://exa.ai/) or [E2B](https://e2b.dev/). - **[browserbase](https://docs.browserbase.com/integrations/vercel-ai/introduction)** - Browser tool that runs a headless browser +- **[browserless](https://docs.browserless.io/ai-integrations/vercel-ai-sdk)** - Browser automation service with AI integration - self hosted or cloud based +- **[Smithery](https://smithery.ai/docs/use/connect)** - Smithery provides an open marketplace of 6K+ MCPs, including [Browserbase](https://browserbase.com/) and [Exa](https://exa.ai/). - **[Stripe agent tools](https://docs.stripe.com/agents)** - Tools for interacting with Stripe. 
+- **[StackOne ToolSet](https://docs.stackone.com/agents)** - Agentic integrations for hundreds of [enterprise SaaS](https://www.stackone.com/integrations) - **[Toolhouse](https://docs.toolhouse.ai/toolhouse/using-vercel-ai)** - AI function-calling in 3 lines of code for over 25 different actions. - **[Agent Tools](https://ai-sdk-agents.vercel.app/?item=introduction)** - A collection of tools for agents. - **[AI Tool Maker](https://github.com/nihaocami/ai-tool-maker)** - A CLI utility to generate AI SDK tools from OpenAPI specs. diff --git a/content/docs/02-foundations/05-streaming.mdx b/content/docs/02-foundations/05-streaming.mdx index 55f185203132..a5380882f53c 100644 --- a/content/docs/02-foundations/05-streaming.mdx +++ b/content/docs/02-foundations/05-streaming.mdx @@ -43,14 +43,14 @@ As you can see, the streaming UI is able to start displaying the response much f While streaming interfaces can greatly enhance user experiences, especially with larger language models, they aren't always necessary or beneficial. If you can achieve your desired functionality using a smaller, faster model without resorting to streaming, this route can often lead to simpler and more manageable development processes. -However, regardless of the speed of your model, the AI SDK is designed to make implementing streaming UIs as simple as possible. In the example below, we stream text generation from OpenAI's `gpt-4-turbo` in under 10 lines of code using the SDK's [`streamText`](/docs/reference/ai-sdk-core/stream-text) function: +However, regardless of the speed of your model, the AI SDK is designed to make implementing streaming UIs as simple as possible. 
In the example below, we stream text generation from OpenAI's `gpt-4.1` in under 10 lines of code using the SDK's [`streamText`](/docs/reference/ai-sdk-core/stream-text) function: ```ts import { openai } from '@ai-sdk/openai'; import { streamText } from 'ai'; const { textStream } = streamText({ - model: openai('gpt-4-turbo'), + model: openai('gpt-4.1'), prompt: 'Write a poem about embedding models.', }); diff --git a/content/docs/02-foundations/06-agents.mdx b/content/docs/02-foundations/06-agents.mdx index 566bd6419b09..0cd8788cc8fe 100644 --- a/content/docs/02-foundations/06-agents.mdx +++ b/content/docs/02-foundations/06-agents.mdx @@ -393,25 +393,25 @@ async function translateWithFeedback(text: string, targetLanguage: string) { ## Multi-Step Tool Usage -If your use case involves solving problems where the solution path is poorly defined or too complex to map out as a workflow in advance, you may want to provide the LLM with a set of lower-level tools and allow it to break down the task into small pieces that it can solve on its own iteratively, without discrete instructions. To implement this kind of agentic pattern, you need to call an LLM in a loop until a task is complete. The AI SDK makes this simple with the `maxSteps` parameter. +If your use case involves solving problems where the solution path is poorly defined or too complex to map out as a workflow in advance, you may want to provide the LLM with a set of lower-level tools and allow it to break down the task into small pieces that it can solve on its own iteratively, without discrete instructions. To implement this kind of agentic pattern, you need to call an LLM in a loop until a task is complete. The AI SDK makes this simple with the `stopWhen` parameter. -With `maxSteps`, the AI SDK automatically triggers an additional request to the model after every tool result (each request is considered a "step"). 
If the model does not generate a tool call or the `maxSteps` threshold has been met, the generation is complete. +The AI SDK gives you control over the stopping conditions, enabling you to keep the LLM running until one of the conditions is met. The SDK automatically triggers an additional request to the model after every tool result (each request is considered a "step"), continuing until the model does not generate a tool call or other stopping conditions (e.g. `stepCountIs`) you define are satisfied. -`maxSteps` can be used with both `generateText` and `streamText` +`stopWhen` can be used with both `generateText` and `streamText` -### Using maxSteps +### Using `stopWhen` This example demonstrates how to create an agent that solves math problems. It has a calculator tool (using [math.js](https://mathjs.org/)) that it can call to evaluate mathematical expressions. ```ts file='main.ts' import { openai } from '@ai-sdk/openai'; -import { generateText, tool } from 'ai'; +import { generateText, tool, stepCountIs } from 'ai'; import * as mathjs from 'mathjs'; import { z } from 'zod'; const { text: answer } = await generateText({ - model: openai('gpt-4o-2024-08-06', { structuredOutputs: true }), + model: openai('gpt-4o-2024-08-06'), tools: { calculate: tool({ description: @@ -422,7 +422,7 @@ const { text: answer } = await generateText({ execute: async ({ expression }) => mathjs.evaluate(expression), }), }, - maxSteps: 10, + stopWhen: stepCountIs(10), system: 'You are solving math problems. ' + 'Reason step by step. 
' + @@ -445,12 +445,12 @@ When building an agent for tasks like mathematical analysis or report generation ```ts highlight="6,16-29,31,45" import { openai } from '@ai-sdk/openai'; -import { generateText, tool } from 'ai'; +import { generateText, tool, stepCountIs } from 'ai'; import 'dotenv/config'; import { z } from 'zod'; const { toolCalls } = await generateText({ - model: openai('gpt-4o-2024-08-06', { structuredOutputs: true }), + model: openai('gpt-4o-2024-08-06'), tools: { calculate: tool({ description: @@ -475,7 +475,7 @@ const { toolCalls } = await generateText({ }), }, toolChoice: 'required', - maxSteps: 10, + stopWhen: stepCountIs(10), system: 'You are solving math problems. ' + 'Reason step by step. ' + @@ -499,15 +499,15 @@ console.log(`FINAL TOOL CALLS: ${JSON.stringify(toolCalls, null, 2)}`); ### Accessing all steps -Calling `generateText` with `maxSteps` can result in several calls to the LLM (steps). +Calling `generateText` with `stopWhen` can result in several calls to the LLM (steps). You can access information from all steps by using the `steps` property of the response. ```ts highlight="3,9-10" -import { generateText } from 'ai'; +import { generateText, stepCountIs } from 'ai'; const { steps } = await generateText({ model: openai('gpt-4o'), - maxSteps: 10, + stopWhen: stepCountIs(10), // ... }); @@ -522,11 +522,11 @@ It is triggered when a step is finished, i.e. all text deltas, tool calls, and tool results for the step are available. ```tsx highlight="6-8" -import { generateText } from 'ai'; +import { generateText, stepCountIs } from 'ai'; const result = await generateText({ model: yourModel, - maxSteps: 10, + stopWhen: stepCountIs(10), onStepFinish({ text, toolCalls, toolResults, finishReason, usage }) { // your own logic, e.g. 
for saving the chat history or recording usage }, diff --git a/content/docs/02-getting-started/01-navigating-the-library.mdx b/content/docs/02-getting-started/01-navigating-the-library.mdx index ec2de45b6abf..86cbfbfa1cd1 100644 --- a/content/docs/02-getting-started/01-navigating-the-library.mdx +++ b/content/docs/02-getting-started/01-navigating-the-library.mdx @@ -5,7 +5,7 @@ description: Learn how to navigate the AI SDK. # Navigating the Library -the AI SDK is a powerful toolkit for building AI applications. This page will help you pick the right tools for your requirements. +The AI SDK is a powerful toolkit for building AI applications. This page will help you pick the right tools for your requirements. Let’s start with a quick overview of the AI SDK, which is comprised of three parts: @@ -17,11 +17,11 @@ Let’s start with a quick overview of the AI SDK, which is comprised of three p When deciding which part of the AI SDK to use, your first consideration should be the environment and existing stack you are working with. Different components of the SDK are tailored to specific frameworks and environments. -| Library | Purpose | Environment Compatibility | -| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| [AI SDK Core](/docs/ai-sdk-core/overview) | Call any LLM with unified API (e.g. [generateText](/docs/reference/ai-sdk-core/generate-text) and [generateObject](/docs/reference/ai-sdk-core/generate-object)) | Any JS environment (e.g. Node.js, Deno, Browser) | -| [AI SDK UI](/docs/ai-sdk-ui/overview) | Build streaming chat and generative UIs (e.g. 
[useChat](/docs/reference/ai-sdk-ui/use-chat)) | React & Next.js, Vue & Nuxt, Svelte & SvelteKit, Solid.js & SolidStart | -| [AI SDK RSC](/docs/ai-sdk-rsc/overview) | Stream generative UIs from Server to Client (e.g. [streamUI](/docs/reference/ai-sdk-rsc/stream-ui)). Development is currently experimental and we recommend using [AI SDK UI](/docs/ai-sdk-ui/overview). | Any framework that supports React Server Components (e.g. Next.js) | +| Library | Purpose | Environment Compatibility | +| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ | +| [AI SDK Core](/docs/ai-sdk-core/overview) | Call any LLM with unified API (e.g. [generateText](/docs/reference/ai-sdk-core/generate-text) and [generateObject](/docs/reference/ai-sdk-core/generate-object)) | Any JS environment (e.g. Node.js, Deno, Browser) | +| [AI SDK UI](/docs/ai-sdk-ui/overview) | Build streaming chat and generative UIs (e.g. [useChat](/docs/reference/ai-sdk-ui/use-chat)) | React & Next.js, Vue & Nuxt, Svelte & SvelteKit | +| [AI SDK RSC](/docs/ai-sdk-rsc/overview) | Stream generative UIs from Server to Client (e.g. [streamUI](/docs/reference/ai-sdk-rsc/stream-ui)). Development is currently experimental and we recommend using [AI SDK UI](/docs/ai-sdk-ui/overview). | Any framework that supports React Server Components (e.g. 
Next.js) | ## Environment Compatibility @@ -34,7 +34,6 @@ The following table outlines AI SDK compatibility based on environment: | None / Node.js / Deno | | | | | Vue / Nuxt | | | | | Svelte / SvelteKit | | | | -| Solid.js / SolidStart | | | | | Next.js Pages Router | | | | | Next.js App Router | | | | @@ -49,15 +48,14 @@ AI SDK UI provides a set of framework-agnostic hooks for quickly building **prod ## AI SDK UI Framework Compatibility -AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), [Vue.js](https://vuejs.org/), and [SolidJS](https://www.solidjs.com/). Here is a comparison of the supported functions across these frameworks: +AI SDK UI supports the following frameworks: [React](https://react.dev/), [Svelte](https://svelte.dev/), and [Vue.js](https://vuejs.org/). Here is a comparison of the supported functions across these frameworks: -| Function | React | Svelte | Vue.js | SolidJS | -| ---------------------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | | | | -| [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | | | | | -| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | | | | -| [useObject](/docs/reference/ai-sdk-ui/use-object) | | | | | -| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | | | | | +| Function | React | Svelte | Vue.js | +| ---------------------------------------------------------- | ------------------- | ------------------- | ------------------- | +| [useChat](/docs/reference/ai-sdk-ui/use-chat) | | | | +| [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | | | | +| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | | | | +| [useObject](/docs/reference/ai-sdk-ui/use-object) | | | | [Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are diff --git 
a/content/docs/02-getting-started/02-nextjs-app-router.mdx b/content/docs/02-getting-started/02-nextjs-app-router.mdx index 8a8bc840e606..5f1c7a952d67 100644 --- a/content/docs/02-getting-started/02-nextjs-app-router.mdx +++ b/content/docs/02-getting-started/02-nextjs-app-router.mdx @@ -50,21 +50,26 @@ Install `ai`, `@ai-sdk/react`, and `@ai-sdk/openai`, the AI package, AI SDK's Re
- + - + - +
- - Make sure you are using `ai` version 3.1 or higher. - - ### Configure OpenAI API key Create a `.env.local` file in your project root and add your OpenAI API Key. This key is used to authenticate your application with the OpenAI service. @@ -90,28 +95,28 @@ Create a route handler, `app/api/chat/route.ts` and add the following code: ```tsx filename="app/api/chat/route.ts" import { openai } from '@ai-sdk/openai'; -import { streamText } from 'ai'; +import { streamText, UIMessage, convertToModelMessages } from 'ai'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; export async function POST(req: Request) { - const { messages } = await req.json(); + const { messages }: { messages: UIMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` Let's take a look at what is happening in this code: -1. Define an asynchronous `POST` request handler and extract `messages` from the body of the request. The `messages` variable contains a history of the conversation between you and the chatbot and provides the chatbot with the necessary context to make the next generation. -2. Call [`streamText`](/docs/reference/ai-sdk-core/stream-text), which is imported from the `ai` package. This function accepts a configuration object that contains a `model` provider (imported from `@ai-sdk/openai`) and `messages` (defined in step 1). You can pass additional [settings](/docs/ai-sdk-core/settings) to further customise the model's behaviour. -3. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). This result object contains the [ `toDataStreamResponse` ](/docs/reference/ai-sdk-core/stream-text#to-data-stream-response) function which converts the result to a streamed response object. +1. 
Define an asynchronous `POST` request handler and extract `messages` from the body of the request. The `messages` variable contains a history of the conversation between you and the chatbot and provides the chatbot with the necessary context to make the next generation. The `messages` are of UIMessage type, which are designed for use in application UI - they contain the entire message history and associated metadata like timestamps. +2. Call [`streamText`](/docs/reference/ai-sdk-core/stream-text), which is imported from the `ai` package. This function accepts a configuration object that contains a `model` provider (imported from `@ai-sdk/openai`) and `messages` (defined in step 1). You can pass additional [settings](/docs/ai-sdk-core/settings) to further customise the model's behaviour. The `messages` key expects a `ModelMessage[]` array. This type is different from `UIMessage` in that it does not include metadata, such as timestamps or sender information. +3. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). This result object contains the [ `toUIMessageStreamResponse` ](/docs/reference/ai-sdk-core/stream-text#to-data-stream-response) function which converts the result to a streamed response object. 4. Finally, return the result to the client to stream the response. This Route Handler creates a POST request endpoint at `/api/chat`. 
@@ -193,13 +198,13 @@ Modify your `app/api/chat/route.ts` file to include the new weather tool: ```tsx filename="app/api/chat/route.ts" highlight="2,13-27" import { openai } from '@ai-sdk/openai'; -import { streamText, tool } from 'ai'; +import { streamText, UIMessage, convertToModelMessages, tool } from 'ai'; import { z } from 'zod'; export const maxDuration = 30; export async function POST(req: Request) { - const { messages } = await req.json(); + const { messages }: { messages: UIMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4o'), @@ -221,7 +226,7 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` @@ -234,17 +239,17 @@ In this updated code: - Defines parameters using a Zod schema, specifying that it requires a `location` string to execute this tool. The model will attempt to extract this parameter from the context of the conversation. If it can't, it will ask the user for the missing information. - Defines an `execute` function that simulates getting weather data (in this case, it returns a random temperature). This is an asynchronous function running on the server so you can fetch real data from an external API. - Now your chatbot can "fetch" weather information for any location the user asks about. When the model determines it needs to use the weather tool, it will generate a tool call with the necessary parameters. The `execute` function will then be automatically run, and you can access the results via `toolInvocations` that is available on the message object. +Now your chatbot can "fetch" weather information for any location the user asks about. When the model determines it needs to use the weather tool, it will generate a tool call with the necessary parameters. The `execute` function will then be automatically run, and you can access the results via the `tool-invocation` part that is available on the `message.parts` array. 
Try asking something like "What's the weather in New York?" and see how the model uses the new tool. -Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result in the `toolInvocations` key of the message object. +Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result via the `tool-invocation` part of the `message.parts` array. ### Update the UI To display the tool invocations in your UI, update your `app/page.tsx` file: -```tsx filename="app/page.tsx" highlight="18-24" +```tsx filename="app/page.tsx" highlight="16-21" 'use client'; import { useChat } from '@ai-sdk/react'; @@ -298,14 +303,18 @@ To solve this, you can enable multi-step tool calls using the `maxSteps` option Modify your `app/page.tsx` file to include the `maxSteps` option: -```tsx filename="app/page.tsx" highlight="7" +```tsx filename="app/page.tsx" 'use client'; import { useChat } from '@ai-sdk/react'; +import { defaultChatStoreOptions } from 'ai'; export default function Chat() { const { messages, input, handleInputChange, handleSubmit } = useChat({ - maxSteps: 5, + chatStore: defaultChatStoreOptions({ + api: '/api/chat', + maxSteps: 5, + }), }); // ... 
rest of your component code @@ -322,17 +331,17 @@ Update your `app/api/chat/route.ts` file to add a new tool to convert the temper ```tsx filename="app/api/chat/route.ts" highlight="27-40" import { openai } from '@ai-sdk/openai'; -import { streamText, tool } from 'ai'; +import { streamText, UIMessage, convertToModelMessages, tool } from 'ai'; import { z } from 'zod'; export const maxDuration = 30; export async function POST(req: Request) { - const { messages } = await req.json(); + const { messages }: { messages: UIMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), tools: { weather: tool({ description: 'Get the weather in a location (fahrenheit)', @@ -364,7 +373,7 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/docs/02-getting-started/03-nextjs-pages-router.mdx b/content/docs/02-getting-started/03-nextjs-pages-router.mdx index 6210144d43d6..df718c1f82be 100644 --- a/content/docs/02-getting-started/03-nextjs-pages-router.mdx +++ b/content/docs/02-getting-started/03-nextjs-pages-router.mdx @@ -50,21 +50,26 @@ Install `ai`, `@ai-sdk/react`, and `@ai-sdk/openai`, the AI package, AI SDK's Re
- + - + - +
- - Make sure you are using `ai` version 3.1 or higher. - - ### Configure OpenAI API Key Create a `.env.local` file in your project root and add your OpenAI API Key. This key is used to authenticate your application with the OpenAI service. @@ -96,28 +101,28 @@ Create a Route Handler (`app/api/chat/route.ts`) and add the following code: ```tsx filename="app/api/chat/route.ts" import { openai } from '@ai-sdk/openai'; -import { streamText } from 'ai'; +import { streamText, UIMessage, convertToModelMessages } from 'ai'; // Allow streaming responses up to 30 seconds export const maxDuration = 30; export async function POST(req: Request) { - const { messages } = await req.json(); + const { messages }: { messages: UIMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` Let's take a look at what is happening in this code: -1. Define an asynchronous `POST` request handler and extract `messages` from the body of the request. The `messages` variable contains a history of the conversation between you and the chatbot and provides the chatbot with the necessary context to make the next generation. -2. Call [`streamText`](/docs/reference/ai-sdk-core/stream-text), which is imported from the `ai` package. This function accepts a configuration object that contains a `model` provider (imported from `@ai-sdk/openai`) and `messages` (defined in step 1). You can pass additional [settings](/docs/ai-sdk-core/settings) to further customise the model's behaviour. -3. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). This result object contains the [ `toDataStreamResponse` ](/docs/reference/ai-sdk-core/stream-text#to-data-stream-response) function which converts the result to a streamed response object. +1. 
Define an asynchronous `POST` request handler and extract `messages` from the body of the request. The `messages` variable contains a history of the conversation between you and the chatbot and provides the chatbot with the necessary context to make the next generation. The `messages` are of UIMessage type, which are designed for use in application UI - they contain the entire message history and associated metadata like timestamps. +2. Call [`streamText`](/docs/reference/ai-sdk-core/stream-text), which is imported from the `ai` package. This function accepts a configuration object that contains a `model` provider (imported from `@ai-sdk/openai`) and `messages` (defined in step 1). You can pass additional [settings](/docs/ai-sdk-core/settings) to further customise the model's behaviour. The `messages` key expects a `ModelMessage[]` array. This type is different from `UIMessage` in that it does not include metadata, such as timestamps or sender information. +3. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). This result object contains the [ `toUIMessageStreamResponse` ](/docs/reference/ai-sdk-core/stream-text#to-data-stream-response) function which converts the result to a streamed response object. 4. Finally, return the result to the client to stream the response. This Route Handler creates a POST request endpoint at `/api/chat`. 
@@ -192,13 +197,13 @@ Modify your `app/api/chat/route.ts` file to include the new weather tool: ```tsx filename="app/api/chat/route.ts" highlight="2,13-27" import { openai } from '@ai-sdk/openai'; -import { streamText, tool } from 'ai'; +import { streamText, UIMessage, convertToModelMessages, tool } from 'ai'; import { z } from 'zod'; export const maxDuration = 30; export async function POST(req: Request) { - const { messages } = await req.json(); + const { messages }: { messages: UIMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4o'), @@ -220,7 +225,7 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` @@ -297,10 +302,14 @@ Modify your `pages/index.tsx` file to include the `maxSteps` option: ```tsx filename="pages/index.tsx" highlight="6" import { useChat } from '@ai-sdk/react'; +import { defaultChatStoreOptions } from 'ai'; export default function Chat() { const { messages, input, handleInputChange, handleSubmit } = useChat({ - maxSteps: 5, + chatStore: defaultChatStoreOptions({ + api: '/api/chat', + maxSteps: 5, + }), }); // ... 
rest of your component code @@ -317,17 +326,17 @@ Update your `app/api/chat/route.ts` file to add a new tool to convert the temper ```tsx filename="app/api/chat/route.ts" highlight="27-40" import { openai } from '@ai-sdk/openai'; -import { streamText, tool } from 'ai'; +import { streamText, UIMessage, convertToModelMessages, tool } from 'ai'; import { z } from 'zod'; export const maxDuration = 30; export async function POST(req: Request) { - const { messages } = await req.json(); + const { messages }: { messages: UIMessage[] } = await req.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), tools: { weather: tool({ description: 'Get the weather in a location (fahrenheit)', @@ -359,7 +368,7 @@ export async function POST(req: Request) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/docs/02-getting-started/04-svelte.mdx b/content/docs/02-getting-started/04-svelte.mdx index 36bd75871702..0c501fb8a2b8 100644 --- a/content/docs/02-getting-started/04-svelte.mdx +++ b/content/docs/02-getting-started/04-svelte.mdx @@ -46,24 +46,26 @@ Install `ai` and `@ai-sdk/openai`, the AI SDK's OpenAI provider.
- + - +
- - Make sure you are using `ai` version 3.1 or higher. - - ### Configure OpenAI API Key Create a `.env.local` file in your project root and add your OpenAI API Key. This key is used to authenticate your application with the OpenAI service. @@ -90,7 +92,7 @@ Create a SvelteKit Endpoint, `src/routes/api/chat/+server.ts` and add the follow ```tsx filename="src/routes/api/chat/+server.ts" import { createOpenAI } from '@ai-sdk/openai'; -import { streamText } from 'ai'; +import { streamText, UIMessage, convertToModelMessages } from 'ai'; import { OPENAI_API_KEY } from '$env/static/private'; @@ -99,14 +101,14 @@ const openai = createOpenAI({ }); export async function POST({ request }) { - const { messages } = await request.json(); + const { messages }: { messages: UIMessage[] } = await request.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` @@ -120,7 +122,7 @@ Let's take a look at what is happening in this code: 1. Create an OpenAI provider instance with the `createOpenAI` function from the `@ai-sdk/openai` package. 2. Define a `POST` request handler and extract `messages` from the body of the request. The `messages` variable contains a history of the conversation with you and the chatbot and will provide the chatbot with the necessary context to make the next generation. 3. Call [`streamText`](/docs/reference/ai-sdk-core/stream-text), which is imported from the `ai` package. This function accepts a configuration object that contains a `model` provider (defined in step 1) and `messages` (defined in step 2). You can pass additional [settings](/docs/ai-sdk-core/settings) to further customise the model's behaviour. -4. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). 
This result object contains the [ `toDataStreamResponse` ](/docs/reference/ai-sdk-core/stream-text#to-data-stream-response) function which converts the result to a streamed response object. +4. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). This result object contains the [ `toUIMessageStreamResponse` ](/docs/reference/ai-sdk-core/stream-text#to-data-stream-response) function which converts the result to a streamed response object. 5. Return the result to the client to stream the response. ## Wire up the UI @@ -139,8 +141,17 @@ Update your root page (`src/routes/+page.svelte`) with the following code to sho
    - {#each chat.messages as message} -
  • {message.role}: {message.content}
  • + {#each chat.messages as message, messageIndex (messageIndex)} +
  • +
    {message.role}
    +
    + {#each message.parts as part, partIndex (partIndex)} + {#if part.type === 'text'} +
    {part.text}
    + {/if} + {/each} +
    +
  • {/each}
@@ -152,10 +163,12 @@ Update your root page (`src/routes/+page.svelte`) with the following code to sho This page utilizes the `Chat` class, which will, by default, use the `POST` route handler you created earlier. The class provides functions and state for handling user input and form submission. The `Chat` class provides multiple utility functions and state variables: -- `messages` - the current chat messages (an array of objects with `id`, `role`, and `content` properties). +- `messages` - the current chat messages (an array of objects with `id`, `role`, and `parts` properties). - `input` - the current value of the user's input field. - `handleSubmit` - function to handle form submission. +The LLM's response is accessed through the message `parts` array. Each message contains an ordered array of `parts` that represents everything the model generated in its response. These parts can include plain text, reasoning tokens, and more that you will see later. The `parts` array preserves the sequence of the model's outputs, allowing you to display or process each component in the order it was generated. + ## Running Your Application With that, you have built everything you need for your chatbot!
To start your application, use the command: @@ -180,7 +193,7 @@ Modify your `src/routes/api/chat/+server.ts` file to include the new weather too ```tsx filename="src/routes/api/chat/+server.ts" highlight="2,3,17-31" import { createOpenAI } from '@ai-sdk/openai'; -import { streamText, tool } from 'ai'; +import { streamText, UIMessage, convertToModelMessages, tool } from 'ai'; import { z } from 'zod'; import { OPENAI_API_KEY } from '$env/static/private'; @@ -190,11 +203,11 @@ const openai = createOpenAI({ }); export async function POST({ request }) { - const { messages } = await request.json(); + const { messages }: { messages: UIMessage[] } = await request.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), tools: { weather: tool({ description: 'Get the weather in a location (fahrenheit)', @@ -212,7 +225,7 @@ export async function POST({ request }) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` @@ -225,11 +238,11 @@ In this updated code: - Defines parameters using a Zod schema, specifying that it requires a `location` string to execute this tool. The model will attempt to extract this parameter from the context of the conversation. If it can't, it will ask the user for the missing information. - Defines an `execute` function that simulates getting weather data (in this case, it returns a random temperature). This is an asynchronous function running on the server so you can fetch real data from an external API. -Now your chatbot can "fetch" weather information for any location the user asks about. When the model determines it needs to use the weather tool, it will generate a tool call with the necessary parameters. The `execute` function will then be automatically run, and you can access the results via `toolInvocations` that is available on the message object. +Now your chatbot can "fetch" weather information for any location the user asks about. 
When the model determines it needs to use the weather tool, it will generate a tool call with the necessary parameters. The `execute` function will then be automatically run, and you can access the results via the `tool-invocation` part that is available on the `message.parts` array. Try asking something like "What's the weather in New York?" and see how the model uses the new tool. -Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result in the `toolInvocations` key of the message object. +Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result via the `tool-invocation` part of the `message.parts` array. ### Update the UI @@ -244,14 +257,18 @@ To display the tool invocations in your UI, update your `src/routes/+page.svelte
    - {#each chat.messages as message} + {#each chat.messages as message, messageIndex (messageIndex)}
  • - {message.role}: - {#if message.toolInvocations} -
    {JSON.stringify(message.toolInvocations, null, 2)}
    - {:else} - {message.content} - {/if} +
    {message.role}
    +
    + {#each message.parts as part, partIndex (partIndex)} + {#if part.type === 'text'} +
    {part.text}
    + {:else if part.type === 'tool-invocation'} +
    {JSON.stringify(part.toolInvocation, null, 2)}
    + {/if} + {/each} +
  • {/each}
@@ -262,7 +279,7 @@ To display the tool invocations in your UI, update your `src/routes/+page.svelte
``` -With this change, you check each message for any tool calls (`toolInvocations`). These tool calls will be displayed as stringified JSON. Otherwise, you show the message content as before. +With this change, you're updating the UI to handle different message parts. For text parts, you display the text content as before. For tool invocations, you display a JSON representation of the tool call and its result. Now, when you ask about the weather, you'll see the tool invocation and its result displayed in your chat interface. @@ -296,7 +313,7 @@ Update your `src/routes/api/chat/+server.ts` file to add a new tool to convert t ```tsx filename="src/routes/api/chat/+server.ts" highlight="31-44" import { createOpenAI } from '@ai-sdk/openai'; -import { streamText, tool } from 'ai'; +import { streamText, UIMessage, convertToModelMessages, tool } from 'ai'; import { z } from 'zod'; import { OPENAI_API_KEY } from '$env/static/private'; @@ -306,11 +323,11 @@ const openai = createOpenAI({ }); export async function POST({ request }) { - const { messages } = await request.json(); + const { messages }: { messages: UIMessage[] } = await request.json(); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), tools: { weather: tool({ description: 'Get the weather in a location (fahrenheit)', @@ -342,7 +359,7 @@ export async function POST({ request }) { }, }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); } ``` diff --git a/content/docs/02-getting-started/05-nuxt.mdx b/content/docs/02-getting-started/05-nuxt.mdx index 72ac0e1b7475..d3e0627dda99 100644 --- a/content/docs/02-getting-started/05-nuxt.mdx +++ b/content/docs/02-getting-started/05-nuxt.mdx @@ -24,7 +24,7 @@ If you haven't obtained your OpenAI API key, you can do so by [signing up](https Start by creating a new Nuxt application. 
This command will create a new directory named `my-ai-app` and set up a basic Nuxt application inside it. - + Navigate to the newly created directory: @@ -44,21 +44,26 @@ Install `ai` and `@ai-sdk/openai`, the AI SDK's OpenAI provider.
- + - + - +
- - Make sure you are using `ai` version 3.1 or higher. - - ### Configure OpenAI API key Create a `.env` file in your project root and add your OpenAI API Key. This key is used to authenticate your application with the OpenAI service. @@ -92,7 +97,7 @@ export default defineNuxtConfig({ Create an API route, `server/api/chat.ts` and add the following code: ```typescript filename="server/api/chat.ts" -import { streamText } from 'ai'; +import { streamText, UIMessage, convertToModelMessages } from 'ai'; import { createOpenAI } from '@ai-sdk/openai'; export default defineLazyEventHandler(async () => { @@ -103,14 +108,14 @@ export default defineLazyEventHandler(async () => { }); return defineEventHandler(async (event: any) => { - const { messages } = await readBody(event); + const { messages }: { messages: UIMessage[] } = await readBody(event); const result = streamText({ model: openai('gpt-4o'), - messages, + messages: convertToModelMessages(messages), }); - return result.toDataStreamResponse(); + return result.toUIMessageStreamResponse(); }); }); ``` @@ -137,15 +142,19 @@ const { messages, input, handleSubmit } = useChat();