From 240bfceea0974f97920c3b8fb4d1e394272fc274 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Wed, 29 Apr 2026 16:50:22 -0700 Subject: [PATCH 01/21] docs(registry): add Registry tab with installable Workflow patterns MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce a new top-nav Registry section (`/registry`) that lists installable Workflow recipes — drop-in workflow + API + UI scaffolds wired up for `shadcn add`. Includes a filterable landing grid and per-item detail pages with shiki-rendered code snippets. Initial entries: - Resend (Email): cancellable onboarding email drip campaign. - AI SDK (Vercel): durable multi-turn chat with streaming + tools. - Vercel Sandbox (Vercel): persistent code-execution session beyond the 5-hour cap. - Chat SDK (Vercel): durable bot sessions across Slack, Teams, Discord, etc. Wordmarks for AI SDK and Chat SDK are ported from the canonical marks in vercel/ai and vercel/chat respectively. The card + detail-hero logo slots are flexible-width to accommodate both square brand marks (Resend, Sandbox) and horizontal wordmarks (AI SDK, Chat SDK). The shadcnSlugs are placeholders pending the corresponding shadcn registry-index PR; the install command on each detail page reflects them verbatim. 
Made-with: Cursor --- docs/app/[lang]/registry/[id]/page.tsx | 169 ++++++++ docs/app/[lang]/registry/page.tsx | 68 ++++ docs/components/registry/RegistryCard.tsx | 62 +++ docs/components/registry/RegistryCodeTabs.tsx | 42 ++ .../registry/RegistryDetailHero.tsx | 110 ++++++ docs/components/registry/RegistryGrid.tsx | 80 ++++ .../registry/RegistryInstallTabs.tsx | 81 ++++ docs/components/registry/logos/index.tsx | 32 ++ .../components/registry/logos/logo-ai-sdk.tsx | 52 +++ .../registry/logos/logo-chat-sdk.tsx | 52 +++ .../components/registry/logos/logo-resend.tsx | 33 ++ .../registry/logos/logo-sandbox.tsx | 36 ++ docs/geistdocs.tsx | 4 + docs/lib/registry/manifest.ts | 338 ++++++++++++++++ docs/lib/registry/snippets/ai-sdk.ts | 268 +++++++++++++ docs/lib/registry/snippets/chat-sdk.ts | 202 ++++++++++ docs/lib/registry/snippets/resend.ts | 202 ++++++++++ docs/lib/registry/snippets/sandbox.ts | 365 ++++++++++++++++++ docs/lib/registry/types.ts | 101 +++++ 19 files changed, 2297 insertions(+) create mode 100644 docs/app/[lang]/registry/[id]/page.tsx create mode 100644 docs/app/[lang]/registry/page.tsx create mode 100644 docs/components/registry/RegistryCard.tsx create mode 100644 docs/components/registry/RegistryCodeTabs.tsx create mode 100644 docs/components/registry/RegistryDetailHero.tsx create mode 100644 docs/components/registry/RegistryGrid.tsx create mode 100644 docs/components/registry/RegistryInstallTabs.tsx create mode 100644 docs/components/registry/logos/index.tsx create mode 100644 docs/components/registry/logos/logo-ai-sdk.tsx create mode 100644 docs/components/registry/logos/logo-chat-sdk.tsx create mode 100644 docs/components/registry/logos/logo-resend.tsx create mode 100644 docs/components/registry/logos/logo-sandbox.tsx create mode 100644 docs/lib/registry/manifest.ts create mode 100644 docs/lib/registry/snippets/ai-sdk.ts create mode 100644 docs/lib/registry/snippets/chat-sdk.ts create mode 100644 docs/lib/registry/snippets/resend.ts create mode 
100644 docs/lib/registry/snippets/sandbox.ts create mode 100644 docs/lib/registry/types.ts diff --git a/docs/app/[lang]/registry/[id]/page.tsx b/docs/app/[lang]/registry/[id]/page.tsx new file mode 100644 index 0000000000..e9abb2861d --- /dev/null +++ b/docs/app/[lang]/registry/[id]/page.tsx @@ -0,0 +1,169 @@ +import { ExternalLink } from 'lucide-react'; +import type { Metadata } from 'next'; +import { notFound } from 'next/navigation'; +import { codeToHtml } from 'shiki'; +import { RegistryCodeTabs } from '@/components/registry/RegistryCodeTabs'; +import { RegistryDetailHero } from '@/components/registry/RegistryDetailHero'; +import { RegistryInstallTabs } from '@/components/registry/RegistryInstallTabs'; +import { Button } from '@/components/ui/button'; +import { getRegistryItem, getRegistryItemIds } from '@/lib/registry/manifest'; + +interface PageProps { + params: Promise<{ id: string }>; +} + +export function generateStaticParams() { + return getRegistryItemIds().map((id) => ({ id })); +} + +export async function generateMetadata({ + params, +}: PageProps): Promise { + const { id } = await params; + const item = getRegistryItem(id); + if (!item) return { title: 'Registry item not found' }; + return { + title: `${item.name} | Workflow Registry`, + description: item.description, + }; +} + +export default async function RegistryDetailPage({ params }: PageProps) { + const { id } = await params; + const item = getRegistryItem(id); + if (!item) notFound(); + + // Pre-render every snippet on the server with shiki, then hand the HTML to + // the client tabs component. This keeps the heavy syntax-highlighting work + // off the client bundle. + const blocks = await Promise.all( + item.snippets.map(async (snippet) => ({ + label: snippet.label, + caption: snippet.caption, + html: await codeToHtml(snippet.code, { + lang: snippet.lang, + themes: { + light: 'github-light-default', + dark: 'github-dark-default', + }, + defaultColor: false, + }), + })) + ); + + return ( +
+
+
+ +
+ +
+ {/* Long-form description */} + {item.longDescription && ( +
+

+ {item.longDescription} +

+
+ )} + + {/* Installation */} +
+

+ Installation +

+

+ Run the command for your package manager. The shadcn CLI copies + every file in this recipe into your project — you own the code + after install and can customize it freely. +

+ +
+ + {/* Environment variables */} + {item.envVars && item.envVars.length > 0 && ( +
+

+ Environment +

+

+ Add the following to your .env: +

+
+ {item.envVars.map((envVar) => ( +
+
+ + {envVar.name} + {envVar.exampleValue && ( + + ={envVar.exampleValue} + + )} + +

+ {envVar.description} +

+
+ {envVar.getKeyUrl && ( + + )} +
+ ))} +
+
+ )} + + {/* Files installed */} +
+

+ What gets installed +

+

+ These files land in your project. Edit them however you want — the + shadcn CLI never touches them again. +

+
    + {item.files.map((file) => ( +
  • + + {file.path} + +

    + {file.description} +

    +
  • + ))} +
+
+ + {/* Source preview */} +
+

Source

+

+ A preview of the code that gets copied into your app. +

+ +
+
+
+
+ ); +} diff --git a/docs/app/[lang]/registry/page.tsx b/docs/app/[lang]/registry/page.tsx new file mode 100644 index 0000000000..0adaff91d8 --- /dev/null +++ b/docs/app/[lang]/registry/page.tsx @@ -0,0 +1,68 @@ +import type { Metadata } from 'next'; +import Link from 'next/link'; +import { Button } from '@/components/ui/button'; +import { RegistryGrid } from '@/components/registry/RegistryGrid'; +import { registryItems } from '@/lib/registry/manifest'; + +export const metadata: Metadata = { + title: 'Registry | Workflow SDK', + description: + 'Installable Workflow patterns for popular providers — durable, cancellable, replay-safe recipes you drop into your app with one shadcn command.', +}; + +export default function RegistryPage() { + return ( +
+
+ {/* Hero */} +
+
+

+ Registry +

+

+ Installable Workflow patterns for popular providers. Durable, + cancellable, replay-safe recipes you drop into your app with one{' '} + + shadcn + {' '} + command. +

+
+
+ + {/* Grid */} + + + {/* CTA */} +
+
+

+ Build your own +

+

+ Package any workflow as a shadcn-installable recipe and share it + with the community. Each recipe is just a workflow file plus the + API routes that drive it — anything you can write with the + Workflow SDK qualifies. +

+
+ + +
+
+
+
+
+ ); +} diff --git a/docs/components/registry/RegistryCard.tsx b/docs/components/registry/RegistryCard.tsx new file mode 100644 index 0000000000..2f4dfc5dee --- /dev/null +++ b/docs/components/registry/RegistryCard.tsx @@ -0,0 +1,62 @@ +import Link from 'next/link'; +import { Badge } from '@/components/ui/badge'; +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from '@/components/ui/card'; +import type { RegistryItem } from '@/lib/registry/types'; +import { getProviderLogo } from './logos'; + +interface RegistryCardProps { + item: RegistryItem; +} + +export function RegistryCard({ item }: RegistryCardProps) { + const Logo = getProviderLogo(item.logo); + + return ( + + + +
+ {Logo && ( + + )} +
+ + {item.name} + + + {item.shadcnSlug} + +
+
+
+ +

+ {item.description} +

+
+
+ {item.tags.slice(0, 4).map((tag) => ( + + {tag} + + ))} +
+
+ + ); +} diff --git a/docs/components/registry/RegistryCodeTabs.tsx b/docs/components/registry/RegistryCodeTabs.tsx new file mode 100644 index 0000000000..b8bc19b651 --- /dev/null +++ b/docs/components/registry/RegistryCodeTabs.tsx @@ -0,0 +1,42 @@ +'use client'; + +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; + +interface RegistryCodeTabsProps { + blocks: { + label: string; + caption?: string; + /** Pre-rendered shiki HTML — generated on the server. */ + html: string; + }[]; +} + +export function RegistryCodeTabs({ blocks }: RegistryCodeTabsProps) { + if (blocks.length === 0) return null; + + return ( + + + {blocks.map((b) => ( + + {b.label} + + ))} + + {blocks.map((b) => ( + + {b.caption && ( +

+ {b.caption} +

+ )} +
+ + ))} + + ); +} diff --git a/docs/components/registry/RegistryDetailHero.tsx b/docs/components/registry/RegistryDetailHero.tsx new file mode 100644 index 0000000000..b04847a173 --- /dev/null +++ b/docs/components/registry/RegistryDetailHero.tsx @@ -0,0 +1,110 @@ +import { ChevronRight, ExternalLink, Github, Home } from 'lucide-react'; +import Link from 'next/link'; +import { + Breadcrumb, + BreadcrumbItem, + BreadcrumbLink, + BreadcrumbList, + BreadcrumbPage, + BreadcrumbSeparator, +} from '@/components/ui/breadcrumb'; +import { Badge } from '@/components/ui/badge'; +import type { RegistryItem } from '@/lib/registry/types'; +import { getProviderLogo } from './logos'; + +interface RegistryDetailHeroProps { + item: RegistryItem; +} + +export function RegistryDetailHero({ item }: RegistryDetailHeroProps) { + const Logo = getProviderLogo(item.logo); + + return ( +
+ + + + + Registry + + + + + + + {item.name} + + + + +
+
+
+ {Logo && ( + + )} +

+ {item.name} +

+
+

+ {item.shadcnSlug} +

+

+ {item.description} +

+
+ {item.tags.map((tag) => ( + + {tag} + + ))} +
+
+ +
+ + + Homepage + + {item.docsUrl && ( + + + Provider docs + + )} + {item.sourceUrl && ( + + + Source + + )} +
+
+
+ ); +} diff --git a/docs/components/registry/RegistryGrid.tsx b/docs/components/registry/RegistryGrid.tsx new file mode 100644 index 0000000000..63aa79200a --- /dev/null +++ b/docs/components/registry/RegistryGrid.tsx @@ -0,0 +1,80 @@ +'use client'; + +import { useState } from 'react'; +import { Badge } from '@/components/ui/badge'; +import { categoryLabels } from '@/lib/registry/manifest'; +import type { RegistryCategory, RegistryItem } from '@/lib/registry/types'; +import { RegistryCard } from './RegistryCard'; + +type Filter = 'all' | RegistryCategory; + +interface RegistryGridProps { + items: RegistryItem[]; +} + +export function RegistryGrid({ items }: RegistryGridProps) { + const [filter, setFilter] = useState('all'); + + // Build the list of category filters dynamically — only the categories that + // actually have items get a chip. + const presentCategories = Array.from( + new Set(items.map((item) => item.category)) + ); + + const filtered = + filter === 'all' ? items : items.filter((item) => item.category === filter); + + const filters: { id: Filter; label: string; count: number }[] = [ + { id: 'all', label: 'Show all', count: items.length }, + ...presentCategories.map((category) => ({ + id: category as Filter, + label: categoryLabels[category], + count: items.filter((item) => item.category === category).length, + })), + ]; + + return ( + <> +
+
+ {filters.map(({ id, label, count }) => ( + setFilter(id)} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault(); + setFilter(id); + } + }} + > + {label} ({count}) + + ))} +
+
+ + {filtered.length === 0 ? ( +

+ No registry items match this filter. +

+ ) : ( +
+
+ {filtered.map((item) => ( + + ))} +
+
+ )} + + ); +} diff --git a/docs/components/registry/RegistryInstallTabs.tsx b/docs/components/registry/RegistryInstallTabs.tsx new file mode 100644 index 0000000000..e33f49c787 --- /dev/null +++ b/docs/components/registry/RegistryInstallTabs.tsx @@ -0,0 +1,81 @@ +'use client'; + +import { CheckIcon, CopyIcon } from 'lucide-react'; +import { useState } from 'react'; +import { toast } from 'sonner'; +import { Button } from '@/components/ui/button'; +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; + +interface RegistryInstallTabsProps { + /** Bare registry slug, e.g. `@workflow-sdk/resend`. */ + slug: string; +} + +const COMMANDS: { + id: string; + label: string; + command: (slug: string) => string; +}[] = [ + { + id: 'pnpm', + label: 'pnpm', + command: (s) => `pnpm dlx shadcn@latest add ${s}`, + }, + { id: 'npm', label: 'npm', command: (s) => `npx shadcn@latest add ${s}` }, + { + id: 'yarn', + label: 'yarn', + command: (s) => `yarn dlx shadcn@latest add ${s}`, + }, + { id: 'bun', label: 'bun', command: (s) => `bunx shadcn@latest add ${s}` }, +]; + +const COPY_TIMEOUT = 2000; + +export function RegistryInstallTabs({ slug }: RegistryInstallTabsProps) { + const [copiedId, setCopiedId] = useState(null); + + const handleCopy = (id: string, command: string) => { + navigator.clipboard.writeText(command); + toast.success('Copied to clipboard'); + setCopiedId(id); + setTimeout(() => setCopiedId(null), COPY_TIMEOUT); + }; + + return ( + + + {COMMANDS.map(({ id, label }) => ( + + {label} + + ))} + + {COMMANDS.map(({ id, command }) => { + const cmd = command(slug); + const Icon = copiedId === id ? CheckIcon : CopyIcon; + return ( + +
+
+                
+                  $ 
+                  {cmd}
+                
+              
+ +
+
+ ); + })} +
+ ); +} diff --git a/docs/components/registry/logos/index.tsx b/docs/components/registry/logos/index.tsx new file mode 100644 index 0000000000..e8c7d86b71 --- /dev/null +++ b/docs/components/registry/logos/index.tsx @@ -0,0 +1,32 @@ +import type { ComponentType } from 'react'; +import type { RegistryLogoId } from '@/lib/registry/types'; +import { LogoAiSdk } from './logo-ai-sdk'; +import { LogoChatSdk } from './logo-chat-sdk'; +import { LogoResend } from './logo-resend'; +import { LogoSandbox } from './logo-sandbox'; + +export interface ProviderLogoProps { + size?: number; + className?: string; +} + +/** + * Provider brand marks — keyed by `RegistryLogoId`. + * When adding a new provider, register its SVG component here. + */ +export const providerLogos: Record< + RegistryLogoId, + ComponentType +> = { + resend: LogoResend, + 'ai-sdk': LogoAiSdk, + sandbox: LogoSandbox, + 'chat-sdk': LogoChatSdk, +}; + +export function getProviderLogo( + id: RegistryLogoId | undefined +): ComponentType | null { + if (!id) return null; + return providerLogos[id] ?? null; +} diff --git a/docs/components/registry/logos/logo-ai-sdk.tsx b/docs/components/registry/logos/logo-ai-sdk.tsx new file mode 100644 index 0000000000..17ffade6c6 --- /dev/null +++ b/docs/components/registry/logos/logo-ai-sdk.tsx @@ -0,0 +1,52 @@ +/** + * AI SDK wordmark — ported from the canonical `AISDKLogo` in + * https://github.com/vercel/ai + * (packages/devtools/src/viewer/client/components/icons.tsx). + * + * The mark is a horizontal wordmark (`AI` + a pill-shaped `SDK` badge), + * so its rendered width is derived from its viewBox aspect ratio rather + * than the `size` prop directly. `size` here means *height* in px. + * + * All paths and strokes use `currentColor` so the logo inherits text + * color and adapts to light/dark themes automatically. 
+ */ +export function LogoAiSdk({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + const width = (311 / 90) * size; + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-chat-sdk.tsx b/docs/components/registry/logos/logo-chat-sdk.tsx new file mode 100644 index 0000000000..8aace8a9d6 --- /dev/null +++ b/docs/components/registry/logos/logo-chat-sdk.tsx @@ -0,0 +1,52 @@ +/** + * Chat SDK wordmark — ported from the canonical mark in + * https://github.com/vercel/chat + * (apps/docs/components/geistcn-fallbacks/geistcn-assets/logos/logo-chat-sdk.tsx). + * + * The mark is a horizontal wordmark (`Chat` + a pill-shaped `SDK` badge), + * so its rendered width is derived from its viewBox aspect ratio rather + * than the `size` prop directly. `size` here means *height* in px. + * + * All paths and strokes use `currentColor` so the logo inherits text color + * and adapts to light/dark themes automatically. + */ +export function LogoChatSdk({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + const width = (69 / 22) * size; + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-resend.tsx b/docs/components/registry/logos/logo-resend.tsx new file mode 100644 index 0000000000..6bc60d538d --- /dev/null +++ b/docs/components/registry/logos/logo-resend.tsx @@ -0,0 +1,33 @@ +/** + * Resend brand mark — single-stroke "R" symbol. + * Source: https://resend.com/brand (resend-icon-black.svg). + * + * Recolored to `currentColor` so it inherits text color and adapts to + * light/dark themes automatically. 
+ */ +export function LogoResend({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-sandbox.tsx b/docs/components/registry/logos/logo-sandbox.tsx new file mode 100644 index 0000000000..0afd822bae --- /dev/null +++ b/docs/components/registry/logos/logo-sandbox.tsx @@ -0,0 +1,36 @@ +/** + * Vercel Sandbox brand mark — isometric cube glyph. + * + * Vercel Sandbox doesn't ship a square brand mark of its own, so this is a + * purpose-built cube icon that reads as "container / sandbox" at a glance + * and pairs with the "Sandbox" title on the card. + * + * Recolored to `currentColor` so it inherits text color and adapts to + * light/dark themes automatically. + */ +export function LogoSandbox({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/geistdocs.tsx b/docs/geistdocs.tsx index f9e5dc4680..8d054d0e0f 100644 --- a/docs/geistdocs.tsx +++ b/docs/geistdocs.tsx @@ -20,6 +20,10 @@ export const nav = [ label: 'Worlds', href: '/worlds', }, + { + label: 'Registry', + href: '/registry', + }, { label: 'Examples', href: 'https://github.com/vercel/workflow-examples', diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts new file mode 100644 index 0000000000..87ba6a42ba --- /dev/null +++ b/docs/lib/registry/manifest.ts @@ -0,0 +1,338 @@ +import { + aiSdkClientSource, + aiSdkRouteSource, + aiSdkWorkflowSource, +} from './snippets/ai-sdk'; +import { + chatSdkBotSource, + chatSdkHandlersSource, + chatSdkHookTypeSource, + chatSdkWebhookSource, + chatSdkWorkflowSource, +} from './snippets/chat-sdk'; +import { + resendCancelRouteSource, + resendStartRouteSource, + resendUsageSource, + resendWorkflowSource, +} from './snippets/resend'; +import { + sandboxCommandRouteSource, + sandboxStartRouteSource, + sandboxUsageSource, + sandboxWorkflowSource, +} from './snippets/sandbox'; +import 
type { RegistryItem } from './types'; + +/** + * Public registry of installable Workflow patterns. + * + * The first item is intentionally the simplest end-to-end example — Resend. + * Add new providers below; the listing page picks them up automatically. + */ +export const registryItems: RegistryItem[] = [ + { + id: 'resend', + name: 'Resend', + logo: 'resend', + description: 'Onboarding email drip campaign.', + longDescription: + 'A production-ready email drip campaign powered by Resend. New users get a welcome email immediately, then follow-ups spaced hours, days, or weeks apart — whatever you configure. Each send is a workflow step that gets persisted once it succeeds, so if your server restarts or crashes mid-campaign, no one ever gets a duplicate. The waits between emails cost nothing (the campaign is fully paused, not idling), so it can span days or weeks without keeping anything running. And the moment a user converts, calling a single function from your app stops the whole thing instantly — no leftover emails, no extra database tables, no flag-checking on every send.', + tags: ['email', 'drip', 'cancellable', 'durable'], + category: 'email', + homepage: 'https://resend.com', + docsUrl: 'https://resend.com/docs/send-with-nodejs', + sourceUrl: + 'https://github.com/vercel-labs/workflow_onboarding/tree/main/nextjs_workflow/app/workflows/providers', + shadcnSlug: '@workflow-sdk/resend', + envVars: [ + { + name: 'RESEND_API_KEY', + description: 'API key from your Resend account.', + getKeyUrl: 'https://resend.com/api-keys', + exampleValue: 're_********', + }, + ], + files: [ + { + path: 'app/workflows/providers/resendWorkflow.ts', + description: + 'The durable email drip workflow — `emailSequence()` + `cancelNudges` hook + the three send-email steps.', + }, + { + path: 'app/api/providers/resend/route.ts', + description: + 'POST endpoint that starts a new campaign and pre-cancels any in-flight run for the same email.', + }, + { + path: 
'app/api/providers/resend/cancel/route.ts', + description: + 'POST endpoint your app calls when the user converts — resumes the hook so the campaign exits cleanly.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'app/workflows/providers/resendWorkflow.ts', + code: resendWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/providers/resend/route.ts', + code: resendStartRouteSource, + }, + { + label: 'Cancel route', + lang: 'tsx', + caption: 'app/api/providers/resend/cancel/route.ts', + code: resendCancelRouteSource, + }, + { + label: 'Usage', + lang: 'tsx', + caption: 'Trigger the campaign from your app', + code: resendUsageSource, + }, + ], + }, + { + id: 'ai-sdk', + name: 'AI SDK', + logo: 'ai-sdk', + description: 'Durable multi-turn chat with streaming and tools.', + longDescription: + "A production-ready multi-turn chat agent powered by AI SDK's `streamText`. Each conversation is one workflow run that suspends between turns — zero compute cost while the user is reading — and resumes the moment the next message arrives. The per-turn LLM stream is durable: if your server restarts mid-response, the client reconnects with the same `runId` and picks up exactly where it left off, with the full conversation history intact. Tools are wrapped as workflow steps, so each tool call is recorded once and replayed (not re-executed) on retry. Drop in any AI Gateway model string and it works — switch from Claude to GPT to Gemini without touching the durability layer.", + tags: ['ai', 'chat', 'streaming', 'agents', 'durable'], + category: 'vercel', + homepage: 'https://ai-sdk.dev', + docsUrl: 'https://ai-sdk.dev/docs', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/ai-sdk.mdx', + shadcnSlug: '@workflow-sdk/ai-sdk', + envVars: [ + { + name: 'AI_GATEWAY_API_KEY', + description: + 'API key for Vercel AI Gateway. 
Lets you call any provider (Claude, GPT, Gemini, …) through one credential. Optional when running on Vercel with OIDC.', + getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', + exampleValue: 'vck_********', + }, + ], + files: [ + { + path: 'workflows/support.ts', + description: + 'The durable chat workflow — `supportWorkflow()` + `turnHook` + tool steps. One run = one full conversation.', + }, + { + path: 'app/api/support/route.ts', + description: + 'POST endpoint that handles first-turn `start()` and follow-up `turnHook.resume()`, slicing per-turn streams from the durable log.', + }, + { + path: 'components/support-chat.tsx', + description: + '`useChat()` client component wired up via `WorkflowChatTransport` — forwards `runId` between turns automatically.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/support.ts', + code: aiSdkWorkflowSource, + }, + { + label: 'API route', + lang: 'tsx', + caption: 'app/api/support/route.ts', + code: aiSdkRouteSource, + }, + { + label: 'Client', + lang: 'tsx', + caption: 'components/support-chat.tsx', + code: aiSdkClientSource, + }, + ], + }, + { + id: 'sandbox', + name: 'Vercel Sandbox', + logo: 'sandbox', + description: 'Persistent code-execution session beyond the 5-hour cap.', + longDescription: + 'An always-resumable code-execution session built on Vercel Sandbox. One workflow run owns one sandbox for its entire lifetime — full filesystem, network, and runtime — and the client only has to remember a single `runId`. When the user goes idle, the workflow snapshots the VM and hibernates indefinitely at zero cost; when they return, the same filesystem, installed packages, and git history are right there waiting. The pattern also rolls over the sandbox hard cap automatically: a few minutes before the 5-hour deadline it snapshots, spins up a fresh VM from that snapshot, and keeps going — so the logical session can run effectively forever on top of time-bounded infrastructure. 
Perfect for coding agents, AI dev environments, and any workload where users walk away and come back days later.', + tags: ['sandbox', 'agents', 'sessions', 'durable', 'snapshots'], + category: 'vercel', + homepage: 'https://vercel.com/docs/sandbox', + docsUrl: 'https://vercel.com/docs/sandbox', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/sandbox.mdx', + shadcnSlug: '@workflow-sdk/sandbox', + envVars: [ + { + name: 'VERCEL_OIDC_TOKEN', + description: + 'OIDC token used by `@vercel/sandbox` to authenticate. Set automatically when deployed to Vercel; locally, run `vercel env pull` to populate it.', + }, + ], + files: [ + { + path: 'workflows/sandbox-session.ts', + description: + 'The durable session workflow — `sandboxSessionWorkflow()` + `commandHook`, with idle hibernation and proactive sandbox refresh built in.', + }, + { + path: 'app/api/sandbox/start/route.ts', + description: + 'POST endpoint that starts a new session or reconnects to an existing one, replaying the durable event log to a returning client.', + }, + { + path: 'app/api/sandbox/command/route.ts', + description: + 'POST endpoint that resumes the command hook — every shell command the user runs flows through here.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/sandbox-session.ts', + code: sandboxWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/sandbox/start/route.ts', + code: sandboxStartRouteSource, + }, + { + label: 'Command route', + lang: 'tsx', + caption: 'app/api/sandbox/command/route.ts', + code: sandboxCommandRouteSource, + }, + { + label: 'Quickstart', + lang: 'tsx', + caption: 'Simpler one-shot pipeline (no session loop)', + code: sandboxUsageSource, + }, + ], + }, + { + id: 'chat-sdk', + name: 'Chat SDK', + logo: 'chat-sdk', + description: 'Durable bot sessions across Slack, Teams, Discord, and more.', + longDescription: + "A durable bot 
session pattern for Chat SDK. Write the bot once, deploy to Slack, Microsoft Teams, Google Chat, Discord, Telegram, GitHub, Linear, or WhatsApp — and let each conversation thread run as its own workflow. Multi-turn state lives in the durable event log instead of hand-rolled Redis bookkeeping. The bot can sleep for hours waiting on a user reply, schedule a follow-up days later, or pause on a long-running tool call — and survive every deploy and cold start in between. Inbound messages route to either a `start()` (first mention) or `resumeHook()` (every subsequent message), with the `runId` stored in Chat SDK's thread state. Outbound replies are durable steps, so platform side-effects are recorded once and replayed safely on restart.", + tags: ['chat', 'bots', 'slack', 'teams', 'discord', 'durable'], + category: 'vercel', + homepage: 'https://chat-sdk.dev', + docsUrl: 'https://chat-sdk.dev/docs/guides/durable-chat-sessions-nextjs', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/chat-sdk.mdx', + shadcnSlug: '@workflow-sdk/chat-sdk', + envVars: [ + { + name: 'SLACK_BOT_TOKEN', + description: + 'Bot token from your Slack app. 
Used by the Slack adapter to post replies and subscribe to thread events.', + getKeyUrl: 'https://api.slack.com/apps', + exampleValue: 'xoxb-********', + }, + { + name: 'SLACK_SIGNING_SECRET', + description: 'Signing secret used to verify incoming Slack webhooks.', + getKeyUrl: 'https://api.slack.com/apps', + }, + { + name: 'REDIS_URL', + description: + 'Connection string for the Redis instance that backs Chat SDK thread state (`runId` per thread).', + }, + ], + files: [ + { + path: 'lib/bot.ts', + description: + 'The `Chat` singleton — adapters, state backend, and `ThreadState` type that holds the `runId` per thread.', + }, + { + path: 'workflows/durable-chat-session.ts', + description: + 'The durable session workflow — `durableChatSession()` + `chatTurnHook`, with platform side-effects in dynamic-import steps.', + }, + { + path: 'workflows/chat-turn-hook.ts', + description: + 'Stand-alone `ChatTurnPayload` type so the webhook handler can import it without pulling in the workflow module.', + }, + { + path: 'lib/chat-session-handlers.ts', + description: + 'Event handlers — decide whether each inbound message is a `start()` or a `resumeHook()`, with stale-runId fallback.', + }, + { + path: 'app/api/webhooks/[platform]/route.ts', + description: + 'Catch-all webhook route that hands every platform request to the right Chat SDK handler.', + }, + ], + snippets: [ + { + label: 'Bot', + lang: 'tsx', + caption: 'lib/bot.ts', + code: chatSdkBotSource, + }, + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/durable-chat-session.ts', + code: chatSdkWorkflowSource, + }, + { + label: 'Hook type', + lang: 'tsx', + caption: 'workflows/chat-turn-hook.ts', + code: chatSdkHookTypeSource, + }, + { + label: 'Handlers', + lang: 'tsx', + caption: 'lib/chat-session-handlers.ts', + code: chatSdkHandlersSource, + }, + { + label: 'Webhook route', + lang: 'tsx', + caption: 'app/api/webhooks/[platform]/route.ts', + code: chatSdkWebhookSource, + }, + ], + }, +]; + +export function 
getRegistryItem(id: string): RegistryItem | undefined { + return registryItems.find((item) => item.id === id); +} + +export function getRegistryItemIds(): string[] { + return registryItems.map((item) => item.id); +} + +export const categoryLabels: Record = { + vercel: 'Vercel', + email: 'Email', + storage: 'Storage', + ai: 'AI', + auth: 'Auth', + payments: 'Payments', + communication: 'Communication', + other: 'Other', +}; diff --git a/docs/lib/registry/snippets/ai-sdk.ts b/docs/lib/registry/snippets/ai-sdk.ts new file mode 100644 index 0000000000..b2ff8a3cdc --- /dev/null +++ b/docs/lib/registry/snippets/ai-sdk.ts @@ -0,0 +1,268 @@ +/** + * Source snippets for the AI SDK registry entry. + * + * Each export is a raw string of source code that the detail page renders + * with shiki. The canonical reference for these snippets is the AI SDK + * cookbook integration — `content/docs/cookbook/integrations/ai-sdk.mdx`. + * + * The pattern: one workflow run = one full conversation. The workflow + * suspends between turns on a hook and resumes when the next user message + * arrives. `streamText()` runs inside a `"use step"` so the per-turn LLM + * stream is durable and can be sliced by index for follow-up turns. + * + * Note on escaping: template literal placeholders inside the snippet are + * escaped as `\${...}` so they stay literal here. + */ + +export const aiSdkWorkflowSource = `import { streamText, stepCountIs } from "ai"; +import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; +import type { ModelMessage, UIMessageChunk } from "ai"; +import { z } from "zod"; + +const MAX_TURNS = 20; + +// One hook per workflow run drives the multi-turn loop. Each \`.resume()\` +// from the API route delivers the next user message to the workflow. +export const turnHook = defineHook({ + schema: z.object({ message: z.string() }), +}); + +// Tool implementations are durable steps — each call is recorded in the +// event log and replayed (not re-executed) on restart. 
+async function lookupOrder({ orderId }: { orderId: string }) { + "use step"; + const res = await fetch(\`https://api.store.com/orders/\${orderId}\`); + return res.json(); +} + +async function processRefund({ + orderId, + reason, +}: { orderId: string; reason: string }) { + "use step"; + const res = await fetch("https://api.store.com/refunds", { + method: "POST", + body: JSON.stringify({ orderId, reason }), + }); + return res.json(); +} + +const TOOLS = { + lookupOrder: { + description: "Look up an order by ID", + inputSchema: z.object({ orderId: z.string() }), + execute: lookupOrder, + }, + processRefund: { + description: "Process a refund", + inputSchema: z.object({ orderId: z.string(), reason: z.string() }), + execute: processRefund, + }, +}; + +// Per-turn step — streams one agent response into the durable writable. +async function runTurn(messages: ModelMessage[]) { + "use step"; + + const result = streamText({ + model: "anthropic/claude-haiku-4.5", + system: "You are a customer support agent.", + messages, + tools: TOOLS, + stopWhen: stepCountIs(8), + }); + + const writable = getWritable(); + // \`preventClose: true\` keeps the durable writable open so the next turn + // can write to it. Each turn still emits its own start + finish chunks. + await result.toUIMessageStream().pipeTo(writable, { preventClose: true }); + + const response = await result.response; + return { responseMessages: response.messages }; +} + +export async function supportWorkflow(initialMessages: ModelMessage[]) { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + // Create the hook ONCE outside the loop. Re-creating it inside with the + // same token throws \`HookConflictError\`. One hook, one token, reused + // every iteration. 
+ const hook = turnHook.create({ token: workflowRunId }); + let allMessages = initialMessages; + + for (let turn = 0; turn < MAX_TURNS; turn++) { + const { responseMessages } = await runTurn(allMessages); + allMessages = [...allMessages, ...responseMessages]; + + // Suspend until the next user message arrives. + const { message } = await hook; + if (message === "/done") break; + + allMessages = [...allMessages, { role: "user", content: message }]; + } + + return { turns: MAX_TURNS }; +} +`; + +export const aiSdkRouteSource = `import type { UIMessage, UIMessageChunk } from "ai"; +import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; +import { start, getRun } from "workflow/api"; +import { supportWorkflow, turnHook } from "@/workflows/support"; + +// Pump the durable stream until this turn's \`finish\` chunk, then close +// the HTTP response. Release (don't cancel) the source reader so the +// workflow's durable stream keeps flowing for the next turn. +function sliceUntilFinish( + source: ReadableStream +): ReadableStream { + return new ReadableStream({ + async start(controller) { + const reader = source.getReader(); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + controller.enqueue(value); + if (value.type === "finish") break; + } + controller.close(); + } catch (e) { + controller.error(e); + } finally { + reader.releaseLock(); + } + }, + }); +} + +// \`/done\` exits the workflow without emitting chunks. Return a synthetic +// start+finish so \`useChat\`'s lifecycle terminates cleanly. 
+function emptyTurnStream(): ReadableStream { + return new ReadableStream({ + start(controller) { + controller.enqueue({ type: "start", messageId: crypto.randomUUID() }); + controller.enqueue({ type: "finish" }); + controller.close(); + }, + }); +} + +export async function POST(req: Request) { + const { messages, runId }: { messages: UIMessage[]; runId?: string } = + await req.json(); + const modelMessages = await convertToModelMessages(messages); + + // Follow-up turn: resume the hook, return only the new chunks. + if (runId) { + try { + const run = getRun(runId); + + // Snapshot tail BEFORE resuming so the slice only contains this turn. + const probe = run.getReadable(); + const tailIndex = await probe.getTailIndex(); + await probe.cancel(); + + const lastUser = modelMessages.filter((m) => m.role === "user").at(-1); + const text = + typeof lastUser?.content === "string" + ? lastUser.content + : Array.isArray(lastUser?.content) + ? lastUser.content + .filter((p): p is { type: "text"; text: string } => + "type" in p && p.type === "text" + ) + .map((p) => p.text) + .join("") + : ""; + + await turnHook.resume(runId, { message: text }); + + if (text === "/done") { + return createUIMessageStreamResponse({ + stream: emptyTurnStream(), + headers: { "x-workflow-run-id": runId }, + }); + } + + const stream = sliceUntilFinish( + run.getReadable({ startIndex: tailIndex + 1 }) + ); + + return createUIMessageStreamResponse({ + stream, + headers: { "x-workflow-run-id": runId }, + }); + } catch (e: unknown) { + const msg = e instanceof Error ? e.message.toLowerCase() : ""; + if (!msg.includes("not found") && !msg.includes("expired")) throw e; + // Stale runId — fall through to start fresh. + } + } + + // First turn: start a new workflow. 
+ const run = await start(supportWorkflow, [modelMessages]); + const stream = sliceUntilFinish(run.readable); + + return createUIMessageStreamResponse({ + stream, + headers: { "x-workflow-run-id": run.runId }, + }); +} +`; + +export const aiSdkClientSource = `"use client"; + +import { useChat } from "@ai-sdk/react"; +import { WorkflowChatTransport } from "@workflow/ai"; +import { useMemo, useRef, useState } from "react"; + +// Stash the runId in a ref and forward it on every follow-up. +// \`WorkflowChatTransport\` handles the wiring for you. +export function SupportChat() { + const [input, setInput] = useState(""); + const runIdRef = useRef(null); + + const transport = useMemo( + () => + new WorkflowChatTransport({ + api: "/api/support", + prepareSendMessagesRequest: ({ messages, body }) => ({ + body: { ...body, messages, runId: runIdRef.current }, + }), + onChatSendMessage: (response) => { + const id = response.headers.get("x-workflow-run-id"); + if (id) runIdRef.current = id; + }, + }), + [] + ); + + const { messages, sendMessage, status } = useChat({ transport }); + const busy = status === "streaming" || status === "submitted"; + + return ( +
+    <form
+      onSubmit={(e) => {
+        e.preventDefault();
+        if (busy || !input.trim()) return;
+        sendMessage({ text: input });
+        setInput("");
+      }}
+    >
+      {messages.map((m) => (
+        <div key={m.id}>
+          {m.role}:{" "}
+          {m.parts.map((p) => (p.type === "text" ? p.text : "")).join("")}
+        </div>
+      ))}
+      <input
+        value={input}
+        onChange={(e) => setInput(e.target.value)}
+        disabled={busy}
+      />
+    </form>
+ ); +} +`; diff --git a/docs/lib/registry/snippets/chat-sdk.ts b/docs/lib/registry/snippets/chat-sdk.ts new file mode 100644 index 0000000000..b2ee114549 --- /dev/null +++ b/docs/lib/registry/snippets/chat-sdk.ts @@ -0,0 +1,202 @@ +/** + * Source snippets for the Chat SDK registry entry. + * + * Each export is a raw string of source code that the detail page renders + * with shiki. The canonical reference for these snippets is the Chat SDK + * cookbook integration — `content/docs/cookbook/integrations/chat-sdk.mdx`. + * + * The pattern: one conversation thread = one durable workflow run. Chat + * SDK's thread state holds the `runId`, so inbound messages route to a + * `start()` (first message) or `resumeHook()` (every subsequent message). + * Outbound chat side-effects (`thread.post`, `thread.subscribe`, …) live + * inside `"use step"` functions that dynamically import the bot — keeps + * adapter packages out of the workflow sandbox. + * + * Note on escaping: template literal placeholders inside the snippet are + * escaped as `\${...}` so they stay literal here. + */ + +export const chatSdkBotSource = `import { Chat } from "chat"; +import { createSlackAdapter } from "@chat-adapter/slack"; +import { createRedisState } from "@chat-adapter/state-redis"; + +const adapters = { + slack: createSlackAdapter(), +}; + +// The thread-level state stored by Chat SDK. \`runId\` ties a conversation +// thread to its durable workflow session. +export interface ThreadState { + runId?: string; +} + +// \`registerSingleton()\` is required: Chat SDK re-hydrates \`Thread\` objects +// inside step functions and needs a registered singleton to resolve +// adapters and state for those rehydrated instances. 
+export const bot = new Chat({ + userName: "durable-bot", + adapters, + state: createRedisState(), + dedupeTtlMs: 600_000, +}).registerSingleton(); +`; + +export const chatSdkWorkflowSource = `import { Message, reviver, type Thread } from "chat"; +import { defineHook, getWorkflowMetadata } from "workflow"; +import type { ThreadState } from "@/lib/bot"; + +// Hook payload type lives in its own file so the webhook side can import +// it without pulling in the workflow module. +import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; + +const chatTurnHook = defineHook(); + +// Posting back to the platform is a step — adapter packages use Node-only +// modules that aren't available in the workflow sandbox, so we import the +// bot dynamically from inside the step body. +async function postAssistantMessage( + thread: Thread, + text: string +) { + "use step"; + const { bot } = await import("@/lib/bot"); + await bot.initialize(); + await thread.post(text); +} + +async function runTurn(text: string) { + "use step"; + // Your AI SDK call, database lookup, tool loop, etc. + return \`You said: \${text}\`; +} + +async function handleMessage( + thread: Thread, + message: Message +) { + const text = message.text.trim(); + if (text.toLowerCase() === "done") return false; + + const reply = await runTurn(text); + await postAssistantMessage(thread, reply); + return true; +} + +export async function durableChatSession(payload: string) { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + // The handler serializes \`thread\` + \`message\` with \`toJSON()\`; we revive + // them here using Chat SDK's standalone \`reviver\`. + const { thread, message } = JSON.parse(payload, reviver) as { + thread: Thread; + message: Message; + }; + + const hook = chatTurnHook.create({ token: workflowRunId }); + + await postAssistantMessage( + thread, + "Session started. Reply here; send \\\`done\\\` to stop." 
+ ); + + if (!(await handleMessage(thread, message))) return; + + // One hook resumption = one turn. The workflow stays suspended between + // messages — zero compute cost while idle. + while (true) { + const { message: nextRaw } = await hook; + const next = Message.fromJSON(nextRaw); + if (!(await handleMessage(thread, next))) return; + } +} +`; + +export const chatSdkHookTypeSource = `import type { SerializedMessage } from "chat"; + +// Importing this from the handler module keeps adapter dependencies out +// of the workflow's import graph. +export type ChatTurnPayload = { + message: SerializedMessage; +}; +`; + +export const chatSdkHandlersSource = `import type { Message, Thread } from "chat"; +import { getRun, resumeHook, start } from "workflow/api"; +import { bot, type ThreadState } from "@/lib/bot"; +import { durableChatSession } from "@/workflows/durable-chat-session"; +import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; + +async function startSession( + thread: Thread, + message: Message +) { + const run = await start(durableChatSession, [ + JSON.stringify({ + thread: thread.toJSON(), + message: message.toJSON(), + }), + ]); + await thread.setState({ runId: run.runId }); +} + +async function routeTurn( + thread: Thread, + message: Message +) { + const state = await thread.state; + + // No run yet, or the previous run finished — start fresh. + if (!state?.runId || !(await getRun(state.runId).exists)) { + await startSession(thread, message); + return; + } + + try { + await resumeHook(state.runId, { + message: message.toJSON(), + }); + } catch (err) { + const msg = err instanceof Error ? err.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + // Stale runId — start a new session rather than dropping the message. 
+ await startSession(thread, message); + return; + } + throw err; + } +} + +bot.onNewMention(async (thread, message) => { + await thread.subscribe(); + await routeTurn(thread, message); +}); + +bot.onSubscribedMessage(async (thread, message) => { + await routeTurn(thread, message); +}); +`; + +export const chatSdkWebhookSource = `import "@/lib/chat-session-handlers"; +import { after } from "next/server"; +import { bot } from "@/lib/bot"; + +type Platform = keyof typeof bot.webhooks; + +// Catch-all webhook route. Importing \`chat-session-handlers\` for its +// side-effects registers the event handlers before the first webhook +// arrives. +export async function POST( + req: Request, + ctx: RouteContext<"/api/webhooks/[platform]"> +) { + const { platform } = await ctx.params; + const handler = bot.webhooks[platform as Platform]; + if (!handler) { + return new Response(\`Unknown platform: \${platform}\`, { status: 404 }); + } + + return handler(req, { waitUntil: (task) => after(() => task) }); +} +`; diff --git a/docs/lib/registry/snippets/resend.ts b/docs/lib/registry/snippets/resend.ts new file mode 100644 index 0000000000..5b2346eb09 --- /dev/null +++ b/docs/lib/registry/snippets/resend.ts @@ -0,0 +1,202 @@ +/** + * Source snippets for the Resend registry entry. + * + * Each export is a raw string of source code that the detail page renders + * with shiki. Keep these in sync with the canonical files in the + * shadcn-installable registry (the Workflow + Resend example). + * + * Note on escaping: template literal placeholders inside the snippet (e.g. + * `${input.email}`) are escaped as `\${...}` so they stay literal here. + */ + +export const resendWorkflowSource = `import { defineHook, sleep } from "workflow"; +import type { StringValue } from "ms"; +import { Resend } from "resend"; + +export interface UserSignup { + email: string; + name: string; +} + +// Cancellation hook — opened once per run, exported so the cancel API +// route can call \`.resume()\` on it. 
+export const cancelNudges = defineHook<{ reason?: string }>(); + +export async function emailSequence( + input: UserSignup, + // Defaults to "2d". Pass a short duration like "5s" to demo end-to-end + // without waiting days. Accepts any duration string \`sleep()\` supports. + interval: StringValue = "2d" +) { + "use workflow"; + + // 1. Persist the signup (your DB / CRM call lives in storeUser). + await storeUser(input); + + // 2. Send the welcome email immediately. + await sendWelcomeEmail(input); + + // 3. Open the cancel-nudges hook ONCE for the whole campaign. We race it + // against each sleep; if \`.resume()\` is called on this token, every + // future \`await hook\` resolves immediately. + // + // AGENT — don't create a second hook with the same token in the same + // run. The first one is still pending (it lost a race) and the runtime + // will throw \`HookConflictError\`. Reuse this single hook across every + // cancellable sleep below. + const cancelHook = cancelNudges.create({ + token: \`cancel-nudges:\${input.email}\`, + }); + + // 4. Wait \`interval\` OR exit early if the user takes the desired action. + if ( + await Promise.race([ + sleep(interval).then(() => false), + cancelHook.then(() => true), + ]) + ) { + return { status: "cancelled" as const, email: input.email }; + } + + // 5. First nudge. + await sendFirstNudge(input); + + // 6. Wait again — same hook, raced against a fresh sleep. + if ( + await Promise.race([ + sleep(interval).then(() => false), + cancelHook.then(() => true), + ]) + ) { + return { status: "cancelled" as const, email: input.email }; + } + + // 7. Second (and final) nudge. 
+ await sendSecondNudge(input); + + return { status: "drip-complete" as const, email: input.email }; +} + +async function storeUser(user: UserSignup) { + "use step"; + // Replace with your DB / CRM call: + // await db.insert(users).values({ email: user.email, name: user.name }); + console.log(\`Stored signup for \${user.email}\`); +} + +async function sendWelcomeEmail(user: UserSignup) { + "use step"; + const resend = new Resend(process.env.RESEND_API_KEY); + await resend.emails.send({ + from: "onboarding@resend.dev", + to: user.email, + subject: \`Welcome, \${user.name}!\`, + html: \`

+<p>
+Hey \${user.name},
+</p>
+
+<p>
+Thanks for signing up! We're excited to have you on board.
+</p>
\`, + }); +} + +async function sendFirstNudge(user: UserSignup) { + "use step"; + const resend = new Resend(process.env.RESEND_API_KEY); + await resend.emails.send({ + from: "onboarding@resend.dev", + to: user.email, + subject: \`\${user.name}, check out what you can build\`, + html: \`

+<p>
+Hey \${user.name},
+</p>
+
+<p>
+Now that you're set up, here are a few things to try…
+</p>
\`, + }); +} + +async function sendSecondNudge(user: UserSignup) { + "use step"; + const resend = new Resend(process.env.RESEND_API_KEY); + await resend.emails.send({ + from: "onboarding@resend.dev", + to: user.email, + subject: \`\${user.name}, you're missing out\`, + html: \`

+<p>
+Hey \${user.name},
+</p>
+
+<p>
+Need help getting started? Just reply to this email.
+</p>
\`, + }); +} +`; + +export const resendStartRouteSource = `import { start } from "workflow/api"; +import type { StringValue } from "ms"; +import { NextResponse } from "next/server"; +import { + cancelNudges, + emailSequence, +} from "@/app/workflows/providers/resendWorkflow"; + +export async function POST(req: Request) { + const { name, email, interval } = (await req.json()) as { + name?: string; + email?: string; + interval?: StringValue; + }; + + if (!email) { + return NextResponse.json({ error: "email is required" }, { status: 400 }); + } + + // If a previous campaign is still alive for this email it is holding the + // hook token — fire its hook so it exits cleanly before we start a new run. + try { + await cancelNudges.resume(\`cancel-nudges:\${email}\`, { + reason: "Restarted by new signup", + }); + } catch { + // No active hook — nothing to cancel. + } + + const run = await start(emailSequence, [ + { name: name ?? "there", email }, + interval ?? "2d", + ]); + + return NextResponse.json({ runId: run.runId, email }); +} +`; + +export const resendCancelRouteSource = `import { NextResponse } from "next/server"; +import { cancelNudges } from "@/app/workflows/providers/resendWorkflow"; + +export async function POST(req: Request) { + const { email, reason } = await req.json(); + + if (!email) { + return NextResponse.json({ error: "email is required" }, { status: 400 }); + } + + try { + await cancelNudges.resume(\`cancel-nudges:\${email}\`, { + reason: reason ?? "User completed action", + }); + } catch (error) { + const msg = error instanceof Error ? 
error.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + return NextResponse.json({ + success: true, + email, + note: "No active nudge sequence (already completed or cancelled)", + }); + } + throw error; + } + + return NextResponse.json({ success: true, email }); +} +`; + +export const resendUsageSource = `import { start } from "workflow/api"; +import { emailSequence } from "@/app/workflows/providers/resendWorkflow"; + +// Anywhere in your app — e.g. a /signup API route — kick off the campaign: +const run = await start(emailSequence, [ + { name: "Jane", email: "jane@example.com" }, + "2d", // interval between nudges; pass "5s" to demo end-to-end +]); + +console.log("Drip started:", run.runId); +`; diff --git a/docs/lib/registry/snippets/sandbox.ts b/docs/lib/registry/snippets/sandbox.ts new file mode 100644 index 0000000000..40240e1f94 --- /dev/null +++ b/docs/lib/registry/snippets/sandbox.ts @@ -0,0 +1,365 @@ +/** + * Source snippets for the Vercel Sandbox registry entry. + * + * Each export is a raw string of source code that the detail page renders + * with shiki. The canonical reference for these snippets is the Sandbox + * cookbook integration — `content/docs/cookbook/integrations/sandbox.mdx`. + * + * The pattern: one workflow run = one persistent sandbox session. The + * workflow races a command hook against `sleep()` timers — when idle, it + * snapshots and hibernates indefinitely; near the sandbox hard cap, it + * snapshots and immediately recreates so the logical session outlives any + * one VM. Exit is via an explicit `/destroy` command. + * + * Note on escaping: template literal placeholders inside the snippet are + * escaped as `\${...}` so they stay literal here. 
+ */ + +export const sandboxWorkflowSource = `import { defineHook, sleep, getWritable, getWorkflowMetadata } from "workflow"; +import { Sandbox, type Snapshot } from "@vercel/sandbox"; +import { z } from "zod"; + +export const commandHook = defineHook({ + schema: z.object({ command: z.string() }), +}); + +const RUNTIME = "node22"; +const HIBERNATE_AFTER_MS = 30 * 60_000; // 30 min idle → hibernate +const SANDBOX_TIMEOUT_MS = 5 * 60 * 60_000; // sandbox hard cap (5h) +const REFRESH_SAFETY_MS = 5 * 60_000; // refresh 5 min before the cap + +export type SandboxEvent = + | { + type: "created"; + sandboxId: string; + runtime: string; + startedAt: number; + sandboxExpiresAt: number; + hibernateAfterMs: number; + } + | { + type: "status"; + state: + | "active" + | "hibernating" + | "hibernated" + | "resuming" + | "refreshing" + | "destroyed"; + at: number; + sandboxId?: string; + sandboxExpiresAt?: number; + snapshotId?: string; + } + | { type: "activity"; at: number } + | { type: "command_start"; id: string; command: string; at: number } + | { type: "command_output"; id: string; stream: "stdout" | "stderr"; data: string } + | { type: "command_end"; id: string; exitCode: number | null; durationMs: number } + | { type: "result"; status: "destroyed"; durationMs: number }; + +async function emit(event: SandboxEvent) { + "use step"; + const writer = getWritable().getWriter(); + try { + await writer.write(event); + } finally { + writer.releaseLock(); + } +} + +async function runCommandAndStream( + sandbox: Sandbox, + id: string, + command: string +) { + "use step"; + const writer = getWritable().getWriter(); + const startedAt = Date.now(); + try { + await writer.write({ type: "command_start", id, command, at: startedAt }); + const result = await sandbox.runCommand({ + cmd: "bash", + args: ["-c", command], + }); + const stdout = await result.stdout(); + if (stdout) { + await writer.write({ type: "command_output", id, stream: "stdout", data: stdout }); + } + const stderr = await 
result.stderr(); + if (stderr) { + await writer.write({ type: "command_output", id, stream: "stderr", data: stderr }); + } + await writer.write({ + type: "command_end", + id, + exitCode: result.exitCode, + durationMs: Date.now() - startedAt, + }); + } finally { + writer.releaseLock(); + } +} + +export async function sandboxSessionWorkflow() { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + // Create the hook ONCE outside the loop. Re-creating it inside with the + // same token throws \`HookConflictError\`. One hook, one token, reused + // every iteration. + const hook = commandHook.create({ token: workflowRunId }); + + const startedAt = Date.now(); + + let sandbox: Sandbox = await Sandbox.create({ + runtime: RUNTIME, + timeout: SANDBOX_TIMEOUT_MS, + }); + let sandboxCreatedAt = Date.now(); + let sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; + + await emit({ + type: "created", + sandboxId: sandbox.sandboxId, + runtime: RUNTIME, + startedAt, + sandboxExpiresAt, + hibernateAfterMs: HIBERNATE_AFTER_MS, + }); + await emit({ + type: "status", + state: "active", + at: Date.now(), + sandboxId: sandbox.sandboxId, + sandboxExpiresAt, + }); + + let snapshot: Snapshot | null = null; + let hibernated = false; + let lastActivityAt = startedAt; + let counter = 0; + let destroyed = false; + + try { + while (!destroyed) { + if (hibernated && snapshot) { + // VM already stopped. Wait for the next command — no idle timer, + // no compute cost. 
+ const payload = await hook; + if (payload.command === "/destroy") { + destroyed = true; + break; + } + + await emit({ type: "status", state: "resuming", at: Date.now() }); + sandbox = await Sandbox.create({ + source: { type: "snapshot", snapshotId: snapshot.snapshotId }, + timeout: SANDBOX_TIMEOUT_MS, + }); + sandboxCreatedAt = Date.now(); + sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; + hibernated = false; + snapshot = null; + await emit({ + type: "status", + state: "active", + at: Date.now(), + sandboxId: sandbox.sandboxId, + sandboxExpiresAt, + }); + + counter += 1; + await runCommandAndStream(sandbox, \`cmd-\${counter}\`, payload.command); + lastActivityAt = Date.now(); + await emit({ type: "activity", at: lastActivityAt }); + continue; + } + + // Active — wake at whichever comes first: idle deadline or refresh. + const idleDeadline = lastActivityAt + HIBERNATE_AFTER_MS; + const refreshDeadline = sandboxExpiresAt - REFRESH_SAFETY_MS; + const wakeAt = Math.min(idleDeadline, refreshDeadline); + const sleepMs = Math.max(0, wakeAt - Date.now()); + + const outcome = await Promise.race([ + hook.then((p) => ({ type: "command" as const, command: p.command })), + sleep(\`\${sleepMs}ms\`).then(() => ({ type: "timer" as const })), + ]); + + if (outcome.type === "timer") { + const nearExpiry = Date.now() >= refreshDeadline; + + if (nearExpiry) { + // Proactive refresh — snapshot + immediately recreate so the + // session outlives the sandbox hard cap. 
+ await emit({ type: "status", state: "refreshing", at: Date.now() }); + const snap = await sandbox.snapshot(); + sandbox = await Sandbox.create({ + source: { type: "snapshot", snapshotId: snap.snapshotId }, + timeout: SANDBOX_TIMEOUT_MS, + }); + sandboxCreatedAt = Date.now(); + sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; + await emit({ + type: "status", + state: "active", + at: Date.now(), + sandboxId: sandbox.sandboxId, + sandboxExpiresAt, + snapshotId: snap.snapshotId, + }); + lastActivityAt = Date.now(); + } else { + // Idle — snapshot and hibernate indefinitely. + await emit({ type: "status", state: "hibernating", at: Date.now() }); + snapshot = await sandbox.snapshot(); + hibernated = true; + await emit({ + type: "status", + state: "hibernated", + at: Date.now(), + snapshotId: snapshot.snapshotId, + }); + } + continue; + } + + if (outcome.command === "/destroy") { + destroyed = true; + break; + } + + counter += 1; + await runCommandAndStream(sandbox, \`cmd-\${counter}\`, outcome.command); + lastActivityAt = Date.now(); + await emit({ type: "activity", at: lastActivityAt }); + } + } finally { + if (!hibernated) { + try { + if (sandbox.status === "running") await sandbox.stop(); + } catch { + /* best-effort */ + } + } + await emit({ type: "status", state: "destroyed", at: Date.now() }); + await emit({ + type: "result", + status: "destroyed", + durationMs: Date.now() - startedAt, + }); + } +} +`; + +export const sandboxStartRouteSource = `import { start, getRun } from "workflow/api"; +import { sandboxSessionWorkflow } from "@/workflows/sandbox-session"; + +export async function POST(req: Request) { + let body: { runId?: string } = {}; + try { + const text = await req.text(); + if (text) body = JSON.parse(text); + } catch { + /* ignore malformed body */ + } + + // Reconnect path: if the client sends a known runId, replay the durable + // event log from index 0 so the UI fully rehydrates. 
+ if (body.runId) { + const run = getRun(body.runId); + if (await run.exists) { + const readable = run.getReadable({ startIndex: 0 }); + return new Response(readable.pipeThrough(ndjson()), { + headers: { + "Content-Type": "application/x-ndjson", + "x-workflow-run-id": body.runId, + "x-workflow-reconnected": "true", + "Cache-Control": "no-cache, no-transform", + }, + }); + } + // Stale runId — fall through to start fresh. + } + + const run = await start(sandboxSessionWorkflow, []); + return new Response(run.readable.pipeThrough(ndjson()), { + headers: { + "Content-Type": "application/x-ndjson", + "x-workflow-run-id": run.runId, + "Cache-Control": "no-cache, no-transform", + }, + }); +} + +function ndjson() { + return new TransformStream({ + transform(chunk, controller) { + controller.enqueue(JSON.stringify(chunk) + "\\n"); + }, + }); +} +`; + +export const sandboxCommandRouteSource = `import { commandHook } from "@/workflows/sandbox-session"; + +export async function POST(req: Request) { + const { runId, command } = (await req.json()) as { + runId?: string; + command?: string; + }; + + if (!runId || typeof command !== "string") { + return Response.json( + { error: "runId and command are required" }, + { status: 400 } + ); + } + + try { + await commandHook.resume(runId, { command }); + return Response.json({ ok: true }); + } catch (error) { + const msg = error instanceof Error ? error.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + return Response.json( + { ok: false, note: "session expired" }, + { status: 410 } + ); + } + throw error; + } +} +`; + +export const sandboxUsageSource = `// Quickstart — one-shot pipeline. +// Each \`Sandbox\` method (\`create\`, \`runCommand\`, \`stop\`, \`snapshot\`) is an +// implicit step, so the event log records every command and the workflow +// replays from the last completed call on restart. 
+import { Sandbox } from "@vercel/sandbox"; + +export async function sandboxPipeline(input: { commands: string[] }) { + "use workflow"; + + const sandbox = await Sandbox.create({ runtime: "node22" }); + + try { + const results = []; + for (const command of input.commands) { + const result = await sandbox.runCommand({ + cmd: "bash", + args: ["-c", command], + }); + results.push({ + command, + exitCode: result.exitCode, + stdout: await result.stdout(), + stderr: await result.stderr(), + }); + } + return { status: "completed", results }; + } finally { + await sandbox.stop(); + } +} +`; diff --git a/docs/lib/registry/types.ts b/docs/lib/registry/types.ts new file mode 100644 index 0000000000..6d56bcb330 --- /dev/null +++ b/docs/lib/registry/types.ts @@ -0,0 +1,101 @@ +/** + * Registry — installable Workflow patterns powered by `shadcn add`. + * + * Each `RegistryItem` is a recipe (workflow + API routes + UI) you can drop + * into your app via the shadcn CLI. The data here drives both the listing + * page (`/registry`) and the per-item detail page (`/registry/[id]`). + * + * To add a new provider: + * 1. Append a new `RegistryItem` to `manifest.ts`. + * 2. Author the source snippets and reference them via `snippets`. + * 3. Submit the corresponding registry JSON to the shadcn registry index + * (https://ui.shadcn.com/docs/registry/registry-index) so the + * `installCommand` actually resolves. + */ + +export type RegistryCategory = + | 'vercel' + | 'email' + | 'storage' + | 'ai' + | 'auth' + | 'payments' + | 'communication' + | 'other'; + +export interface RegistryEnvVar { + /** Variable name as it appears in `.env`. */ + name: string; + /** One-line description of what the variable is. */ + description: string; + /** URL the user can visit to obtain a key. */ + getKeyUrl?: string; + /** Optional human-readable example value, e.g. `re_********`. 
*/ + exampleValue?: string; +} + +export interface RegistryFile { + /** Path the file lands at after install, relative to the project root. */ + path: string; + /** Short blurb shown next to the path on the detail page. */ + description: string; +} + +export interface RegistrySnippet { + /** Tab label shown above the code block. */ + label: string; + /** Shiki language identifier (`tsx`, `ts`, `bash`, …). */ + lang: string; + /** Raw source code — rendered via shiki on the server. */ + code: string; + /** Optional caption rendered above the snippet. */ + caption?: string; +} + +/** + * Identifier for a provider brand mark. The Card / Detail hero look this up + * in `components/registry/logos` to render the actual SVG. Adding a new + * provider: + * 1. Drop a `logo-<id>.tsx` SVG component in `components/registry/logos` + * that paints with `currentColor`. + * 2. Register it in `components/registry/logos/index.tsx`. + * 3. Reference its key here. + */ +export type RegistryLogoId = 'resend' | 'ai-sdk' | 'sandbox' | 'chat-sdk'; + +export interface RegistryItem { + /** Slug used in the URL — `/registry/${id}`. */ + id: string; + /** Display name. */ + name: string; + /** Provider brand-mark identifier; rendered on the card + detail hero. */ + logo?: RegistryLogoId; + /** Short blurb (≤ 160 chars). Shown on the listing card and detail hero. */ + description: string; + /** Long-form description rendered as a paragraph on the detail page. */ + longDescription?: string; + /** Searchable tags rendered as small badges. */ + tags: string[]; + /** Primary category — used to group items on the listing page. */ + category: RegistryCategory; + /** Provider homepage / product page. */ + homepage: string; + /** Provider docs entry-point linked from the detail hero. */ + docsUrl?: string; + /** Public GitHub source URL for the snippet, when it lives in a public repo.
*/ + sourceUrl?: string; + /** + * shadcn registry slug — the exact argument you'd pass to + * `pnpm dlx shadcn@latest add ${shadcnSlug}`. Use the JSON URL or the + * registered short-name. While the registry PR is in flight this can be a + * placeholder; the install command on the detail page will reflect it + * verbatim. + */ + shadcnSlug: string; + /** Required environment variables. */ + envVars?: RegistryEnvVar[]; + /** Files that get added to the user's project on install. */ + files: RegistryFile[]; + /** Code snippets shown on the detail page (workflow source, usage, etc.). */ + snippets: RegistrySnippet[]; +} From a95f4f4774df14247b295847c2709900b48df63b Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Wed, 29 Apr 2026 18:05:37 -0700 Subject: [PATCH 02/21] docs(registry): add 14 patterns + multi-category support Adds 14 installable patterns to the registry, expanding it from the initial 4 providers/integrations into a near-complete replacement for the cookbook's pattern catalogue: - Agents: Durable Agent, Agent Cancellation, Human In The Loop - Common: Sequential & Parallel, Workflow Composition, Saga, Batching, Rate Limiting, Scheduling, Timeouts, Idempotency, Webhooks - Advanced: Child Workflows, Distributed Abort Controller Each pattern ships its own logo, snippet bundle, and manifest entry. Schema and UI changes: - `RegistryItem.category` becomes `categories: RegistryCategory[]` so items can live in more than one bucket. AI SDK, Chat SDK, and Vercel Sandbox now appear under both Agents and Vercel filters. - `RegistryCard` renders one badge per category; `RegistryGrid` filter counts and matches use `includes()` against the array. - Manifest reordered into Agents -> Vercel -> Common -> Advanced -> Providers, alphabetised within each group; the chip filter row picks up the same order automatically. 
Polish: - New / refined logos for webhooks (canonical webhook triangle), idempotency (refresh loop around equals sign), durable-agent (bot glyph), and the 11 other patterns. - Detail-page code tabs no longer collide on small viewports (`RegistryCodeTabs` gets `gap-1` + `flex-none` per trigger). Made-with: Cursor --- docs/components/registry/RegistryCard.tsx | 7 +- docs/components/registry/RegistryCodeTabs.tsx | 8 +- .../registry/RegistryDetailHero.tsx | 2 +- docs/components/registry/RegistryGrid.tsx | 12 +- docs/components/registry/logos/index.tsx | 28 + .../logos/logo-agent-cancellation.tsx | 38 + .../registry/logos/logo-batching.tsx | 55 ++ .../registry/logos/logo-child-workflows.tsx | 39 + .../logo-distributed-abort-controller.tsx | 42 + .../registry/logos/logo-durable-agent.tsx | 39 + .../registry/logos/logo-human-in-the-loop.tsx | 35 + .../registry/logos/logo-idempotency.tsx | 37 + .../registry/logos/logo-rate-limiting.tsx | 35 + docs/components/registry/logos/logo-saga.tsx | 36 + .../registry/logos/logo-scheduling.tsx | 34 + .../logos/logo-sequential-and-parallel.tsx | 40 + .../registry/logos/logo-timeouts.tsx | 37 + .../registry/logos/logo-webhooks.tsx | 39 + .../logos/logo-workflow-composition.tsx | 44 + docs/lib/registry/manifest.ts | 918 ++++++++++++++++-- .../registry/snippets/agent-cancellation.ts | 237 +++++ docs/lib/registry/snippets/batching.ts | 100 ++ docs/lib/registry/snippets/child-workflows.ts | 156 +++ .../snippets/distributed-abort-controller.ts | 266 +++++ docs/lib/registry/snippets/durable-agent.ts | 191 ++++ .../registry/snippets/human-in-the-loop.ts | 318 ++++++ docs/lib/registry/snippets/idempotency.ts | 86 ++ docs/lib/registry/snippets/rate-limiting.ts | 82 ++ docs/lib/registry/snippets/saga.ts | 147 +++ docs/lib/registry/snippets/scheduling.ts | 109 +++ .../snippets/sequential-and-parallel.ts | 108 +++ docs/lib/registry/snippets/timeouts.ts | 102 ++ docs/lib/registry/snippets/webhooks.ts | 125 +++ 
.../registry/snippets/workflow-composition.ts | 111 +++ docs/lib/registry/types.ts | 34 +- 35 files changed, 3617 insertions(+), 80 deletions(-) create mode 100644 docs/components/registry/logos/logo-agent-cancellation.tsx create mode 100644 docs/components/registry/logos/logo-batching.tsx create mode 100644 docs/components/registry/logos/logo-child-workflows.tsx create mode 100644 docs/components/registry/logos/logo-distributed-abort-controller.tsx create mode 100644 docs/components/registry/logos/logo-durable-agent.tsx create mode 100644 docs/components/registry/logos/logo-human-in-the-loop.tsx create mode 100644 docs/components/registry/logos/logo-idempotency.tsx create mode 100644 docs/components/registry/logos/logo-rate-limiting.tsx create mode 100644 docs/components/registry/logos/logo-saga.tsx create mode 100644 docs/components/registry/logos/logo-scheduling.tsx create mode 100644 docs/components/registry/logos/logo-sequential-and-parallel.tsx create mode 100644 docs/components/registry/logos/logo-timeouts.tsx create mode 100644 docs/components/registry/logos/logo-webhooks.tsx create mode 100644 docs/components/registry/logos/logo-workflow-composition.tsx create mode 100644 docs/lib/registry/snippets/agent-cancellation.ts create mode 100644 docs/lib/registry/snippets/batching.ts create mode 100644 docs/lib/registry/snippets/child-workflows.ts create mode 100644 docs/lib/registry/snippets/distributed-abort-controller.ts create mode 100644 docs/lib/registry/snippets/durable-agent.ts create mode 100644 docs/lib/registry/snippets/human-in-the-loop.ts create mode 100644 docs/lib/registry/snippets/idempotency.ts create mode 100644 docs/lib/registry/snippets/rate-limiting.ts create mode 100644 docs/lib/registry/snippets/saga.ts create mode 100644 docs/lib/registry/snippets/scheduling.ts create mode 100644 docs/lib/registry/snippets/sequential-and-parallel.ts create mode 100644 docs/lib/registry/snippets/timeouts.ts create mode 100644 
docs/lib/registry/snippets/webhooks.ts create mode 100644 docs/lib/registry/snippets/workflow-composition.ts diff --git a/docs/components/registry/RegistryCard.tsx b/docs/components/registry/RegistryCard.tsx index 2f4dfc5dee..5218cd2185 100644 --- a/docs/components/registry/RegistryCard.tsx +++ b/docs/components/registry/RegistryCard.tsx @@ -7,6 +7,7 @@ import { CardHeader, CardTitle, } from '@/components/ui/card'; +import { categoryLabels } from '@/lib/registry/manifest'; import type { RegistryItem } from '@/lib/registry/types'; import { getProviderLogo } from './logos'; @@ -46,13 +47,13 @@ export function RegistryCard({ item }: RegistryCardProps) {

- {item.tags.slice(0, 4).map((tag) => ( + {item.categories.map((category) => ( - {tag} + {categoryLabels[category]} ))}
diff --git a/docs/components/registry/RegistryCodeTabs.tsx b/docs/components/registry/RegistryCodeTabs.tsx index b8bc19b651..8cebd74615 100644 --- a/docs/components/registry/RegistryCodeTabs.tsx +++ b/docs/components/registry/RegistryCodeTabs.tsx @@ -16,9 +16,13 @@ export function RegistryCodeTabs({ blocks }: RegistryCodeTabsProps) { return ( - + {blocks.map((b) => ( - + {b.label} ))} diff --git a/docs/components/registry/RegistryDetailHero.tsx b/docs/components/registry/RegistryDetailHero.tsx index b04847a173..485960e834 100644 --- a/docs/components/registry/RegistryDetailHero.tsx +++ b/docs/components/registry/RegistryDetailHero.tsx @@ -1,5 +1,6 @@ import { ChevronRight, ExternalLink, Github, Home } from 'lucide-react'; import Link from 'next/link'; +import { Badge } from '@/components/ui/badge'; import { Breadcrumb, BreadcrumbItem, @@ -8,7 +9,6 @@ import { BreadcrumbPage, BreadcrumbSeparator, } from '@/components/ui/breadcrumb'; -import { Badge } from '@/components/ui/badge'; import type { RegistryItem } from '@/lib/registry/types'; import { getProviderLogo } from './logos'; diff --git a/docs/components/registry/RegistryGrid.tsx b/docs/components/registry/RegistryGrid.tsx index 63aa79200a..07cc5eb03d 100644 --- a/docs/components/registry/RegistryGrid.tsx +++ b/docs/components/registry/RegistryGrid.tsx @@ -16,20 +16,24 @@ export function RegistryGrid({ items }: RegistryGridProps) { const [filter, setFilter] = useState('all'); // Build the list of category filters dynamically — only the categories that - // actually have items get a chip. + // actually have items get a chip. Items can belong to more than one + // category (e.g. AI SDK is both `agent` and `vercel`), so they appear under + // every relevant filter and contribute to each chip's count. const presentCategories = Array.from( - new Set(items.map((item) => item.category)) + new Set(items.flatMap((item) => item.categories)) ); const filtered = - filter === 'all' ? 
items : items.filter((item) => item.category === filter); + filter === 'all' + ? items + : items.filter((item) => item.categories.includes(filter)); const filters: { id: Filter; label: string; count: number }[] = [ { id: 'all', label: 'Show all', count: items.length }, ...presentCategories.map((category) => ({ id: category as Filter, label: categoryLabels[category], - count: items.filter((item) => item.category === category).length, + count: items.filter((item) => item.categories.includes(category)).length, })), ]; diff --git a/docs/components/registry/logos/index.tsx b/docs/components/registry/logos/index.tsx index e8c7d86b71..4c05964bbf 100644 --- a/docs/components/registry/logos/index.tsx +++ b/docs/components/registry/logos/index.tsx @@ -1,9 +1,23 @@ import type { ComponentType } from 'react'; import type { RegistryLogoId } from '@/lib/registry/types'; +import { LogoAgentCancellation } from './logo-agent-cancellation'; import { LogoAiSdk } from './logo-ai-sdk'; +import { LogoBatching } from './logo-batching'; import { LogoChatSdk } from './logo-chat-sdk'; +import { LogoChildWorkflows } from './logo-child-workflows'; +import { LogoDistributedAbortController } from './logo-distributed-abort-controller'; +import { LogoDurableAgent } from './logo-durable-agent'; +import { LogoHumanInTheLoop } from './logo-human-in-the-loop'; +import { LogoIdempotency } from './logo-idempotency'; +import { LogoRateLimiting } from './logo-rate-limiting'; import { LogoResend } from './logo-resend'; +import { LogoSaga } from './logo-saga'; import { LogoSandbox } from './logo-sandbox'; +import { LogoScheduling } from './logo-scheduling'; +import { LogoSequentialAndParallel } from './logo-sequential-and-parallel'; +import { LogoTimeouts } from './logo-timeouts'; +import { LogoWebhooks } from './logo-webhooks'; +import { LogoWorkflowComposition } from './logo-workflow-composition'; export interface ProviderLogoProps { size?: number; @@ -22,6 +36,20 @@ export const providerLogos: Record< 
'ai-sdk': LogoAiSdk, sandbox: LogoSandbox, 'chat-sdk': LogoChatSdk, + 'durable-agent': LogoDurableAgent, + 'human-in-the-loop': LogoHumanInTheLoop, + 'agent-cancellation': LogoAgentCancellation, + 'sequential-and-parallel': LogoSequentialAndParallel, + 'workflow-composition': LogoWorkflowComposition, + saga: LogoSaga, + batching: LogoBatching, + 'rate-limiting': LogoRateLimiting, + scheduling: LogoScheduling, + timeouts: LogoTimeouts, + idempotency: LogoIdempotency, + webhooks: LogoWebhooks, + 'child-workflows': LogoChildWorkflows, + 'distributed-abort-controller': LogoDistributedAbortController, }; export function getProviderLogo( diff --git a/docs/components/registry/logos/logo-agent-cancellation.tsx b/docs/components/registry/logos/logo-agent-cancellation.tsx new file mode 100644 index 0000000000..e9da3ff548 --- /dev/null +++ b/docs/components/registry/logos/logo-agent-cancellation.tsx @@ -0,0 +1,38 @@ +/** + * Agent Cancellation brand mark. + * + * Universal media-stop glyph — a circle with a solid square inside. Reads as + * "stop the running thing" in any chat UI. The outer circle is stroked and + * the inner square is filled, both with `currentColor` so the mark adapts + * to light and dark themes. + */ +export function LogoAgentCancellation({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-batching.tsx b/docs/components/registry/logos/logo-batching.tsx new file mode 100644 index 0000000000..0965aaf940 --- /dev/null +++ b/docs/components/registry/logos/logo-batching.tsx @@ -0,0 +1,55 @@ +/** + * Batching brand mark. + * + * Three stacked, slightly-offset rounded rectangles — a "batch" of work. + * Top rectangle filled to suggest the active batch. 
+ */ +export function LogoBatching({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-child-workflows.tsx b/docs/components/registry/logos/logo-child-workflows.tsx new file mode 100644 index 0000000000..9e7fbc37d7 --- /dev/null +++ b/docs/components/registry/logos/logo-child-workflows.tsx @@ -0,0 +1,39 @@ +/** + * Child Workflows brand mark. + * + * Parent node fanning out to three child nodes — the spawn-and-poll shape. + * All `currentColor`. + */ +export function LogoChildWorkflows({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-distributed-abort-controller.tsx b/docs/components/registry/logos/logo-distributed-abort-controller.tsx new file mode 100644 index 0000000000..7851a6ef40 --- /dev/null +++ b/docs/components/registry/logos/logo-distributed-abort-controller.tsx @@ -0,0 +1,42 @@ +/** + * Distributed Abort Controller brand mark. + * + * Universal abort glyph — circle with a slash through it — overlaid with a + * small dotted ring suggesting cross-process / distributed coordination. + * All `currentColor`. + */ +export function LogoDistributedAbortController({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-durable-agent.tsx b/docs/components/registry/logos/logo-durable-agent.tsx new file mode 100644 index 0000000000..c605357b06 --- /dev/null +++ b/docs/components/registry/logos/logo-durable-agent.tsx @@ -0,0 +1,39 @@ +/** + * Durable Agent brand mark. + * + * Bot glyph — rounded body, antenna, two eyes — the universal "agent" icon. + * All `currentColor`. 
+ */ +export function LogoDurableAgent({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-human-in-the-loop.tsx b/docs/components/registry/logos/logo-human-in-the-loop.tsx new file mode 100644 index 0000000000..c9445dedd3 --- /dev/null +++ b/docs/components/registry/logos/logo-human-in-the-loop.tsx @@ -0,0 +1,35 @@ +/** + * Human-in-the-Loop brand mark. + * + * Thumbs-up glyph — represents a human approval signal that gates a + * paused agent. Drawn with `currentColor` strokes so it inherits text color + * in both light and dark themes. + */ +export function LogoHumanInTheLoop({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-idempotency.tsx b/docs/components/registry/logos/logo-idempotency.tsx new file mode 100644 index 0000000000..21a3d28a75 --- /dev/null +++ b/docs/components/registry/logos/logo-idempotency.tsx @@ -0,0 +1,37 @@ +/** + * Idempotency brand mark. + * + * Refresh arrow looping around an equals sign — the visual statement + * "f(f(x)) = f(x)". No matter how many times you replay the operation, + * the result is equal. All `currentColor`. + */ +export function LogoIdempotency({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-rate-limiting.tsx b/docs/components/registry/logos/logo-rate-limiting.tsx new file mode 100644 index 0000000000..777cea0adc --- /dev/null +++ b/docs/components/registry/logos/logo-rate-limiting.tsx @@ -0,0 +1,35 @@ +/** + * Rate Limiting brand mark. + * + * Gauge / speedometer with a needle — represents throttling and backoff. + * All `currentColor`. 
+ */ +export function LogoRateLimiting({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-saga.tsx b/docs/components/registry/logos/logo-saga.tsx new file mode 100644 index 0000000000..eabe9c50c9 --- /dev/null +++ b/docs/components/registry/logos/logo-saga.tsx @@ -0,0 +1,36 @@ +/** + * Saga / Transactions & Rollbacks brand mark. + * + * Two arrows curving in opposite directions — forward progress + reverse + * compensation. All `currentColor`. + */ +export function LogoSaga({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-scheduling.tsx b/docs/components/registry/logos/logo-scheduling.tsx new file mode 100644 index 0000000000..098309896c --- /dev/null +++ b/docs/components/registry/logos/logo-scheduling.tsx @@ -0,0 +1,34 @@ +/** + * Scheduling brand mark. + * + * Clock face with a calendar tick — represents future scheduled actions. + * All `currentColor`. + */ +export function LogoScheduling({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-sequential-and-parallel.tsx b/docs/components/registry/logos/logo-sequential-and-parallel.tsx new file mode 100644 index 0000000000..a6a158cc36 --- /dev/null +++ b/docs/components/registry/logos/logo-sequential-and-parallel.tsx @@ -0,0 +1,40 @@ +/** + * Sequential & Parallel brand mark. + * + * Three lines branching from a single source — one continuing forward + * (sequential), the others fanning out (parallel). All `currentColor`. 
+ */ +export function LogoSequentialAndParallel({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-timeouts.tsx b/docs/components/registry/logos/logo-timeouts.tsx new file mode 100644 index 0000000000..42fb169d2c --- /dev/null +++ b/docs/components/registry/logos/logo-timeouts.tsx @@ -0,0 +1,37 @@ +/** + * Timeouts brand mark. + * + * Stopwatch glyph — circle with a top crown and a hand pointing right. + * All `currentColor`. + */ +export function LogoTimeouts({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-webhooks.tsx b/docs/components/registry/logos/logo-webhooks.tsx new file mode 100644 index 0000000000..6bee9b3058 --- /dev/null +++ b/docs/components/registry/logos/logo-webhooks.tsx @@ -0,0 +1,39 @@ +/** + * Webhooks brand mark. + * + * The webhooks.fyi triangle — three nodes (one at each vertex of an + * equilateral triangle) connected by edges. The de facto "webhook" logo + * across the web. All `currentColor`. + */ +export function LogoWebhooks({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/components/registry/logos/logo-workflow-composition.tsx b/docs/components/registry/logos/logo-workflow-composition.tsx new file mode 100644 index 0000000000..1390b2e6ed --- /dev/null +++ b/docs/components/registry/logos/logo-workflow-composition.tsx @@ -0,0 +1,44 @@ +/** + * Workflow Composition brand mark. + * + * Two nested rounded rectangles — a child workflow inside a parent — with a + * small arrow indicating composition / call. All `currentColor`. 
+ */ +export function LogoWorkflowComposition({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index 87ba6a42ba..08eaf1aa8a 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -1,8 +1,19 @@ +import { + agentCancellationButtonSource, + agentCancellationRouteSource, + agentCancellationStartRouteSource, + agentCancellationUsageSource, + agentCancellationWorkflowSource, +} from './snippets/agent-cancellation'; import { aiSdkClientSource, aiSdkRouteSource, aiSdkWorkflowSource, } from './snippets/ai-sdk'; +import { + batchingStartRouteSource, + batchingWorkflowSource, +} from './snippets/batching'; import { chatSdkBotSource, chatSdkHandlersSource, @@ -10,90 +21,160 @@ import { chatSdkWebhookSource, chatSdkWorkflowSource, } from './snippets/chat-sdk'; +import { + childWorkflowsStartRouteSource, + childWorkflowsWorkflowSource, +} from './snippets/child-workflows'; +import { + distributedAbortControllerButtonSource, + distributedAbortControllerLibSource, + distributedAbortControllerRouteSource, + distributedAbortControllerUsageSource, +} from './snippets/distributed-abort-controller'; +import { + durableAgentClientSource, + durableAgentStartRouteSource, + durableAgentWorkflowSource, +} from './snippets/durable-agent'; +import { + humanInTheLoopCardSource, + humanInTheLoopRouteSource, + humanInTheLoopStartRouteSource, + humanInTheLoopUsageSource, + humanInTheLoopWorkflowSource, +} from './snippets/human-in-the-loop'; +import { + idempotencyStartRouteSource, + idempotencyWorkflowSource, +} from './snippets/idempotency'; +import { + rateLimitingStartRouteSource, + rateLimitingWorkflowSource, +} from './snippets/rate-limiting'; import { resendCancelRouteSource, resendStartRouteSource, resendUsageSource, resendWorkflowSource, } from './snippets/resend'; +import { sagaStartRouteSource, sagaWorkflowSource } from 
'./snippets/saga'; import { sandboxCommandRouteSource, sandboxStartRouteSource, sandboxUsageSource, sandboxWorkflowSource, } from './snippets/sandbox'; -import type { RegistryItem } from './types'; +import { + schedulingCancelRouteSource, + schedulingStartRouteSource, + schedulingWorkflowSource, +} from './snippets/scheduling'; +import { + sequentialAndParallelStartRouteSource, + sequentialAndParallelWorkflowSource, +} from './snippets/sequential-and-parallel'; +import { + timeoutsStartRouteSource, + timeoutsWorkflowSource, +} from './snippets/timeouts'; +import { + webhooksStartRouteSource, + webhooksWorkflowSource, +} from './snippets/webhooks'; +import { + workflowCompositionStartRouteSource, + workflowCompositionWorkflowSource, +} from './snippets/workflow-composition'; +import type { RegistryCategory, RegistryItem } from './types'; /** * Public registry of installable Workflow patterns. * - * The first item is intentionally the simplest end-to-end example — Resend. - * Add new providers below; the listing page picks them up automatically. + * Items are grouped by category in the order surfaced on the listing page — + * Agents, Vercel, Common, Advanced, Providers — and alphabetised within each + * group. Items can belong to more than one category (e.g. AI SDK is both an + * `agent` pattern and a `vercel` integration); they appear once here, in + * their primary group, and the listing page surfaces them under every + * relevant filter. */ export const registryItems: RegistryItem[] = [ { - id: 'resend', - name: 'Resend', - logo: 'resend', - description: 'Onboarding email drip campaign.', + id: 'agent-cancellation', + name: 'Agent Cancellation', + logo: 'agent-cancellation', + description: + 'Cancel a running AI agent gracefully — Stop button + workflow signal + hard-cancel fallback.', longDescription: - 'A production-ready email drip campaign powered by Resend. 
New users get a welcome email immediately, then follow-ups spaced hours, days, or weeks apart — whatever you configure. Each send is a workflow step that gets persisted once it succeeds, so if your server restarts or crashes mid-campaign, no one ever gets a duplicate. The waits between emails cost nothing (the campaign is fully paused, not idling), so it can span days or weeks without keeping anything running. And the moment a user converts, calling a single function from your app stops the whole thing instantly — no leftover emails, no extra database tables, no flag-checking on every send.', - tags: ['email', 'drip', 'cancellable', 'durable'], - category: 'email', - homepage: 'https://resend.com', - docsUrl: 'https://resend.com/docs/send-with-nodejs', + 'A drop-in cancellation pattern for any `DurableAgent`, covering both graceful Stop Signal and Hard Cancellation. The workflow races the agent against a `stopHook` keyed by the run ID; clicking Stop posts to a route that resumes the hook, the workflow exits at its next `await` boundary, and a `data-stopped` part is streamed to the client so it renders a clean ending instead of an abrupt connection close. The route automatically falls back to `getRun(runId).cancel()` if the hook is already gone (e.g. the agent finished mid-request), so the Stop button always succeeds. 
Note: the Stop Signal does not cancel the underlying model stream — tokens generated after the stop signal are still produced and billed; what it does is exit the workflow function and notify the client.', + tags: ['agent', 'cancellation', 'stop-button', 'durable'], + categories: ['agent'], + homepage: 'https://workflow-sdk.dev', + docsUrl: + 'https://workflow-sdk.dev/cookbook/agent-patterns/agent-cancellation', sourceUrl: - 'https://github.com/vercel-labs/workflow_onboarding/tree/main/nextjs_workflow/app/workflows/providers', - shadcnSlug: '@workflow-sdk/resend', + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx', + shadcnSlug: '@workflow-sdk/agent-cancellation', envVars: [ { - name: 'RESEND_API_KEY', - description: 'API key from your Resend account.', - getKeyUrl: 'https://resend.com/api-keys', - exampleValue: 're_********', + name: 'AI_GATEWAY_API_KEY', + description: + 'API key for Vercel AI Gateway. Lets you call any provider (Claude, GPT, Gemini, …) through one credential. 
Optional when running on Vercel with OIDC.', + getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', + exampleValue: 'vck_********', }, ], files: [ { - path: 'app/workflows/providers/resendWorkflow.ts', + path: 'workflows/stoppable-agent.ts', description: - 'The durable email drip workflow — `emailSequence()` + `cancelNudges` hook + the three send-email steps.', + 'Durable agent + `stopHook` + `Promise.race` exit, with a final `data-stopped` part emitted on stop.', }, { - path: 'app/api/providers/resend/route.ts', + path: 'app/api/agent/route.ts', description: - 'POST endpoint that starts a new campaign and pre-cancels any in-flight run for the same email.', + 'POST endpoint that starts the agent and returns the streaming response with `x-workflow-run-id` set.', }, { - path: 'app/api/providers/resend/cancel/route.ts', + path: 'app/api/agent/[runId]/stop/route.ts', description: - 'POST endpoint your app calls when the user converts — resumes the hook so the campaign exits cleanly.', + 'POST endpoint that resumes `stopHook` for the given `runId` with a `getRun(runId).cancel()` fallback when the hook is already gone.', + }, + { + path: 'components/stop-button.tsx', + description: + 'Reusable client component — takes a `runId`, posts to the stop route, and disables itself while the request is in flight.', }, ], snippets: [ { label: 'Workflow', lang: 'tsx', - caption: 'app/workflows/providers/resendWorkflow.ts', - code: resendWorkflowSource, + caption: 'workflows/stoppable-agent.ts', + code: agentCancellationWorkflowSource, }, { label: 'Start route', lang: 'tsx', - caption: 'app/api/providers/resend/route.ts', - code: resendStartRouteSource, + caption: 'app/api/agent/route.ts', + code: agentCancellationStartRouteSource, }, { - label: 'Cancel route', + label: 'Stop route', lang: 'tsx', - caption: 'app/api/providers/resend/cancel/route.ts', - code: resendCancelRouteSource, + caption: 'app/api/agent/[runId]/stop/route.ts', + code: agentCancellationRouteSource, + }, + { + 
label: 'Button', + lang: 'tsx', + caption: 'components/stop-button.tsx', + code: agentCancellationButtonSource, }, { label: 'Usage', lang: 'tsx', - caption: 'Trigger the campaign from your app', - code: resendUsageSource, + caption: 'Wire the Stop button into your chat UI', + code: agentCancellationUsageSource, }, ], }, @@ -105,7 +186,7 @@ export const registryItems: RegistryItem[] = [ longDescription: "A production-ready multi-turn chat agent powered by AI SDK's `streamText`. Each conversation is one workflow run that suspends between turns — zero compute cost while the user is reading — and resumes the moment the next message arrives. The per-turn LLM stream is durable: if your server restarts mid-response, the client reconnects with the same `runId` and picks up exactly where it left off, with the full conversation history intact. Tools are wrapped as workflow steps, so each tool call is recorded once and replayed (not re-executed) on retry. Drop in any AI Gateway model string and it works — switch from Claude to GPT to Gemini without touching the durability layer.", tags: ['ai', 'chat', 'streaming', 'agents', 'durable'], - category: 'vercel', + categories: ['agent', 'vercel'], homepage: 'https://ai-sdk.dev', docsUrl: 'https://ai-sdk.dev/docs', sourceUrl: @@ -159,67 +240,144 @@ export const registryItems: RegistryItem[] = [ ], }, { - id: 'sandbox', - name: 'Vercel Sandbox', - logo: 'sandbox', - description: 'Persistent code-execution session beyond the 5-hour cap.', + id: 'durable-agent', + name: 'Durable Agent', + logo: 'durable-agent', + description: + 'Replace a stateless AI agent with a durable one — tools as steps, streamed output, crash-safe by default.', longDescription: - 'An always-resumable code-execution session built on Vercel Sandbox. One workflow run owns one sandbox for its entire lifetime — full filesystem, network, and runtime — and the client only has to remember a single `runId`. 
When the user goes idle, the workflow snapshots the VM and hibernates indefinitely at zero cost; when they return, the same filesystem, installed packages, and git history are right there waiting. The pattern also rolls over the sandbox hard cap automatically: a few minutes before the 5-hour deadline it snapshots, spins up a fresh VM from that snapshot, and keeps going — so the logical session can run effectively forever on top of time-bounded infrastructure. Perfect for coding agents, AI dev environments, and any workload where users walk away and come back days later.', - tags: ['sandbox', 'agents', 'sessions', 'durable', 'snapshots'], - category: 'vercel', - homepage: 'https://vercel.com/docs/sandbox', - docsUrl: 'https://vercel.com/docs/sandbox', + 'The foundational AI agent pattern on Workflow. Wrap any AI SDK agent in `DurableAgent`, mark each tool with `"use step"`, and stream output through `getWritable()`. The framework handles retries, replay, and persistence automatically — if the process crashes mid-tool-call, the agent resumes from the last completed step on replay, with no extra bookkeeping in your code. Each tool call gets automatic retries (3× by default), an entry in the workflow event log for observability, and full Node.js access. Drop in any AI Gateway model string and switch providers without touching the durability layer. 
The included example is a flight booking agent (search → book → weather check) — replace the tools with your own; the surrounding shape stays identical.', + tags: ['agents', 'ai', 'durable', 'tools', 'streaming'], + categories: ['agent'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/agent-patterns/durable-agent', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/sandbox.mdx', - shadcnSlug: '@workflow-sdk/sandbox', + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx', + shadcnSlug: '@workflow-sdk/durable-agent', envVars: [ { - name: 'VERCEL_OIDC_TOKEN', + name: 'AI_GATEWAY_API_KEY', description: - 'OIDC token used by `@vercel/sandbox` to authenticate. Set automatically when deployed to Vercel; locally, run `vercel env pull` to populate it.', + 'API key for Vercel AI Gateway. Lets you call any provider (Claude, GPT, Gemini, …) through one credential. Optional when running on Vercel with OIDC.', + getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', + exampleValue: 'vck_********', }, ], files: [ { - path: 'workflows/sandbox-session.ts', + path: 'workflows/flight-agent.ts', description: - 'The durable session workflow — `sandboxSessionWorkflow()` + `commandHook`, with idle hibernation and proactive sandbox refresh built in.', + 'The durable agent workflow — `flightAgent()` orchestrator + three tool steps (`searchFlights`, `bookFlight`, `checkWeather`). 
Replace the tools with your own.', }, { - path: 'app/api/sandbox/start/route.ts', + path: 'app/api/flight-agent/route.ts', description: - 'POST endpoint that starts a new session or reconnects to an existing one, replaying the durable event log to a returning client.', + 'POST endpoint that converts incoming `UIMessage`s, starts the agent with `start()`, and returns the streaming response with `x-workflow-run-id` set.', }, { - path: 'app/api/sandbox/command/route.ts', + path: 'components/flight-agent-chat.tsx', description: - 'POST endpoint that resumes the command hook — every shell command the user runs flows through here.', + '`useChat()` client component wired up via `WorkflowChatTransport` — forwards the run ID between turns automatically for durable multi-turn conversations.', }, ], snippets: [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/sandbox-session.ts', - code: sandboxWorkflowSource, + caption: 'workflows/flight-agent.ts', + code: durableAgentWorkflowSource, }, { label: 'Start route', lang: 'tsx', - caption: 'app/api/sandbox/start/route.ts', - code: sandboxStartRouteSource, + caption: 'app/api/flight-agent/route.ts', + code: durableAgentStartRouteSource, }, { - label: 'Command route', + label: 'Client', lang: 'tsx', - caption: 'app/api/sandbox/command/route.ts', - code: sandboxCommandRouteSource, + caption: 'components/flight-agent-chat.tsx', + code: durableAgentClientSource, }, + ], + }, + { + id: 'human-in-the-loop', + name: 'Human In The Loop', + logo: 'human-in-the-loop', + description: + 'Pause an AI agent to wait for human approval, then resume with the decision.', + longDescription: + 'A drop-in human-in-the-loop pattern for any `DurableAgent`. The agent calls an approval tool before any consequential action; the tool emits a custom data part to the stream so the client can render Approve / Reject controls, then suspends on a `defineHook()` keyed by the tool call ID. 
An approval API route resumes the hook with the decision, the workflow streams the resolution, and the agent continues. A 24-hour `sleep()` races the hook so stale requests expire automatically. Comes with a generic approval card component that renders any payload schema and listens for `data-approval-needed` / `data-approval-resolved` parts.', + tags: ['agent', 'approval', 'human-in-the-loop', 'durable'], + categories: ['agent'], + homepage: 'https://workflow-sdk.dev', + docsUrl: + 'https://workflow-sdk.dev/cookbook/agent-patterns/human-in-the-loop', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx', + shadcnSlug: '@workflow-sdk/human-in-the-loop', + envVars: [ { - label: 'Quickstart', + name: 'AI_GATEWAY_API_KEY', + description: + 'API key for Vercel AI Gateway. Lets you call any provider (Claude, GPT, Gemini, …) through one credential. Optional when running on Vercel with OIDC.', + getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', + exampleValue: 'vck_********', + }, + ], + files: [ + { + path: 'workflows/approval-agent.ts', + description: + 'Durable agent + `approvalHook` + the `requestApproval` tool that races the hook against a 24h `sleep()` and streams resolution parts.', + }, + { + path: 'app/api/approval-agent/route.ts', + description: + 'POST endpoint that starts the agent and returns the streaming response with `x-workflow-run-id` set.', + }, + { + path: 'app/api/approval/route.ts', + description: + 'POST endpoint that resumes `approvalHook` with `{ approved, comment }`. 
Idempotent against expired/already-consumed hooks.', + }, + { + path: 'components/approval-card.tsx', + description: + 'Reusable client component — renders the payload, posts the decision, and swaps to the resolution once it streams in.', + }, + ], + snippets: [ + { + label: 'Workflow', lang: 'tsx', - caption: 'Simpler one-shot pipeline (no session loop)', - code: sandboxUsageSource, + caption: 'workflows/approval-agent.ts', + code: humanInTheLoopWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/approval-agent/route.ts', + code: humanInTheLoopStartRouteSource, + }, + { + label: 'Approval route', + lang: 'tsx', + caption: 'app/api/approval/route.ts', + code: humanInTheLoopRouteSource, + }, + { + label: 'Card', + lang: 'tsx', + caption: 'components/approval-card.tsx', + code: humanInTheLoopCardSource, + }, + { + label: 'Usage', + lang: 'tsx', + caption: 'Wire the card into your chat UI', + code: humanInTheLoopUsageSource, }, ], }, @@ -231,7 +389,7 @@ export const registryItems: RegistryItem[] = [ longDescription: "A durable bot session pattern for Chat SDK. Write the bot once, deploy to Slack, Microsoft Teams, Google Chat, Discord, Telegram, GitHub, Linear, or WhatsApp — and let each conversation thread run as its own workflow. Multi-turn state lives in the durable event log instead of hand-rolled Redis bookkeeping. The bot can sleep for hours waiting on a user reply, schedule a follow-up days later, or pause on a long-running tool call — and survive every deploy and cold start in between. Inbound messages route to either a `start()` (first mention) or `resumeHook()` (every subsequent message), with the `runId` stored in Chat SDK's thread state. 
Outbound replies are durable steps, so platform side-effects are recorded once and replayed safely on restart.", tags: ['chat', 'bots', 'slack', 'teams', 'discord', 'durable'], - category: 'vercel', + categories: ['vercel', 'agent'], homepage: 'https://chat-sdk.dev', docsUrl: 'https://chat-sdk.dev/docs/guides/durable-chat-sessions-nextjs', sourceUrl: @@ -316,6 +474,631 @@ export const registryItems: RegistryItem[] = [ }, ], }, + { + id: 'sandbox', + name: 'Vercel Sandbox', + logo: 'sandbox', + description: 'Persistent code-execution session beyond the 5-hour cap.', + longDescription: + 'An always-resumable code-execution session built on Vercel Sandbox. One workflow run owns one sandbox for its entire lifetime — full filesystem, network, and runtime — and the client only has to remember a single `runId`. When the user goes idle, the workflow snapshots the VM and hibernates indefinitely at zero cost; when they return, the same filesystem, installed packages, and git history are right there waiting. The pattern also rolls over the sandbox hard cap automatically: a few minutes before the 5-hour deadline it snapshots, spins up a fresh VM from that snapshot, and keeps going — so the logical session can run effectively forever on top of time-bounded infrastructure. Perfect for coding agents, AI dev environments, and any workload where users walk away and come back days later.', + tags: ['sandbox', 'agents', 'sessions', 'durable', 'snapshots'], + categories: ['vercel', 'agent'], + homepage: 'https://vercel.com/docs/sandbox', + docsUrl: 'https://vercel.com/docs/sandbox', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/sandbox.mdx', + shadcnSlug: '@workflow-sdk/sandbox', + envVars: [ + { + name: 'VERCEL_OIDC_TOKEN', + description: + 'OIDC token used by `@vercel/sandbox` to authenticate. 
Set automatically when deployed to Vercel; locally, run `vercel env pull` to populate it.', + }, + ], + files: [ + { + path: 'workflows/sandbox-session.ts', + description: + 'The durable session workflow — `sandboxSessionWorkflow()` + `commandHook`, with idle hibernation and proactive sandbox refresh built in.', + }, + { + path: 'app/api/sandbox/start/route.ts', + description: + 'POST endpoint that starts a new session or reconnects to an existing one, replaying the durable event log to a returning client.', + }, + { + path: 'app/api/sandbox/command/route.ts', + description: + 'POST endpoint that resumes the command hook — every shell command the user runs flows through here.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/sandbox-session.ts', + code: sandboxWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/sandbox/start/route.ts', + code: sandboxStartRouteSource, + }, + { + label: 'Command route', + lang: 'tsx', + caption: 'app/api/sandbox/command/route.ts', + code: sandboxCommandRouteSource, + }, + { + label: 'Quickstart', + lang: 'tsx', + caption: 'Simpler one-shot pipeline (no session loop)', + code: sandboxUsageSource, + }, + ], + }, + { + id: 'batching', + name: 'Batching', + logo: 'batching', + description: + 'Process large collections in parallel batches with failure isolation between groups.', + longDescription: + 'Bulk-process arbitrary records by splitting them into fixed-size batches, running each batch concurrently with `Promise.allSettled` (failures inside a batch are isolated per record), and pacing batches with `sleep()` to respect downstream rate limits. Each record runs as its own step → durable, automatically retried up to 3×, and replayable. The workflow returns a tally with per-record failure reasons. 
Ships a generic `ImportRecord` shape — replace it with your own and customise the step.', + tags: ['batching', 'fan-out', 'parallel', 'bulk-import'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/batching', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/batching.mdx', + shadcnSlug: '@workflow-sdk/batching', + files: [ + { + path: 'workflows/batching.ts', + description: + 'Generic `batchImport()` — chunks records, runs each batch with Promise.allSettled, paces with sleep(), returns a tally + failure list.', + }, + { + path: 'app/api/batching/route.ts', + description: 'POST endpoint that starts the batch import workflow.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/batching.ts', + code: batchingWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/batching/route.ts', + code: batchingStartRouteSource, + }, + ], + }, + { + id: 'idempotency', + name: 'Idempotency', + logo: 'idempotency', + description: + "Pass each step's deterministic stepId as the Idempotency-Key so retries never duplicate side effects.", + longDescription: + 'Workflow steps can be retried (on failure) and replayed (on cold start). Without an idempotency key, that means duplicate Stripe charges, duplicate emails, duplicate records. `getStepMetadata().stepId` returns a deterministic ID that is stable across retries and replays of the same step — pass it as the `Idempotency-Key` header to any external API that supports the convention. 
Ships a Stripe-shaped charge + receipt example; the same shape works for any provider.', + tags: ['idempotency', 'stripe', 'retries', 'exactly-once'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/idempotency', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/idempotency.mdx', + shadcnSlug: '@workflow-sdk/idempotency', + envVars: [ + { + name: 'STRIPE_SECRET_KEY', + description: + 'Server-side Stripe secret key. Used by the example charge step — swap for any provider that supports idempotency keys.', + getKeyUrl: 'https://dashboard.stripe.com/apikeys', + exampleValue: 'sk_live_********', + }, + ], + files: [ + { + path: 'workflows/idempotency.ts', + description: + '`chargeCustomer()` workflow — Stripe charge + receipt, both keyed by their step IDs so retries dedupe automatically.', + }, + { + path: 'app/api/idempotency/route.ts', + description: 'POST endpoint that starts the charge workflow.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/idempotency.ts', + code: idempotencyWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/idempotency/route.ts', + code: idempotencyStartRouteSource, + }, + ], + }, + { + id: 'rate-limiting', + name: 'Rate Limiting', + logo: 'rate-limiting', + description: + 'Handle 429 responses and transient failures with RetryableError + automatic backoff.', + longDescription: + "Stop writing manual sleep-retry loops. Throw `RetryableError` with a `retryAfter` value (millis, duration string, or `Date`) and the workflow runtime reschedules the step natively — more efficient than wall-clock sleeps and survives cold starts. Ships two flavors: Retry-After (read the header, pass it through) and exponential backoff (use `getStepMetadata().attempt` for `1s, 4s, 9s…`). 
`FatalError` short-circuits retries when retrying won't help.", + tags: ['rate-limit', 'retry', 'backoff', '429'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/rate-limiting', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/rate-limiting.mdx', + shadcnSlug: '@workflow-sdk/rate-limiting', + files: [ + { + path: 'workflows/rate-limiting.ts', + description: + '`syncContact()` — Retry-After header on 429, exponential backoff on 5xx, `maxRetries = 10` override for known-flaky endpoints.', + }, + { + path: 'app/api/rate-limiting/route.ts', + description: 'POST endpoint that starts the rate-limited sync.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/rate-limiting.ts', + code: rateLimitingWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/rate-limiting/route.ts', + code: rateLimitingStartRouteSource, + }, + ], + }, + { + id: 'saga', + name: 'Saga', + logo: 'saga', + description: + 'Multi-step business transactions with automatic rollback on failure.', + longDescription: + 'Coordinate transactions that span multiple services with automatic compensation. Each forward step does its work and pushes an undo onto a stack; if a later step throws `FatalError`, the catch block unwinds compensations in LIFO order to restore consistency. Compensations are themselves steps — durable, retried, and idempotent. 
Ships a complete "reserve seats → capture invoice → provision → notify" example shaped for replacement with your real APIs.', + tags: ['saga', 'transactions', 'rollback', 'compensation'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/saga', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/saga.mdx', + shadcnSlug: '@workflow-sdk/saga', + files: [ + { + path: 'workflows/saga.ts', + description: + 'Subscription-upgrade saga — three forward steps, three matching idempotent compensations, LIFO unwind on FatalError.', + }, + { + path: 'app/api/saga/route.ts', + description: 'POST endpoint that starts the saga workflow.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/saga.ts', + code: sagaWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/saga/route.ts', + code: sagaStartRouteSource, + }, + ], + }, + { + id: 'scheduling', + name: 'Scheduling', + logo: 'scheduling', + description: + 'Schedule any future action with durable sleep and a cancel hook — no DB flags required.', + longDescription: + 'Drop-in pattern for scheduled actions that need to be cancellable. The workflow races a durable `sleep()` against a `defineHook()` keyed by a stable token (you choose — e.g. `schedule:`). Whichever resolves first wins: timer fires → run the action; hook resolves → cancel cleanly. Costs nothing while sleeping, and survives restarts/deployments. 
Generic action shape — swap the `runAction` step for emails, push notifications, Slack messages, webhooks, anything.', + tags: ['scheduling', 'reminders', 'cancellable', 'sleep'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/scheduling', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/scheduling.mdx', + shadcnSlug: '@workflow-sdk/scheduling', + files: [ + { + path: 'workflows/scheduling.ts', + description: + '`scheduleAction()` workflow + exported `cancelSchedule` hook + `runAction` step you customise per use case.', + }, + { + path: 'app/api/scheduling/route.ts', + description: 'POST endpoint that schedules a new action.', + }, + { + path: 'app/api/scheduling/cancel/route.ts', + description: + 'POST endpoint that cancels an in-flight schedule by token. Idempotent — safe to call when the schedule has already fired.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/scheduling.ts', + code: schedulingWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/scheduling/route.ts', + code: schedulingStartRouteSource, + }, + { + label: 'Cancel route', + lang: 'tsx', + caption: 'app/api/scheduling/cancel/route.ts', + code: schedulingCancelRouteSource, + }, + ], + }, + { + id: 'sequential-and-parallel', + name: 'Sequential & Parallel', + logo: 'sequential-and-parallel', + description: + 'Compose steps with await, Promise.all, and Promise.race against durable sleeps and webhooks.', + longDescription: + 'Workflows are plain async functions, so the standard composition primitives apply unchanged — sequential `await` for pipelines, `Promise.all` for fan-out, `Promise.race` for first-finisher logic. Because `sleep()` and `createWebhook()` are also promises, racing real work against a durable deadline is a one-liner. 
Ships a single workflow file with three illustrative entry points (pipeline / fan-out / race-with-sleep) and a start route — replace the placeholder steps with your real logic.', + tags: ['composition', 'parallel', 'race', 'pipeline'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: + 'https://workflow-sdk.dev/cookbook/common-patterns/sequential-and-parallel', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx', + shadcnSlug: '@workflow-sdk/sequential-and-parallel', + files: [ + { + path: 'workflows/sequential-and-parallel.ts', + description: + 'Three entry points — pipeline, fan-out, race — over a small set of placeholder steps you replace with real work.', + }, + { + path: 'app/api/sequential-and-parallel/route.ts', + description: 'POST endpoint that starts the fan-out workflow.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/sequential-and-parallel.ts', + code: sequentialAndParallelWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/sequential-and-parallel/route.ts', + code: sequentialAndParallelStartRouteSource, + }, + ], + }, + { + id: 'timeouts', + name: 'Timeouts', + logo: 'timeouts', + description: + 'Add deadlines to slow steps, hooks, and webhooks by racing them against durable sleep.', + longDescription: + 'Bound how long any work can take. `Promise.race([work, sleep("30s")])` returns whichever resolves first; tag the sleep branch with a sentinel value so TypeScript narrows the result. Ships hard-timeout (throw on deadline), soft-timeout (fall back to a cached value), and the webhook + 7-day deadline shape for human approvals. 
Note: the loser keeps running with side effects intact — see Distributed Abort Controller for hard cross-process cancellation.', + tags: ['timeout', 'deadline', 'race', 'sleep'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/timeouts', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/timeouts.mdx', + shadcnSlug: '@workflow-sdk/timeouts', + files: [ + { + path: 'workflows/timeouts.ts', + description: + 'Three entry points — hard timeout, soft timeout with fallback, and a webhook racing a 7-day deadline.', + }, + { + path: 'app/api/timeouts/route.ts', + description: 'POST endpoint that starts the hard-timeout workflow.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/timeouts.ts', + code: timeoutsWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/timeouts/route.ts', + code: timeoutsStartRouteSource, + }, + ], + }, + { + id: 'webhooks', + name: 'Webhooks', + logo: 'webhooks', + description: + 'Receive HTTP callbacks from external services, process them durably, and respond inline.', + longDescription: + 'Drop-in webhook receiver pattern. `createWebhook()` returns a URL the workflow can `for await` over; each incoming request is processed in its own step with full Node.js access, and `request.respondWith()` lets the step shape the HTTP response inline. 
Ships two flavors: a long-running listener (Stripe-style multi-event ledger that exits on a terminal event), and async-request-reply (submit to a vendor with our webhook URL, race the callback against a 30-second deadline).', + tags: ['webhook', 'callback', 'integration', 'external-api'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/webhooks', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/webhooks.mdx', + shadcnSlug: '@workflow-sdk/webhooks', + files: [ + { + path: 'workflows/webhooks.ts', + description: + 'Two patterns — `paymentWebhook()` (long-running event ledger) and `asyncVerification()` (request-reply with deadline).', + }, + { + path: 'app/api/webhooks/route.ts', + description: + 'POST endpoint that starts the payment webhook. The auto-generated webhook URL is exposed via `webhook.url` in the workflow return value.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/webhooks.ts', + code: webhooksWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/webhooks/route.ts', + code: webhooksStartRouteSource, + }, + ], + }, + { + id: 'workflow-composition', + name: 'Workflow Composition', + logo: 'workflow-composition', + description: + 'Call workflows from workflows — direct await for inline composition, start() for independent runs.', + longDescription: + 'Two ways to compose workflows. Direct `await` of a child workflow flattens its steps into the parent\'s event log — one runId, one retry boundary, one timeline. `start()` from inside a step spawns the child as an independent run with its own runId, separate event log, and its own retry boundary — ideal for fire-and-forget, fan-out, and self-upgrading workflows (`deploymentId: "latest"`). 
Ships parent + child workflows + a spawn step + a start route.', + tags: ['composition', 'child-workflow', 'spawn', 'start'], + categories: ['common'], + homepage: 'https://workflow-sdk.dev', + docsUrl: + 'https://workflow-sdk.dev/cookbook/common-patterns/workflow-composition', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/workflow-composition.mdx', + shadcnSlug: '@workflow-sdk/workflow-composition', + files: [ + { + path: 'workflows/workflow-composition.ts', + description: + 'Parent + child workflows demonstrating both direct-await flattening and background spawn via `start()` from a step.', + }, + { + path: 'app/api/workflow-composition/route.ts', + description: 'POST endpoint that starts the parent workflow.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/workflow-composition.ts', + code: workflowCompositionWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/workflow-composition/route.ts', + code: workflowCompositionStartRouteSource, + }, + ], + }, + { + id: 'child-workflows', + name: 'Child Workflows', + logo: 'child-workflows', + description: + 'Spawn many independent child workflows from a parent and orchestrate them with spawn-and-poll.', + longDescription: + 'Use child workflows when one workflow needs to orchestrate many independent units of work. Each child runs as its own workflow with a separate event log, retry boundary, and failure scope — a failing child never aborts unrelated work, and you get per-item observability via each child\'s runId. Ships the full parent + child + chunked spawn step + durable polling loop + result-collection step. 
Pre-wired with `deploymentId: "latest"` so children pick up future deployments.', + tags: ['fan-out', 'spawn', 'poll', 'orchestration'], + categories: ['advanced'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/advanced/child-workflows', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/child-workflows.mdx', + shadcnSlug: '@workflow-sdk/child-workflows', + files: [ + { + path: 'workflows/child-workflows.ts', + description: + '`processDocumentBatch()` parent + `processDocument()` child + chunked spawn step + durable polling loop + result-collection step.', + }, + { + path: 'app/api/child-workflows/route.ts', + description: + 'POST endpoint that starts the parent workflow with a list of document IDs.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'workflows/child-workflows.ts', + code: childWorkflowsWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/child-workflows/route.ts', + code: childWorkflowsStartRouteSource, + }, + ], + }, + { + id: 'distributed-abort-controller', + name: 'Distributed Abort Controller', + logo: 'distributed-abort-controller', + description: + 'AbortController-shaped API for cross-process cancellation, backed by a durable workflow.', + longDescription: + "A drop-in replacement for `AbortController` that works across process boundaries. Calling `.abort()` on one machine fires the `.signal` `AbortSignal` on any other machine that created a controller with the same semantic ID — no run ID sharing required. Backed by a coordination workflow that races a manual abort hook against a TTL sleep; when triggered, it writes to the run's stream and any subscriber's `AbortSignal` flips. Includes `Create`-is-idempotent reconnection (find an existing run by hook token), TTL auto-cleanup, and an optional grace period for late subscribers. 
Ships the lib module, a remote-abort route, and a client cancel button.", + tags: ['abort', 'cancellation', 'distributed', 'cross-process'], + categories: ['advanced'], + homepage: 'https://workflow-sdk.dev', + docsUrl: + 'https://workflow-sdk.dev/cookbook/advanced/distributed-abort-controller', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx', + shadcnSlug: '@workflow-sdk/distributed-abort-controller', + files: [ + { + path: 'lib/distributed-abort-controller.ts', + description: + 'Coordination workflow + `DistributedAbortController` class with `.abort()` and `.signal` (an `AbortSignal`).', + }, + { + path: 'app/api/abort/[id]/route.ts', + description: + 'POST endpoint that triggers the abort signal for a given semantic ID. Idempotent.', + }, + { + path: 'components/cancel-button.tsx', + description: + 'Client component — calls the abort route on click and reflects the cancellation state in the UI.', + }, + ], + snippets: [ + { + label: 'Lib', + lang: 'tsx', + caption: 'lib/distributed-abort-controller.ts', + code: distributedAbortControllerLibSource, + }, + { + label: 'Abort route', + lang: 'tsx', + caption: 'app/api/abort/[id]/route.ts', + code: distributedAbortControllerRouteSource, + }, + { + label: 'Cancel button', + lang: 'tsx', + caption: 'components/cancel-button.tsx', + code: distributedAbortControllerButtonSource, + }, + { + label: 'Usage', + lang: 'tsx', + caption: 'Pass `controller.signal` to any AbortSignal-aware API', + code: distributedAbortControllerUsageSource, + }, + ], + }, + { + id: 'resend', + name: 'Resend', + logo: 'resend', + description: 'Onboarding email drip campaign.', + longDescription: + 'A production-ready email drip campaign powered by Resend. New users get a welcome email immediately, then follow-ups spaced hours, days, or weeks apart — whatever you configure. 
Each send is a workflow step that gets persisted once it succeeds, so if your server restarts or crashes mid-campaign, no one ever gets a duplicate. The waits between emails cost nothing (the campaign is fully paused, not idling), so it can span days or weeks without keeping anything running. And the moment a user converts, calling a single function from your app stops the whole thing instantly — no leftover emails, no extra database tables, no flag-checking on every send.', + tags: ['email', 'drip', 'cancellable', 'durable'], + categories: ['provider'], + homepage: 'https://resend.com', + docsUrl: 'https://resend.com/docs/send-with-nodejs', + sourceUrl: + 'https://github.com/vercel-labs/workflow_onboarding/tree/main/nextjs_workflow/app/workflows/providers', + shadcnSlug: '@workflow-sdk/resend', + envVars: [ + { + name: 'RESEND_API_KEY', + description: 'API key from your Resend account.', + getKeyUrl: 'https://resend.com/api-keys', + exampleValue: 're_********', + }, + ], + files: [ + { + path: 'app/workflows/providers/resendWorkflow.ts', + description: + 'The durable email drip workflow — `emailSequence()` + `cancelNudges` hook + the three send-email steps.', + }, + { + path: 'app/api/providers/resend/route.ts', + description: + 'POST endpoint that starts a new campaign and pre-cancels any in-flight run for the same email.', + }, + { + path: 'app/api/providers/resend/cancel/route.ts', + description: + 'POST endpoint your app calls when the user converts — resumes the hook so the campaign exits cleanly.', + }, + ], + snippets: [ + { + label: 'Workflow', + lang: 'tsx', + caption: 'app/workflows/providers/resendWorkflow.ts', + code: resendWorkflowSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/providers/resend/route.ts', + code: resendStartRouteSource, + }, + { + label: 'Cancel route', + lang: 'tsx', + caption: 'app/api/providers/resend/cancel/route.ts', + code: resendCancelRouteSource, + }, + { + label: 'Usage', + lang: 
'tsx', + caption: 'Trigger the campaign from your app', + code: resendUsageSource, + }, + ], + }, ]; export function getRegistryItem(id: string): RegistryItem | undefined { @@ -326,9 +1109,12 @@ export function getRegistryItemIds(): string[] { return registryItems.map((item) => item.id); } -export const categoryLabels: Record = { +export const categoryLabels: Record = { + agent: 'Agents', vercel: 'Vercel', - email: 'Email', + common: 'Common', + advanced: 'Advanced', + provider: 'Providers', storage: 'Storage', ai: 'AI', auth: 'Auth', diff --git a/docs/lib/registry/snippets/agent-cancellation.ts b/docs/lib/registry/snippets/agent-cancellation.ts new file mode 100644 index 0000000000..3df349b209 --- /dev/null +++ b/docs/lib/registry/snippets/agent-cancellation.ts @@ -0,0 +1,237 @@ +/** + * Source snippets for the Agent Cancellation registry entry. + * + * Drop-in cancellation pattern for any `DurableAgent`. The workflow races the + * agent against a `stopHook` keyed by the run ID; when the user clicks the + * Stop button, the route resumes the hook and the workflow exits cleanly, + * emitting a final `data-stopped` part to the stream so the client renders a + * clean ending. Falls back to `getRun(runId).cancel()` if the hook is already + * gone. + * + * IMPORTANT: this pattern does NOT cancel the underlying model stream. + * Tokens generated after the stop signal are still produced (and billed). + * What it DOES is exit the workflow function as soon as the hook fires and + * notify the client. For hard cross-process cancellation that signals the + * inner step to bail out, see the Distributed Abort Controller cookbook. + */ + +export const agentCancellationWorkflowSource = `import { DurableAgent } from "@workflow/ai/agent"; +import { + defineHook, + getWorkflowMetadata, + getWritable, +} from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Hook resumed by the stop API route. 
+export const stopHook = defineHook({ + schema: z.object({ reason: z.string().optional() }), +}); + +// Replace these with your real tools. +async function searchWeb({ query }: { query: string }) { + "use step"; + await new Promise((r) => setTimeout(r, 1500)); + return { + results: [{ title: \`\${query} — overview\`, snippet: \`Result for \${query}.\` }], + }; +} + +async function emitStopSignal(details: { reason?: string }) { + "use step"; + const writer = getWritable().getWriter(); + try { + await writer.write({ + type: "data-stopped", + id: "stop-signal", + data: details, + } as UIMessageChunk); + } finally { + writer.releaseLock(); + } +} + +export async function stoppableAgent(messages: ModelMessage[]) { + "use workflow"; + + // Token derived from the run ID so the stop API can resume by runId + // alone — no extra bookkeeping required. + const { workflowRunId } = getWorkflowMetadata(); + const hook = stopHook.create({ token: \`stop:\${workflowRunId}\` }); + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: "You are a research assistant. Search and summarize as needed.", + tools: { + searchWeb: { + description: "Search the web for information", + inputSchema: z.object({ query: z.string() }), + execute: searchWeb, + }, + }, + }); + + // Race the agent against the stop hook. When the hook fires, the workflow + // exits at its next \`await\` boundary; the underlying model stream may keep + // generating tokens in the background. + const result = await Promise.race([ + agent + .stream({ + messages, + writable: getWritable(), + maxSteps: 15, + }) + .then((r) => ({ type: "complete" as const, messages: r.messages })), + hook.then(({ reason }) => ({ type: "stopped" as const, reason })), + ]); + + // Emit a final stream part on stop so the client renders a clean ending. 
+ if (result.type === "stopped") { + await emitStopSignal({ reason: result.reason }); + } + + return result; +} +`; + +export const agentCancellationStartRouteSource = `import type { UIMessage } from "ai"; +import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; +import { start } from "workflow/api"; +import { stoppableAgent } from "@/workflows/stoppable-agent"; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + const modelMessages = await convertToModelMessages(messages); + + const run = await start(stoppableAgent, [modelMessages]); + + return createUIMessageStreamResponse({ + stream: run.readable, + headers: { "x-workflow-run-id": run.runId }, + }); +} +`; + +export const agentCancellationRouteSource = `import { getRun } from "workflow/api"; +import { NextResponse } from "next/server"; +import { stopHook } from "@/workflows/stoppable-agent"; + +export async function POST( + req: Request, + { params }: { params: Promise<{ runId: string }> }, +) { + const { runId } = await params; + const { reason } = (await req.json().catch(() => ({}))) as { + reason?: string; + }; + + // Try the graceful Stop Signal first. + try { + await stopHook.resume(\`stop:\${runId}\`, { + reason: reason ?? "User requested stop", + }); + return NextResponse.json({ success: true, mode: "stop-signal" }); + } catch (error) { + const msg = error instanceof Error ? error.message.toLowerCase() : ""; + if (!msg.includes("not found") && !msg.includes("expired")) { + throw error; + } + // Hook already consumed (e.g. agent finished, race resolved). Fall back + // to hard cancel so the run is definitely terminated. + try { + await getRun(runId).cancel(); + } catch { + // Run already in a terminal state — nothing to do. 
+ } + return NextResponse.json({ success: true, mode: "hard-cancel" }); + } +} +`; + +export const agentCancellationButtonSource = `"use client"; + +import { useState } from "react"; + +interface StopButtonProps { + /** Active workflow run ID (forwarded from \`x-workflow-run-id\` header). */ + runId: string | null | undefined; + /** Endpoint pattern; \`{runId}\` will be substituted. */ + endpoint?: string; + /** Optional className override. */ + className?: string; +} + +export function StopButton({ + runId, + endpoint = "/api/agent/{runId}/stop", + className, +}: StopButtonProps) { + const [stopping, setStopping] = useState(false); + + if (!runId) return null; + + const handleStop = async () => { + setStopping(true); + try { + await fetch(endpoint.replace("{runId}", runId), { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ reason: "User clicked stop" }), + }); + } finally { + setStopping(false); + } + }; + + return ( + + ); +} +`; + +export const agentCancellationUsageSource = `// In your chat client, capture the runId from the response header on the +// FIRST message and render the Stop button while the agent is streaming: +"use client"; + +import { useChat } from "@ai-sdk/react"; +import { useState } from "react"; +import { StopButton } from "@/components/stop-button"; + +export function Chat() { + const [runId, setRunId] = useState(null); + + const { messages, sendMessage, status } = useChat({ + api: "/api/agent", + onResponse(res) { + const id = res.headers.get("x-workflow-run-id"); + if (id) setRunId(id); + }, + }); + + return ( +
+ {/* Render messages, including a "stopped" line if you see a + \`data-stopped\` part. */} + {messages.map((m) => ( +
        <div key={m.id}>{/* render parts */}</div>
+ ))} + + {/* Show Stop only while the agent is actively streaming. */} + {status === "streaming" && ( + <StopButton runId={runId} /> + )} +
+ ); +} +`; diff --git a/docs/lib/registry/snippets/batching.ts b/docs/lib/registry/snippets/batching.ts new file mode 100644 index 0000000000..696ef1a306 --- /dev/null +++ b/docs/lib/registry/snippets/batching.ts @@ -0,0 +1,100 @@ +/** + * Source snippets for the Batching registry entry. + * + * Process a large list of records in fixed-size parallel batches with + * failure isolation between groups. Each batch runs concurrently via + * Promise.allSettled; sleep() between batches paces requests against + * downstream rate limits. Drop-in starter for bulk imports. + */ + +export const batchingWorkflowSource = `import { sleep } from "workflow"; + +export interface ImportRecord { + name: string; + email: string; + role: string; +} + +export async function batchImport(records: ImportRecord[], batchSize = 10) { + "use workflow"; + + let totalSucceeded = 0; + let totalFailed = 0; + const failures: Array<{ email: string; reason: string }> = []; + + for (let i = 0; i < records.length; i += batchSize) { + const batch = records.slice(i, i + batchSize); + + // Failures inside the batch are isolated — Promise.allSettled never throws. + const outcomes = await Promise.allSettled( + batch.map((record) => processRecord(record)), + ); + + for (let j = 0; j < outcomes.length; j++) { + const outcome = outcomes[j]; + if (outcome.status === "fulfilled") { + totalSucceeded++; + } else { + totalFailed++; + failures.push({ + email: batch[j].email, + reason: + outcome.reason instanceof Error + ? outcome.reason.message + : String(outcome.reason), + }); + } + } + + // Pace between batches so downstream APIs aren't overwhelmed. + // Tune (or remove) to match your provider's rate limits. + if (i + batchSize < records.length) { + await sleep("1s"); + } + } + + return { + total: records.length, + succeeded: totalSucceeded, + failed: totalFailed, + failures, + }; +} + +// Each record runs in its own step → durable, retried up to 3x by default. 
+async function processRecord(record: ImportRecord): Promise { + "use step"; + const res = await fetch("https://api.example.com/contacts", { + method: "POST", + body: JSON.stringify(record), + }); + if (!res.ok) { + throw new Error(\`Failed to import \${record.email} (\${res.status})\`); + } + const { id } = await res.json(); + return id; +} +`; + +export const batchingStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { batchImport, type ImportRecord } from "@/workflows/batching"; + +// POST /api/batching { records: ImportRecord[], batchSize?: number } +export async function POST(request: Request) { + const { records, batchSize } = (await request.json()) as { + records: ImportRecord[]; + batchSize?: number; + }; + + if (!Array.isArray(records) || records.length === 0) { + return NextResponse.json( + { error: "records must be a non-empty array" }, + { status: 400 }, + ); + } + + const run = await start(batchImport, [records, batchSize ?? 10]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/snippets/child-workflows.ts b/docs/lib/registry/snippets/child-workflows.ts new file mode 100644 index 0000000000..c1c5d30096 --- /dev/null +++ b/docs/lib/registry/snippets/child-workflows.ts @@ -0,0 +1,156 @@ +/** + * Source snippets for the Child Workflows registry entry. + * + * Spawn-and-poll pattern for orchestrating many independent workflow runs + * from a parent — each child has its own runId, event log, and retry + * boundary, so a failing child never takes down siblings or the parent. + * Ships parent + child + spawn step + polling step + collection step. + */ + +export const childWorkflowsWorkflowSource = `import { sleep } from "workflow"; +import { getRun, start } from "workflow/api"; + +const POLL_INTERVAL = "30s"; +// 60 minutes worth of poll iterations at the configured interval. 
+const MAX_POLL_ITERATIONS = 120; +// Spawn in chunks so a single step doesn't time out on huge batches. +const SPAWN_CHUNK_SIZE = 25; + +// CHILD — one independent unit of work. Replace the steps with real logic. +export async function processDocument(documentId: string) { + "use workflow"; + + const content = await fetchDocument(documentId); + const analysis = await analyzeContent(content); + const summary = await generateSummary(analysis); + + return { documentId, summary }; +} + +// PARENT — orchestrates many children, polls them, collects their output. +export async function processDocumentBatch(documentIds: string[]) { + "use workflow"; + + // Spawn in chunks. Each chunk is its own step → durable + retried. + const allRunIds: string[] = []; + for (let i = 0; i < documentIds.length; i += SPAWN_CHUNK_SIZE) { + const chunk = documentIds.slice(i, i + SPAWN_CHUNK_SIZE); + const runIds = await spawnChunk(chunk); + allRunIds.push(...runIds); + } + + // Poll until every child has reached a terminal status. + await pollUntilComplete(allRunIds); + + // Collect return values from each child. + const results = await collectResults(allRunIds); + + return { processed: results.length, results }; +} + +// Polling loop — lives inside the workflow so sleeps replay durably. +async function pollUntilComplete(runIds: string[]): Promise { + for (let iteration = 0; iteration < MAX_POLL_ITERATIONS; iteration++) { + const status = await checkStatuses(runIds); + + if (status.running === 0) { + if (status.failed > 0) { + throw new Error( + \`\${status.failed} of \${runIds.length} children failed\`, + ); + } + return; + } + + await sleep(POLL_INTERVAL); + } + + throw new Error("Timed out waiting for children to complete"); +} + +// start() must be called from a step, not from a workflow function. +// deploymentId: "latest" makes children pick up future deployments. 
+async function spawnChunk(documentIds: string[]): Promise { + "use step"; + + const runIds: string[] = []; + for (const docId of documentIds) { + const run = await start(processDocument, [docId], { deploymentId: "latest" }); + runIds.push(run.runId); + } + return runIds; +} + +// getRun() also must be called from a step. +async function checkStatuses( + runIds: string[], +): Promise<{ running: number; completed: number; failed: number }> { + "use step"; + + let running = 0; + let completed = 0; + let failed = 0; + + for (const runId of runIds) { + const status = await getRun(runId).status; + if (status === "completed") completed++; + else if (status === "failed" || status === "cancelled") failed++; + else running++; + } + + return { running, completed, failed }; +} + +async function collectResults( + runIds: string[], +): Promise> { + "use step"; + + const results: Array<{ documentId: string; summary: string }> = []; + for (const runId of runIds) { + const value = (await getRun(runId).returnValue) as { + documentId: string; + summary: string; + }; + results.push(value); + } + return results; +} + +// Replace the step bodies below with your real per-document work. 
+async function fetchDocument(documentId: string): Promise { + "use step"; + const res = await fetch(\`https://docs.example.com/api/\${documentId}\`); + return res.text(); +} + +async function analyzeContent(content: string): Promise { + "use step"; + return \`analysis of \${content.length} chars\`; +} + +async function generateSummary(analysis: string): Promise { + "use step"; + return \`Summary: \${analysis}\`; +} +`; + +export const childWorkflowsStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { processDocumentBatch } from "@/workflows/child-workflows"; + +// POST /api/child-workflows { documentIds: string[] } +export async function POST(request: Request) { + const { documentIds } = (await request.json()) as { documentIds: string[] }; + + if (!Array.isArray(documentIds) || documentIds.length === 0) { + return NextResponse.json( + { error: "documentIds must be a non-empty array" }, + { status: 400 }, + ); + } + + const run = await start(processDocumentBatch, [documentIds]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/snippets/distributed-abort-controller.ts b/docs/lib/registry/snippets/distributed-abort-controller.ts new file mode 100644 index 0000000000..30a3d75e36 --- /dev/null +++ b/docs/lib/registry/snippets/distributed-abort-controller.ts @@ -0,0 +1,266 @@ +/** + * Source snippets for the Distributed Abort Controller registry entry. + * + * AbortController-shaped API backed by a durable workflow — calling .abort() + * on one machine fires the .signal AbortSignal on any other machine that + * created a controller with the same semantic ID. TTL auto-expires stale + * controllers; grace period keeps the hook alive for late subscribers. 
+ * + * Ships: + * - lib/distributed-abort-controller.ts — workflow + class + * - app/api/abort/[id]/route.ts — remote abort endpoint + * - components/cancel-button.tsx — drop-in client cancel button + */ + +export const distributedAbortControllerLibSource = `import { defineHook, getWritable, sleep } from "workflow"; +import { start, getRun, getHookByToken } from "workflow/api"; + +const DEFAULT_TTL_MS = 24 * 60 * 60 * 1000; // 24h +const DEFAULT_GRACE_MS = 60 * 60 * 1000; // 1h grace for late subscribers + +export const abortHook = defineHook<{ reason?: string }>(); + +export type AbortMessage = { + type: "abort"; + reason?: string; + expired?: boolean; +}; + +function getAbortToken(id: string): string { + return \`abort:\${id}\`; +} + +async function writeAbortSignal(reason?: string, expired?: boolean) { + "use step"; + const writable = getWritable(); + const writer = writable.getWriter(); + try { + await writer.write({ type: "abort", reason, expired }); + } finally { + writer.releaseLock(); + } + await writable.close(); +} + +// Coordination workflow — races a manual abort against TTL expiration, +// writes the result to the run's stream, then sleeps through the grace +// period (only on TTL expiry) so late subscribers can still observe it. 
+export async function abortControllerWorkflow( + id: string, + ttlMs: number, + graceMs: number, +) { + "use workflow"; + + const startTime = Date.now(); + const hook = abortHook.create({ token: getAbortToken(id) }); + + const result = await Promise.race([ + hook.then((payload) => ({ + reason: payload.reason, + expired: false, + })), + sleep(\`\${ttlMs}ms\`).then(() => ({ + reason: "Controller expired", + expired: true, + })), + ]); + + await writeAbortSignal(result.reason, result.expired); + + if (result.expired) { + const elapsed = Date.now() - startTime; + const remainingTime = graceMs - (elapsed - ttlMs); + if (remainingTime > 0) { + await sleep(\`\${remainingTime}ms\`); + } + } + + return { aborted: true, reason: result.reason, expired: result.expired }; +} + +/** + * AbortController-shaped API on top of a durable workflow. + * Calling \`.abort()\` on any process triggers \`.signal\` on any other + * process that created a controller with the same ID. + */ +export class DistributedAbortController { + private id: string; + readonly runId: string; + + private constructor(id: string, runId: string) { + this.id = id; + this.runId = runId; + } + + /** + * Create or reconnect by semantic ID. If a controller with this ID + * already exists, returns a handle to it; otherwise spawns a new + * coordination workflow. + */ + static async create( + id: string, + options: { ttlMs?: number; graceMs?: number } = {}, + ): Promise { + const { ttlMs = DEFAULT_TTL_MS, graceMs = DEFAULT_GRACE_MS } = options; + const token = getAbortToken(id); + + const existingHook = await getHookByToken(token).catch(() => null); + if (existingHook) { + return new DistributedAbortController(id, existingHook.runId); + } + + const run = await start(abortControllerWorkflow, [id, ttlMs, graceMs]); + return new DistributedAbortController(id, run.runId); + } + + /** + * Trigger the abort signal. Idempotent — safe to call multiple times or + * after the workflow has completed. 
+ */ + async abort(reason?: string): Promise { + try { + await abortHook.resume(getAbortToken(this.id), { reason }); + } catch (error) { + const msg = error instanceof Error ? error.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + return; + } + throw error; + } + } + + /** + * AbortSignal that fires when \`abort()\` is called or TTL expires. Each + * access to \`.signal\` creates a fresh listener — cache the value if you + * subscribe more than once. + */ + get signal(): AbortSignal { + const run = getRun<{ aborted: boolean; reason?: string; expired?: boolean }>( + this.runId, + ); + const controller = new AbortController(); + const readable = run.getReadable(); + + (async () => { + const reader = readable.getReader(); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + if (value.type === "abort") { + const reason = value.expired + ? \`\${value.reason} (expired)\` + : value.reason; + controller.abort(reason); + break; + } + } + } catch (error) { + if (!controller.signal.aborted) { + controller.abort( + error instanceof Error ? error.message : "Stream read failed", + ); + } + } finally { + reader.releaseLock(); + } + })(); + + return controller.signal; + } +} +`; + +export const distributedAbortControllerRouteSource = `import { NextResponse } from "next/server"; +import { DistributedAbortController } from "@/lib/distributed-abort-controller"; + +// POST /api/abort/[id] { reason? } +// Idempotent — triggering abort twice or after expiry is a no-op. +export async function POST( + request: Request, + { params }: { params: Promise<{ id: string }> }, +) { + const { id } = await params; + const { reason } = (await request + .json() + .catch(() => ({ reason: undefined }))) as { reason?: string }; + + const controller = await DistributedAbortController.create(id); + await controller.abort(reason ?? 
"Cancelled via API"); + + return NextResponse.json({ success: true, id }); +} +`; + +export const distributedAbortControllerButtonSource = `"use client"; + +import { useState } from "react"; + +interface CancelButtonProps { + /** Same semantic ID used to create the controller on the server. */ + taskId: string; + /** Optional label override. */ + label?: string; +} + +export function CancelButton({ taskId, label = "Cancel" }: CancelButtonProps) { + const [pending, setPending] = useState(false); + const [done, setDone] = useState(false); + + const handleCancel = async () => { + setPending(true); + try { + await fetch(\`/api/abort/\${encodeURIComponent(taskId)}\`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ reason: "User clicked cancel" }), + }); + setDone(true); + } finally { + setPending(false); + } + }; + + return ( + + ); +} +`; + +export const distributedAbortControllerUsageSource = `// Server-side example: cancel a long-running fetch when the user clicks +// the cancel button on a different machine / tab. +import { DistributedAbortController } from "@/lib/distributed-abort-controller"; + +export async function runLongOperation(taskId: string) { + const controller = await DistributedAbortController.create(taskId, { + // Optional: shorter TTL for quick tasks. + ttlMs: 10 * 60 * 1000, // 10 minutes + }); + + try { + const res = await fetch("https://api.example.com/long-operation", { + signal: controller.signal, + }); + return await res.json(); + } catch (err) { + if (err instanceof DOMException && err.name === "AbortError") { + return { aborted: true, reason: controller.signal.reason }; + } + throw err; + } +} + +// Cross-process: any other process can cancel by recreating the controller +// with the same semantic ID — no run ID sharing needed. 
+// +// const same = await DistributedAbortController.create(taskId); +// await same.abort("Cancelled by admin"); +`; diff --git a/docs/lib/registry/snippets/durable-agent.ts b/docs/lib/registry/snippets/durable-agent.ts new file mode 100644 index 0000000000..5dc276b951 --- /dev/null +++ b/docs/lib/registry/snippets/durable-agent.ts @@ -0,0 +1,191 @@ +/** + * Source snippets for the Durable Agent registry entry. + * + * The foundational AI agent pattern on Workflow: a `DurableAgent` whose tools + * are `"use step"` functions, streamed to the client via `getWritable()`. If + * the process crashes mid-tool-call, the agent resumes from the last completed + * step on replay — every retry, replay, and reconnect is handled by the + * runtime, no extra bookkeeping in your code. + * + * The example uses a flight booking agent because it's the simplest case that + * exercises every aspect of the pattern (multi-tool, multi-turn, side-effecty + * external API calls). Replace the tools with your own — the surrounding + * shape stays identical. + * + * Note on escaping: template literal placeholders inside the snippet (e.g. + * `${runId}`) are escaped as `\${...}` so they stay literal here. + */ + +export const durableAgentWorkflowSource = `import { DurableAgent } from "@workflow/ai/agent"; +import { getWritable } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Each tool is a regular async function with \`"use step"\` at the top. 
+// That single directive turns it into a durable step: +// - automatic retries on failure (3x by default) +// - one entry per call in the workflow event log +// - full Node.js access (fetch, fs, child_process, native modules, …) +// - re-entrant: replays return the recorded result instead of re-running +async function searchFlights({ from, to, date }: { + from: string; + to: string; + date: string; +}) { + "use step"; + const res = await fetch( + \`https://api.example.com/flights?from=\${from}&to=\${to}&date=\${date}\`, + ); + if (!res.ok) throw new Error(\`Search failed: \${res.status}\`); + return res.json(); +} + +async function bookFlight({ flightId, passenger }: { + flightId: string; + passenger: string; +}) { + "use step"; + const res = await fetch("https://api.example.com/bookings", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ flightId, passenger }), + }); + if (!res.ok) throw new Error(\`Booking failed: \${res.status}\`); + return res.json(); +} + +async function checkWeather({ city }: { city: string }) { + "use step"; + const res = await fetch( + \`https://api.weather.com/forecast?city=\${city}\`, + ); + return res.json(); +} + +// The agent itself is a workflow. \`"use workflow"\` declares it as the +// orchestrator — its execution is replay-safe and persisted to the event log. +// Each \`agent.stream()\` call drives the LLM loop; tools fire as steps. +export async function flightAgent(messages: ModelMessage[]) { + "use workflow"; + + const agent = new DurableAgent({ + // Any AI Gateway model string works — swap providers without touching + // the durability layer. 
+ model: "anthropic/claude-haiku-4.5", + instructions: "You are a helpful flight booking assistant.", + tools: { + searchFlights: { + description: "Search for available flights between two airports.", + inputSchema: z.object({ + from: z.string().describe("Departure airport code"), + to: z.string().describe("Arrival airport code"), + date: z.string().describe("Travel date (YYYY-MM-DD)"), + }), + execute: searchFlights, + }, + bookFlight: { + description: "Book a specific flight for a passenger.", + inputSchema: z.object({ + flightId: z.string().describe("Flight ID from search results"), + passenger: z.string().describe("Passenger full name"), + }), + execute: bookFlight, + }, + checkWeather: { + description: "Check the weather forecast for a city.", + inputSchema: z.object({ + city: z.string().describe("City name"), + }), + execute: checkWeather, + }, + }, + }); + + // \`getWritable()\` streams text chunks, tool calls, and tool + // results to the client in real time via \`createUIMessageStreamResponse\`. + // \`maxSteps\` caps the LLM loop so a runaway tool-calling agent can't burn + // through your budget — tune for your use case. + const result = await agent.stream({ + messages, + writable: getWritable(), + maxSteps: 10, + }); + + // Return the final messages so multi-turn callers can pass them back in. + return { messages: result.messages }; +} +`; + +export const durableAgentStartRouteSource = `import type { UIMessage } from "ai"; +import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; +import { start } from "workflow/api"; +import { flightAgent } from "@/workflows/flight-agent"; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + // The client sends \`UIMessage\`s; the agent works on \`ModelMessage\`s. This + // converts in-place — no information is lost. 
+ const modelMessages = await convertToModelMessages(messages); + + // \`start()\` kicks off a new workflow run and returns a readable stream of + // UI message chunks plus the run ID. The client should keep that ID around + // so it can reconnect (\`useChat()\`'s WorkflowChatTransport handles this + // automatically). + const run = await start(flightAgent, [modelMessages]); + + return createUIMessageStreamResponse({ + stream: run.readable, + headers: { "x-workflow-run-id": run.runId }, + }); +} +`; + +export const durableAgentClientSource = `"use client"; + +import { useChat } from "@ai-sdk/react"; +import { WorkflowChatTransport } from "@workflow/ai"; + +/** + * Minimal flight-agent chat UI. \`WorkflowChatTransport\` forwards the + * \`x-workflow-run-id\` header between turns so multi-turn conversations land + * on the same workflow run — and reconnect cleanly across page refreshes. + */ +export function FlightAgentChat() { + const { messages, sendMessage, status } = useChat({ + transport: new WorkflowChatTransport({ api: "/api/flight-agent" }), + }); + + return ( +
+
+ {messages.map((message) => ( +
+ {message.role === "user" ? "You" : "Agent"}:{" "} + {message.parts?.map((part, i) => + part.type === "text" ? {part.text} : null, + )} +
+ ))} +
+
{ + e.preventDefault(); + const input = (e.currentTarget.elements.namedItem( + "message", + ) as HTMLInputElement).value; + if (!input) return; + (e.currentTarget as HTMLFormElement).reset(); + await sendMessage({ text: input }); + }} + > + +
+
+ ); +} +`; diff --git a/docs/lib/registry/snippets/human-in-the-loop.ts b/docs/lib/registry/snippets/human-in-the-loop.ts new file mode 100644 index 0000000000..26bf42ad1f --- /dev/null +++ b/docs/lib/registry/snippets/human-in-the-loop.ts @@ -0,0 +1,318 @@ +/** + * Source snippets for the Human-in-the-Loop registry entry. + * + * Drop-in pattern for pausing a `DurableAgent` until a human approves a + * consequential action (booking, payment, irreversible delete, …) and then + * resuming with the decision. Built on `defineHook()` keyed by the tool call + * ID, with a custom data part streamed to the client so it can render + * approval controls before the workflow suspends. + * + * Note on escaping: template literal placeholders inside the snippet (e.g. + * `${runId}`) are escaped as `\${...}` so they stay literal here. + */ + +export const humanInTheLoopWorkflowSource = `import { DurableAgent } from "@workflow/ai/agent"; +import { defineHook, getWritable, sleep } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Hook keyed by the tool call ID — exported so the approval API route +// can resume it with the human's decision. +export const approvalHook = defineHook({ + schema: z.object({ + approved: z.boolean(), + comment: z.string().optional(), + }), +}); + +// Example tool that requires approval before it does anything irreversible. +// Replace the body with your real side effect (charge card, publish post, +// delete record, etc.). +async function performAction({ summary }: { summary: string }) { + "use step"; + console.log("Performing approved action:", summary); + return { ok: true, summary }; +} + +// Stream a custom data part BEFORE suspending so the client can render +// approval controls. Tool invocations don't stream until the tool returns, +// so without this the UI would have no way to show buttons. 
+async function emitApprovalRequest(details: {
+  toolCallId: string;
+  summary: string;
+  payload: Record<string, unknown>;
+}) {
+  "use step";
+  const writer = getWritable().getWriter();
+  try {
+    await writer.write({
+      type: "data-approval-needed",
+      id: details.toolCallId,
+      data: details,
+    } as UIMessageChunk);
+  } finally {
+    writer.releaseLock();
+  }
+}
+
+// Stream the resolution so the client can update the approval card.
+async function emitApprovalResolved(details: {
+  toolCallId: string;
+  result: string;
+}) {
+  "use step";
+  const writer = getWritable().getWriter();
+  try {
+    await writer.write({
+      type: "data-approval-resolved",
+      id: details.toolCallId,
+      data: details,
+    } as UIMessageChunk);
+  } finally {
+    writer.releaseLock();
+  }
+}
+
+// The approval tool. NOTE: no \`"use step"\` here — it uses workflow-level
+// primitives (\`defineHook().create()\`, \`Promise.race\`, \`sleep()\`) and must
+// run in the workflow context. Steps are called from within for the I/O.
+async function requestApproval(
+  { summary, payload }: {
+    summary: string;
+    payload: Record<string, unknown>;
+  },
+  { toolCallId }: { toolCallId: string },
+) {
+  // 1. Emit the approval request to the client BEFORE suspending.
+  await emitApprovalRequest({ toolCallId, summary, payload });
+
+  // 2. Suspend on the hook, with a durable timeout fallback.
+  const hook = approvalHook.create({ token: toolCallId });
+  const result = await Promise.race([
+    hook.then((p) => ({ type: "decision" as const, ...p })),
+    sleep("24h").then(() => ({
+      type: "timeout" as const,
+      approved: false as const,
+    })),
+  ]);
+
+  // 3. Resolve based on the outcome.
+ if (result.type === "timeout") { + const msg = "Approval request expired."; + await emitApprovalResolved({ toolCallId, result: msg }); + return msg; + } + if (!result.approved) { + const msg = \`Rejected: \${result.comment || "No reason given"}\`; + await emitApprovalResolved({ toolCallId, result: msg }); + return msg; + } + + const action = await performAction({ summary }); + const msg = \`Approved and executed: \${action.summary}\`; + await emitApprovalResolved({ toolCallId, result: msg }); + return msg; +} + +export async function approvalAgent(messages: ModelMessage[]) { + "use workflow"; + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: + "You are a careful assistant. ALWAYS call requestApproval before performing any consequential action.", + tools: { + requestApproval: { + description: + "Request human approval before performing a consequential action.", + inputSchema: z.object({ + summary: z.string().describe("Short description of the action."), + payload: z + .record(z.string(), z.unknown()) + .describe( + "Structured details rendered on the approval card — e.g. 
amount, recipient, etc.", + ), + }), + execute: requestApproval, + }, + }, + }); + + const result = await agent.stream({ + messages, + writable: getWritable(), + maxSteps: 15, + }); + + return { messages: result.messages }; +} +`; + +export const humanInTheLoopStartRouteSource = `import type { UIMessage } from "ai"; +import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; +import { start } from "workflow/api"; +import { approvalAgent } from "@/workflows/approval-agent"; + +export async function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + const modelMessages = await convertToModelMessages(messages); + + const run = await start(approvalAgent, [modelMessages]); + + return createUIMessageStreamResponse({ + stream: run.readable, + headers: { "x-workflow-run-id": run.runId }, + }); +} +`; + +export const humanInTheLoopRouteSource = `import { NextResponse } from "next/server"; +import { approvalHook } from "@/workflows/approval-agent"; + +export async function POST(req: Request) { + const { toolCallId, approved, comment } = (await req.json()) as { + toolCallId: string; + approved: boolean; + comment?: string; + }; + + if (!toolCallId || typeof approved !== "boolean") { + return NextResponse.json( + { error: "toolCallId and approved are required" }, + { status: 400 }, + ); + } + + try { + await approvalHook.resume(toolCallId, { approved, comment }); + } catch (error) { + const msg = error instanceof Error ? error.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + return NextResponse.json( + { success: true, note: "No active approval for that toolCallId." 
}, + ); + } + throw error; + } + + return NextResponse.json({ success: true }); +} +`; + +export const humanInTheLoopCardSource = `"use client"; + +import type { UIMessage } from "ai"; + +interface ApprovalNeededPart { + type: "data-approval-needed"; + id: string; + data: { + toolCallId: string; + summary: string; + payload: Record; + }; +} + +interface ApprovalResolvedPart { + type: "data-approval-resolved"; + id: string; + data: { toolCallId: string; result: string }; +} + +interface ApprovalCardProps { + /** The \`data-approval-needed\` part from the message stream. */ + part: ApprovalNeededPart; + /** All messages in the conversation, used to detect resolution. */ + messages: UIMessage[]; + /** Endpoint that resumes the approval hook. */ + endpoint?: string; +} + +export function ApprovalCard({ + part, + messages, + endpoint = "/api/approval", +}: ApprovalCardProps) { + const { toolCallId, summary, payload } = part.data; + + // If we already streamed a resolution for this toolCallId, render it. + const resolved = messages + .flatMap((m) => m.parts ?? []) + .find( + (p): p is ApprovalResolvedPart => + p.type === "data-approval-resolved" && + (p as ApprovalResolvedPart).data.toolCallId === toolCallId, + ); + + if (resolved) { + return ( +
+ {resolved.data.result} +
+ ); + } + + const respond = async (approved: boolean, comment?: string) => { + await fetch(endpoint, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ toolCallId, approved, comment }), + }); + }; + + return ( +
+
{summary}
+
+        {JSON.stringify(payload, null, 2)}
+      
+
+ + +
+
+ ); +} +`; + +export const humanInTheLoopUsageSource = `// In your chat client (\`useChat()\`-based), render the ApprovalCard for any +// \`data-approval-needed\` part and hide the underlying tool invocation: +import { ApprovalCard } from "@/components/approval-card"; + +function MessageParts({ message, messages }) { + return message.parts?.map((part, i) => { + if (part.type === "data-approval-needed") { + return ( + + ); + } + // The approval tool itself doesn't have a useful UI representation — + // the card handles it. + if ( + part.type === "tool-invocation" && + part.toolInvocation.toolName === "requestApproval" + ) { + return null; + } + if (part.type === "text") return

{part.text}

; + return null; + }); +} +`; diff --git a/docs/lib/registry/snippets/idempotency.ts b/docs/lib/registry/snippets/idempotency.ts new file mode 100644 index 0000000000..76596a9e78 --- /dev/null +++ b/docs/lib/registry/snippets/idempotency.ts @@ -0,0 +1,86 @@ +/** + * Source snippets for the Idempotency registry entry. + * + * Pass the deterministic `stepId` from getStepMetadata() as the + * Idempotency-Key header to non-idempotent external APIs (Stripe, etc.) so + * retries and replays never duplicate side effects. Stripe-shaped example + * — same pattern works with any provider that supports idempotency keys. + */ + +export const idempotencyWorkflowSource = `import { getStepMetadata } from "workflow"; + +export async function chargeCustomer(customerId: string, amountCents: number) { + "use workflow"; + + const charge = await createCharge(customerId, amountCents); + await sendReceipt(customerId, charge.id); + + return { customerId, chargeId: charge.id, status: "completed" as const }; +} + +// stepId is deterministic across retries and replays — perfect idempotency key. +async function createCharge( + customerId: string, + amountCents: number, +): Promise<{ id: string; amount: number }> { + "use step"; + + const { stepId } = getStepMetadata(); + + const res = await fetch("https://api.stripe.com/v1/charges", { + method: "POST", + headers: { + Authorization: \`Bearer \${process.env.STRIPE_SECRET_KEY}\`, + "Content-Type": "application/x-www-form-urlencoded", + // Stripe dedupes on this — same stepId always returns the same charge. + "Idempotency-Key": stepId, + }, + body: new URLSearchParams({ + amount: String(amountCents), + currency: "usd", + customer: customerId, + }), + }); + + if (!res.ok) { + const error = await res.json().catch(() => ({ message: "unknown" })); + throw new Error(\`Charge failed: \${error.message ?? 
res.status}\`); + } + + return res.json(); +} + +async function sendReceipt(customerId: string, chargeId: string): Promise { + "use step"; + + const { stepId } = getStepMetadata(); + + await fetch("https://api.example.com/receipts", { + method: "POST", + headers: { + "Content-Type": "application/json", + "Idempotency-Key": stepId, + }, + body: JSON.stringify({ customerId, chargeId }), + }); +} +`; + +export const idempotencyStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { chargeCustomer } from "@/workflows/idempotency"; + +// POST /api/idempotency { customerId, amountCents } +export async function POST(request: Request) { + const { customerId, amountCents } = await request.json(); + if (!customerId || typeof amountCents !== "number") { + return NextResponse.json( + { error: "customerId and amountCents are required" }, + { status: 400 }, + ); + } + + const run = await start(chargeCustomer, [customerId, amountCents]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/snippets/rate-limiting.ts b/docs/lib/registry/snippets/rate-limiting.ts new file mode 100644 index 0000000000..f22ce99055 --- /dev/null +++ b/docs/lib/registry/snippets/rate-limiting.ts @@ -0,0 +1,82 @@ +/** + * Source snippets for the Rate Limiting registry entry. + * + * Throw RetryableError with a Retry-After value and the workflow runtime + * reschedules the step automatically — no manual sleep-retry loops. Includes + * exponential-backoff variant via getStepMetadata(). + */ + +export const rateLimitingWorkflowSource = `import { RetryableError, getStepMetadata } from "workflow"; + +export async function syncContact(contactId: string) { + "use workflow"; + + const contact = await fetchFromCrm(contactId); + await upsertToWarehouse(contactId, contact); + + return { contactId, status: "synced" as const }; +} + +// 429 — read Retry-After and let the runtime reschedule us. 
+async function fetchFromCrm(contactId: string): Promise<unknown> {
+  "use step";
+
+  const res = await fetch(\`https://crm.example.com/contacts/\${contactId}\`);
+
+  if (res.status === 429) {
+    const retryAfter = res.headers.get("Retry-After");
+    throw new RetryableError("Rate limited by CRM", {
+      retryAfter: retryAfter ? Number.parseInt(retryAfter, 10) * 1000 : "1m",
+    });
+  }
+
+  if (!res.ok) {
+    throw new Error(\`CRM returned \${res.status}\`);
+  }
+
+  return res.json();
+}
+
+// 5xx + 429 — exponential backoff using the current attempt number.
+async function upsertToWarehouse(
+  contactId: string,
+  contact: unknown,
+): Promise<void> {
+  "use step";
+
+  const { attempt } = getStepMetadata();
+  const res = await fetch(\`https://warehouse.example.com/contacts/\${contactId}\`, {
+    method: "PUT",
+    body: JSON.stringify(contact),
+  });
+
+  if (res.status === 429 || res.status >= 500) {
+    throw new RetryableError(\`Warehouse error \${res.status}\`, {
+      retryAfter: attempt ** 2 * 1000, // 1s, 4s, 9s...
+    });
+  }
+
+  if (!res.ok) {
+    throw new Error(\`Warehouse returned \${res.status}\`);
+  }
+}
+
+// Allow more retries than the default of 3 for known-flaky endpoints.
+upsertToWarehouse.maxRetries = 10; +`; + +export const rateLimitingStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { syncContact } from "@/workflows/rate-limiting"; + +// POST /api/rate-limiting { contactId } +export async function POST(request: Request) { + const { contactId } = await request.json(); + if (!contactId) { + return NextResponse.json({ error: "contactId is required" }, { status: 400 }); + } + + const run = await start(syncContact, [contactId]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/snippets/saga.ts b/docs/lib/registry/snippets/saga.ts new file mode 100644 index 0000000000..575173a57c --- /dev/null +++ b/docs/lib/registry/snippets/saga.ts @@ -0,0 +1,147 @@ +/** + * Source snippets for the Saga / Transactions & Rollbacks registry entry. + * + * Multi-step business transaction with automatic compensation on failure. + * Each forward step pushes an undo onto a stack; on FatalError the stack is + * unwound in LIFO order to restore consistency. Drop-in starter for + * "reserve → charge → provision → notify" style flows. + */ + +export const sagaWorkflowSource = `import { FatalError } from "workflow"; + +// Forward steps + matching compensations. Replace the API calls below +// with your real services. Compensations MUST be idempotent — they may be +// retried if the workflow restarts mid-rollback. +export async function subscriptionUpgradeSaga(accountId: string, seats: number) { + "use workflow"; + + // Each entry is { name, undo } so we can label what's being rolled back. 
+  const compensations: Array<{ name: string; undo: () => Promise<void> }> = [];
+
+  try {
+    const reservationId = await reserveSeats(accountId, seats);
+    compensations.push({
+      name: "Release seats",
+      undo: () => releaseSeats(accountId, reservationId),
+    });
+
+    const invoiceId = await captureInvoice(accountId, seats);
+    compensations.push({
+      name: "Refund invoice",
+      undo: () => refundInvoice(accountId, invoiceId),
+    });
+
+    const entitlementId = await provisionSeats(accountId, seats);
+    compensations.push({
+      name: "Deprovision seats",
+      undo: () => deprovisionSeats(accountId, entitlementId),
+    });
+
+    // Fire-and-forget — notifications don't need a compensation.
+    await sendConfirmation(accountId, invoiceId, entitlementId);
+
+    return { status: "completed" as const, accountId, invoiceId, entitlementId };
+  } catch (error) {
+    // Unwind in LIFO order. Each undo is itself a step → durable + retried.
+    for (const comp of compensations.reverse()) {
+      await comp.undo();
+    }
+    return {
+      status: "rolled_back" as const,
+      accountId,
+      reason: error instanceof Error ? error.message : "Unknown error",
+    };
+  }
+}
+
+// Forward steps — throw FatalError on permanent failure to skip retries
+// and trigger compensation immediately.
+async function reserveSeats(accountId: string, seats: number): Promise<string> {
+  "use step";
+  const res = await fetch("https://api.example.com/seats/reserve", {
+    method: "POST",
+    body: JSON.stringify({ accountId, seats }),
+  });
+  if (!res.ok) throw new FatalError("Seat reservation failed");
+  const { reservationId } = await res.json();
+  return reservationId;
+}
+
+async function captureInvoice(accountId: string, seats: number): Promise<string> {
+  "use step";
+  const res = await fetch("https://api.example.com/invoices", {
+    method: "POST",
+    body: JSON.stringify({ accountId, seats }),
+  });
+  if (!res.ok) throw new FatalError("Invoice capture failed");
+  const { invoiceId } = await res.json();
+  return invoiceId;
+}
+
+async function provisionSeats(accountId: string, seats: number): Promise<string> {
+  "use step";
+  const res = await fetch("https://api.example.com/entitlements", {
+    method: "POST",
+    body: JSON.stringify({ accountId, seats }),
+  });
+  if (!res.ok) throw new FatalError("Provisioning failed");
+  const { entitlementId } = await res.json();
+  return entitlementId;
+}
+
+async function sendConfirmation(
+  accountId: string,
+  invoiceId: string,
+  entitlementId: string,
+): Promise<void> {
+  "use step";
+  await fetch("https://api.example.com/notifications", {
+    method: "POST",
+    body: JSON.stringify({ accountId, invoiceId, entitlementId, template: "upgrade-complete" }),
+  });
+}
+
+// Compensation steps — idempotent. Safe to call again if retried.
+async function releaseSeats(accountId: string, reservationId: string): Promise { + "use step"; + await fetch("https://api.example.com/seats/release", { + method: "POST", + body: JSON.stringify({ accountId, reservationId }), + }); +} + +async function refundInvoice(accountId: string, invoiceId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/invoices/\${invoiceId}/refund\`, { + method: "POST", + body: JSON.stringify({ accountId }), + }); +} + +async function deprovisionSeats(accountId: string, entitlementId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/entitlements/\${entitlementId}\`, { + method: "DELETE", + body: JSON.stringify({ accountId }), + }); +} +`; + +export const sagaStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { subscriptionUpgradeSaga } from "@/workflows/saga"; + +// POST /api/saga { accountId, seats } +export async function POST(request: Request) { + const { accountId, seats } = await request.json(); + if (!accountId || typeof seats !== "number") { + return NextResponse.json( + { error: "accountId and seats are required" }, + { status: 400 }, + ); + } + + const run = await start(subscriptionUpgradeSaga, [accountId, seats]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/snippets/scheduling.ts b/docs/lib/registry/snippets/scheduling.ts new file mode 100644 index 0000000000..cf90262cf7 --- /dev/null +++ b/docs/lib/registry/snippets/scheduling.ts @@ -0,0 +1,109 @@ +/** + * Source snippets for the Scheduling registry entry. + * + * Schedule any future action minutes / hours / days / weeks ahead using + * durable sleep. Race the sleep against a defineHook() so external events + * (user converts, unsubscribes, snoozes) can cancel or reschedule the + * pending action without ever touching a database flag. 
Generic shape —
+ * customise the runAction step for emails, push notifications, webhooks,
+ * Slack messages, etc.
+ */
+
+export const schedulingWorkflowSource = `import { defineHook, sleep } from "workflow";
+
+// Hook fired by your app to cancel an in-flight scheduled action.
+// Token format is up to you — we use \`schedule:<id>\` here so the
+// caller doesn't need to know the run ID.
+export const cancelSchedule = defineHook<{ reason?: string }>();
+
+export interface ScheduledAction {
+  id: string;
+  /** Duration string ("2d", "1h"), millis, or absolute Date. */
+  delay: string | number | Date;
+  /** Action payload — passed straight to runAction. */
+  payload: Record<string, unknown>;
+}
+
+export async function scheduleAction(action: ScheduledAction) {
+  "use workflow";
+
+  // Race the durable sleep against the cancel hook. Whoever resolves first
+  // wins — no manual flag-checking, no extra database tables.
+  const hook = cancelSchedule.create({ token: \`schedule:\${action.id}\` });
+  const cancelled = await Promise.race([
+    sleep(action.delay).then(() => false as const),
+    hook.then(() => true as const),
+  ]);
+
+  if (cancelled) {
+    return { id: action.id, status: "cancelled" as const };
+  }
+
+  await runAction(action);
+  return { id: action.id, status: "executed" as const };
+}
+
+// Replace the body of this step with your real action — send an email,
+// post to Slack, fire a webhook, write to your DB. The step has full
+// Node.js access and is automatically retried on failure.
+async function runAction(action: ScheduledAction): Promise { + "use step"; + await fetch("https://api.example.com/scheduled-action", { + method: "POST", + body: JSON.stringify(action), + }); +} +`; + +export const schedulingStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { scheduleAction, type ScheduledAction } from "@/workflows/scheduling"; + +// POST /api/scheduling { id, delay, payload } +export async function POST(request: Request) { + const action = (await request.json()) as ScheduledAction; + if (!action.id || action.delay === undefined) { + return NextResponse.json( + { error: "id and delay are required" }, + { status: 400 }, + ); + } + + const run = await start(scheduleAction, [action]); + return NextResponse.json({ runId: run.runId, scheduleId: action.id }); +} +`; + +export const schedulingCancelRouteSource = `import { NextResponse } from "next/server"; +import { cancelSchedule } from "@/workflows/scheduling"; + +// POST /api/scheduling/cancel { scheduleId, reason? } +// Idempotent: returns success even if the hook has already fired or expired. +export async function POST(request: Request) { + const { scheduleId, reason } = await request.json(); + if (!scheduleId) { + return NextResponse.json( + { error: "scheduleId is required" }, + { status: 400 }, + ); + } + + try { + await cancelSchedule.resume(\`schedule:\${scheduleId}\`, { + reason: reason ?? "Cancelled by user", + }); + } catch (error) { + const message = error instanceof Error ? 
error.message.toLowerCase() : ""; + if (message.includes("not found") || message.includes("expired")) { + return NextResponse.json({ + success: true, + scheduleId, + note: "No active schedule found (already executed or cancelled)", + }); + } + throw error; + } + + return NextResponse.json({ success: true, scheduleId }); +} +`; diff --git a/docs/lib/registry/snippets/sequential-and-parallel.ts b/docs/lib/registry/snippets/sequential-and-parallel.ts new file mode 100644 index 0000000000..061fe0ec5c --- /dev/null +++ b/docs/lib/registry/snippets/sequential-and-parallel.ts @@ -0,0 +1,108 @@ +/** + * Source snippets for the Sequential & Parallel registry entry. + * + * Three composition primitives in one file: sequential `await` for pipelines, + * `Promise.all` for fan-out, and `Promise.race` against `sleep()` for + * deadlines. Drop in and replace the placeholder steps with real work. + */ + +export const sequentialAndParallelWorkflowSource = `import { sleep } from "workflow"; + +// PIPELINE — sequential await chains dependent steps. +export async function dataPipeline(data: unknown) { + "use workflow"; + + const validated = await validateData(data); + const processed = await processData(validated); + const stored = await storeData(processed); + + return stored; +} + +// FAN-OUT — independent work runs in parallel via Promise.all. +export async function fetchUserData(userId: string) { + "use workflow"; + + const [user, orders, preferences] = await Promise.all([ + fetchUser(userId), + fetchOrders(userId), + fetchPreferences(userId), + ]); + + return { user, orders, preferences }; +} + +// RACE — return whichever resolves first; pair with sleep() for deadlines. 
+export async function firstResponse(userId: string) { + "use workflow"; + + const result = await Promise.race([ + fetchPrimary(userId), + fetchFallback(userId), + sleep("5s").then(() => ({ stale: true } as const)), + ]); + + return result; +} + +// Replace each step body with your real logic — all of Node.js is available. + +async function validateData(data: unknown): Promise { + "use step"; + if (typeof data !== "object" || data === null) { + throw new Error("Invalid input"); + } + return JSON.stringify(data); +} + +async function processData(data: string): Promise { + "use step"; + return data.trim(); +} + +async function storeData(data: string): Promise { + "use step"; + return \`stored:\${data.length}\`; +} + +async function fetchUser(userId: string): Promise<{ id: string; name: string }> { + "use step"; + return { id: userId, name: "Ada" }; +} + +async function fetchOrders(userId: string): Promise<{ id: string; items: number }[]> { + "use step"; + return [{ id: "o_1", items: 3 }]; +} + +async function fetchPreferences(userId: string): Promise<{ theme: string }> { + "use step"; + return { theme: "dark" }; +} + +async function fetchPrimary(userId: string): Promise<{ source: "primary"; userId: string }> { + "use step"; + return { source: "primary", userId }; +} + +async function fetchFallback(userId: string): Promise<{ source: "fallback"; userId: string }> { + "use step"; + return { source: "fallback", userId }; +} +`; + +export const sequentialAndParallelStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { fetchUserData } from "@/workflows/sequential-and-parallel"; + +// POST /api/sequential-and-parallel { userId } +export async function POST(request: Request) { + const { userId } = await request.json(); + if (!userId) { + return NextResponse.json({ error: "userId is required" }, { status: 400 }); + } + + const run = await start(fetchUserData, [userId]); + return NextResponse.json({ runId: run.runId 
}); +} +`; diff --git a/docs/lib/registry/snippets/timeouts.ts b/docs/lib/registry/snippets/timeouts.ts new file mode 100644 index 0000000000..4828feb501 --- /dev/null +++ b/docs/lib/registry/snippets/timeouts.ts @@ -0,0 +1,102 @@ +/** + * Source snippets for the Timeouts registry entry. + * + * Bound the time any step, hook, or webhook can take by racing it against + * a durable sleep. Discriminated sentinel values keep TypeScript narrow on + * both branches. Two flavors: hard timeout (throw) and soft timeout + * (fallback value). + */ + +export const timeoutsWorkflowSource = `import { sleep, createWebhook } from "workflow"; + +const TIMEOUT = Symbol("timeout"); + +// HARD TIMEOUT — throw if the work doesn't finish in time. +export async function processWithTimeout(data: string) { + "use workflow"; + + const result = await Promise.race([ + processData(data), + sleep("30s").then(() => TIMEOUT as typeof TIMEOUT), + ]); + + if (result === TIMEOUT) { + throw new Error("Processing timed out after 30 seconds"); + } + + return result; +} + +// SOFT TIMEOUT — fall back to a cached value if the deadline fires first. +export async function fetchWithFallback(key: string, fallback: string) { + "use workflow"; + + const result = await Promise.race([ + fetchSlow(key), + sleep("3s").then(() => TIMEOUT as typeof TIMEOUT), + ]); + + return result === TIMEOUT ? fallback : result; +} + +// WEBHOOK + DEADLINE — same pattern, racing an external callback against +// a 7-day sleep so workflows never hang forever on a missing event. 
+export async function waitForApproval(requestId: string) { + "use workflow"; + + const webhook = createWebhook<{ approved: boolean }>(); + await sendApprovalRequest(requestId, webhook.url); + + const result = await Promise.race([ + webhook.then((req) => req.json()), + sleep("7 days").then(() => ({ timedOut: true } as const)), + ]); + + if ("timedOut" in result) { + throw new Error("Approval request expired after 7 days"); + } + + return result.approved; +} + +async function processData(data: string): Promise { + "use step"; + // Replace with real work. Note: the LOSER of Promise.race keeps running + // — the workflow ignores its result, but side effects still happen. + // Use Distributed Abort Controller for hard cross-process cancellation. + return data.toUpperCase(); +} + +async function fetchSlow(key: string): Promise { + "use step"; + const res = await fetch(\`https://api.example.com/slow/\${key}\`); + return res.text(); +} + +async function sendApprovalRequest( + requestId: string, + webhookUrl: string, +): Promise { + "use step"; + await fetch("https://api.example.com/approvals", { + method: "POST", + body: JSON.stringify({ requestId, webhookUrl }), + }); +} +`; + +export const timeoutsStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { processWithTimeout } from "@/workflows/timeouts"; + +// POST /api/timeouts { data } +export async function POST(request: Request) { + const { data } = await request.json(); + if (typeof data !== "string") { + return NextResponse.json({ error: "data must be a string" }, { status: 400 }); + } + + const run = await start(processWithTimeout, [data]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/snippets/webhooks.ts b/docs/lib/registry/snippets/webhooks.ts new file mode 100644 index 0000000000..47d663dfea --- /dev/null +++ b/docs/lib/registry/snippets/webhooks.ts @@ -0,0 +1,125 @@ +/** + * Source snippets for the Webhooks & 
External Callbacks registry entry. + * + * createWebhook() returns a URL the workflow can await. The workflow loops + * over incoming requests, processes each in a step, and responds inline. + * Bonus: async-request-reply variant that submits to a vendor and races + * the callback against a deadline. + */ + +export const webhooksWorkflowSource = `import { + createWebhook, + sleep, + FatalError, + type RequestWithResponse, +} from "workflow"; + +// PATTERN 1 — Long-running webhook listener (Stripe-style). +// Workflow suspends with zero cost, resumes on each incoming request, +// and exits when a terminal event arrives. +export async function paymentWebhook(orderId: string) { + "use workflow"; + + const webhook = createWebhook({ respondWith: "manual" }); + // webhook.url is the URL to register with the external service. + + const ledger: { type: string; at: string }[] = []; + + for await (const request of webhook) { + const entry = await processEvent(request); + ledger.push({ ...entry, at: new Date().toISOString() }); + + if (entry.type === "payment.succeeded" || entry.type === "refund.created") { + break; + } + } + + return { orderId, webhookUrl: webhook.url, ledger, status: "settled" as const }; +} + +// PATTERN 2 — Async request-reply with deadline. Submit to a vendor, +// pass it our webhook URL for the callback, race the callback against +// a 30-second budget. 
+export async function asyncVerification(documentId: string) { + "use workflow"; + + const webhook = createWebhook({ respondWith: "manual" }); + await submitToVendor(documentId, webhook.url); + + const result = await Promise.race([ + (async () => { + for await (const request of webhook) { + return await processCallback(request); + } + throw new FatalError("Webhook closed without callback"); + })(), + sleep("30s").then(() => ({ status: "timed_out" as const })), + ]); + + return { documentId, ...result }; +} + +async function processEvent( + request: RequestWithResponse, +): Promise<{ type: string }> { + "use step"; + const body = await request.json().catch(() => ({})); + const type = (body?.type as string) ?? "unknown"; + + if (type === "payment.succeeded") { + await request.respondWith(Response.json({ ack: true, action: "captured" })); + } else if (type === "payment.failed") { + await request.respondWith(Response.json({ ack: true, action: "flagged" })); + } else { + await request.respondWith(Response.json({ ack: true, action: "ignored" })); + } + + return { type }; +} + +async function submitToVendor( + documentId: string, + callbackUrl: string, +): Promise { + "use step"; + await fetch("https://vendor.example.com/verify", { + method: "POST", + body: JSON.stringify({ documentId, callbackUrl }), + }); +} + +async function processCallback( + request: RequestWithResponse, +): Promise<{ status: string; details: string }> { + "use step"; + const body = await request.json().catch(() => ({})); + await request.respondWith(Response.json({ ack: true })); + return { + status: body.approved ? "verified" : "rejected", + details: body.details ?? body.reason ?? 
"", + }; +} +`; + +export const webhooksStartRouteSource = `import { start, getRun } from "workflow/api"; +import { NextResponse } from "next/server"; +import { paymentWebhook } from "@/workflows/webhooks"; + +// POST /api/webhooks { orderId } +// Returns the auto-generated webhook URL — register it with the external service. +export async function POST(request: Request) { + const { orderId } = await request.json(); + if (!orderId) { + return NextResponse.json({ error: "orderId is required" }, { status: 400 }); + } + + const run = await start(paymentWebhook, [orderId]); + + // Read the workflow's return value once to surface webhook.url upstream. + // For long-lived webhooks, prefer streaming or a separate "/url/:runId" route. + return NextResponse.json({ + runId: run.runId, + note: "The workflow exposes webhook.url in its return value once settled.", + }); +} +`; diff --git a/docs/lib/registry/snippets/workflow-composition.ts b/docs/lib/registry/snippets/workflow-composition.ts new file mode 100644 index 0000000000..5f2afd4e86 --- /dev/null +++ b/docs/lib/registry/snippets/workflow-composition.ts @@ -0,0 +1,111 @@ +/** + * Source snippets for the Workflow Composition registry entry. + * + * Two ways to compose workflows: direct `await` flattens the child into the + * parent's event log; `start()` from inside a step spawns the child as an + * independent run with its own runId. + */ + +export const workflowCompositionWorkflowSource = `import { start } from "workflow/api"; + +// CHILD WORKFLOW — runs as part of the parent's event log when awaited. +export async function sendNotifications(userId: string) { + "use workflow"; + + await sendEmail(userId); + await sendPushNotification(userId); + return { notified: true }; +} + +// PARENT — direct await: flattens the child inline. 
+export async function onboardUser(userId: string) { + "use workflow"; + + await createAccount(userId); + await sendNotifications(userId); + await setupPreferences(userId); + + return { userId, status: "onboarded" }; +} + +// PARENT — background spawn: child runs independently with its own runId. +// Note: start() must be called from a step, not directly from a workflow. +export async function processOrder(orderId: string) { + "use workflow"; + + const order = await fulfillOrder(orderId); + const reportRunId = await triggerReport(orderId); + await sendConfirmation(orderId); + + return { orderId, order, reportRunId }; +} + +async function triggerReport(orderId: string): Promise { + "use step"; + // Spawn the child workflow on the latest deployment so future + // upgrades pick it up automatically. + const run = await start(generateReport, [orderId], { deploymentId: "latest" }); + return run.runId; +} + +// Background-spawnable child — runs independently when started. +export async function generateReport(reportId: string) { + "use workflow"; + await buildReport(reportId); + return { reportId, status: "ready" }; +} + +async function sendEmail(userId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/email/\${userId}\`, { method: "POST" }); +} + +async function sendPushNotification(userId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/push/\${userId}\`, { method: "POST" }); +} + +async function createAccount(userId: string): Promise { + "use step"; + await fetch("https://api.example.com/accounts", { + method: "POST", + body: JSON.stringify({ userId }), + }); +} + +async function setupPreferences(userId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/preferences/\${userId}\`, { method: "PUT" }); +} + +async function fulfillOrder(orderId: string): Promise<{ id: string }> { + "use step"; + return { id: orderId }; +} + +async function sendConfirmation(orderId: string): Promise { + "use step"; + 
await fetch(\`https://api.example.com/orders/\${orderId}/confirm\`, { method: "POST" }); +} + +async function buildReport(reportId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/reports/\${reportId}\`, { method: "POST" }); +} +`; + +export const workflowCompositionStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { onboardUser } from "@/workflows/workflow-composition"; + +// POST /api/workflow-composition { userId } +export async function POST(request: Request) { + const { userId } = await request.json(); + if (!userId) { + return NextResponse.json({ error: "userId is required" }, { status: 400 }); + } + + const run = await start(onboardUser, [userId]); + return NextResponse.json({ runId: run.runId }); +} +`; diff --git a/docs/lib/registry/types.ts b/docs/lib/registry/types.ts index 6d56bcb330..51e13efcab 100644 --- a/docs/lib/registry/types.ts +++ b/docs/lib/registry/types.ts @@ -14,8 +14,11 @@ */ export type RegistryCategory = + | 'provider' + | 'agent' | 'vercel' - | 'email' + | 'advanced' + | 'common' | 'storage' | 'ai' | 'auth' @@ -61,7 +64,25 @@ export interface RegistrySnippet { * 2. Register it in `components/registry/logos/index.ts`. * 3. Reference its key here. */ -export type RegistryLogoId = 'resend' | 'ai-sdk' | 'sandbox' | 'chat-sdk'; +export type RegistryLogoId = + | 'resend' + | 'ai-sdk' + | 'sandbox' + | 'chat-sdk' + | 'durable-agent' + | 'human-in-the-loop' + | 'agent-cancellation' + | 'sequential-and-parallel' + | 'workflow-composition' + | 'saga' + | 'batching' + | 'rate-limiting' + | 'scheduling' + | 'timeouts' + | 'idempotency' + | 'webhooks' + | 'child-workflows' + | 'distributed-abort-controller'; export interface RegistryItem { /** Slug used in the URL — `/registry/${id}`. */ @@ -76,8 +97,13 @@ export interface RegistryItem { longDescription?: string; /** Searchable tags rendered as small badges. 
*/ tags: string[]; - /** Primary category — used to group items on the listing page. */ - category: RegistryCategory; + /** + * Categories the item belongs to. Items can live in more than one — e.g. AI + * SDK is both an `agent` pattern and a `vercel`-built integration. Each + * category renders as its own badge on the card and matches every relevant + * filter on the listing page. + */ + categories: RegistryCategory[]; /** Provider homepage / product page. */ homepage: string; /** Provider docs entry-point linked from the detail hero. */ From fd523080659b91c03db3d0d821e97714a2a5cbfb Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sat, 2 May 2026 23:55:30 -0700 Subject: [PATCH 03/21] docs: rename registry to patterns, add shadcn /r routes, enrich install code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename /registry → /patterns across all URLs, nav, and internal links - Delete duplicate cookbook pages (all covered by patterns now) with redirects - Add /r and /r/[name] API routes serving shadcn-compatible registry JSON - Add installCode field to RegistrySnippet for richly-commented agent-friendly code - Add installCode exports for all 19 workflow patterns with full adaptation guides - Fix shadcnSlug values from @workflow-sdk/* to https://workflow-sdk.dev/r/* URLs - Exclude /r paths from i18n middleware so API routes resolve correctly - Add RegistryDetailToc, upgrading-workflows snippet, and logo components Co-authored-by: Cursor --- docs/app/[lang]/patterns/[id]/page.tsx | 876 +++++++++++ .../[lang]/{registry => patterns}/page.tsx | 8 +- docs/app/[lang]/registry/[id]/page.tsx | 169 --- docs/app/r/[name]/route.ts | 100 ++ docs/app/r/route.ts | 41 + docs/components/registry/RegistryCard.tsx | 2 +- docs/components/registry/RegistryCodeTabs.tsx | 104 +- .../registry/RegistryDetailHero.tsx | 48 +- .../components/registry/RegistryDetailToc.tsx | 102 ++ docs/components/registry/logos/index.tsx | 2 + 
.../logos/logo-upgrading-workflows.tsx | 40 + .../cookbook/advanced/child-workflows.mdx | 372 ----- .../advanced/distributed-abort-controller.mdx | 318 ---- .../agent-patterns/agent-cancellation.mdx | 205 --- .../cookbook/agent-patterns/durable-agent.mdx | 150 -- .../agent-patterns/human-in-the-loop.mdx | 255 ---- .../cookbook/common-patterns/batching.mdx | 105 -- .../cookbook/common-patterns/idempotency.mdx | 107 -- .../common-patterns/rate-limiting.mdx | 228 --- .../docs/cookbook/common-patterns/saga.mdx | 247 ---- .../cookbook/common-patterns/scheduling.mdx | 125 -- .../sequential-and-parallel.mdx | 155 -- .../cookbook/common-patterns/timeouts.mdx | 99 -- .../cookbook/common-patterns/webhooks.mdx | 185 --- .../common-patterns/workflow-composition.mdx | 118 -- docs/content/docs/cookbook/index.mdx | 38 - .../docs/cookbook/integrations/ai-sdk.mdx | 360 ----- .../docs/cookbook/integrations/chat-sdk.mdx | 303 ---- .../docs/cookbook/integrations/sandbox.mdx | 516 ------- docs/content/docs/cookbook/meta.json | 2 +- docs/geistdocs.tsx | 8 +- docs/lib/registry/manifest.ts | 1284 +++++++++++++++-- .../registry/snippets/agent-cancellation.ts | 189 +++ docs/lib/registry/snippets/ai-sdk.ts | 120 ++ docs/lib/registry/snippets/batching.ts | 91 ++ docs/lib/registry/snippets/chat-sdk.ts | 103 ++ docs/lib/registry/snippets/child-workflows.ts | 154 ++ .../snippets/distributed-abort-controller.ts | 185 +++ docs/lib/registry/snippets/durable-agent.ts | 124 ++ .../registry/snippets/human-in-the-loop.ts | 165 +++ docs/lib/registry/snippets/idempotency.ts | 87 ++ docs/lib/registry/snippets/rate-limiting.ts | 89 ++ docs/lib/registry/snippets/resend.ts | 115 ++ docs/lib/registry/snippets/saga.ts | 150 ++ docs/lib/registry/snippets/sandbox.ts | 275 ++++ docs/lib/registry/snippets/scheduling.ts | 70 + .../snippets/sequential-and-parallel.ts | 116 ++ docs/lib/registry/snippets/timeouts.ts | 110 ++ .../registry/snippets/upgrading-workflows.ts | 472 ++++++ 
docs/lib/registry/snippets/webhooks.ts | 173 ++- .../registry/snippets/workflow-composition.ts | 120 ++ docs/lib/registry/types.ts | 186 ++- docs/next.config.ts | 132 +- docs/proxy.ts | 4 +- 54 files changed, 5630 insertions(+), 4272 deletions(-) create mode 100644 docs/app/[lang]/patterns/[id]/page.tsx rename docs/app/[lang]/{registry => patterns}/page.tsx (93%) delete mode 100644 docs/app/[lang]/registry/[id]/page.tsx create mode 100644 docs/app/r/[name]/route.ts create mode 100644 docs/app/r/route.ts create mode 100644 docs/components/registry/RegistryDetailToc.tsx create mode 100644 docs/components/registry/logos/logo-upgrading-workflows.tsx delete mode 100644 docs/content/docs/cookbook/advanced/child-workflows.mdx delete mode 100644 docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx delete mode 100644 docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx delete mode 100644 docs/content/docs/cookbook/agent-patterns/durable-agent.mdx delete mode 100644 docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/batching.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/idempotency.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/rate-limiting.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/saga.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/scheduling.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/timeouts.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/webhooks.mdx delete mode 100644 docs/content/docs/cookbook/common-patterns/workflow-composition.mdx delete mode 100644 docs/content/docs/cookbook/index.mdx delete mode 100644 docs/content/docs/cookbook/integrations/ai-sdk.mdx delete mode 100644 docs/content/docs/cookbook/integrations/chat-sdk.mdx delete mode 100644 
docs/content/docs/cookbook/integrations/sandbox.mdx create mode 100644 docs/lib/registry/snippets/upgrading-workflows.ts diff --git a/docs/app/[lang]/patterns/[id]/page.tsx b/docs/app/[lang]/patterns/[id]/page.tsx new file mode 100644 index 0000000000..d1c179b0ba --- /dev/null +++ b/docs/app/[lang]/patterns/[id]/page.tsx @@ -0,0 +1,876 @@ +import { AlertTriangle, ExternalLink, Info, Lightbulb } from 'lucide-react'; +import type { Metadata } from 'next'; +import { notFound } from 'next/navigation'; +import { codeToHtml } from 'shiki'; +import { Mermaid } from '@/components/geistdocs/mermaid'; +import { RegistryCodeTabs } from '@/components/registry/RegistryCodeTabs'; +import { RegistryDetailHero } from '@/components/registry/RegistryDetailHero'; +import { + RegistryDetailToc, + type RegistryTocItem, +} from '@/components/registry/RegistryDetailToc'; +import { RegistryInstallTabs } from '@/components/registry/RegistryInstallTabs'; +import { getRegistryItem, getRegistryItemIds } from '@/lib/registry/manifest'; +import type { RegistryGuide, RegistrySnippet } from '@/lib/registry/types'; +import { cn } from '@/lib/utils'; + +interface PageProps { + params: Promise<{ id: string }>; +} + +export function generateStaticParams() { + return getRegistryItemIds().map((id) => ({ id })); +} + +export async function generateMetadata({ + params, +}: PageProps): Promise { + const { id } = await params; + const item = getRegistryItem(id); + if (!item) return { title: 'Registry item not found' }; + return { + title: `${item.name} | Workflow Registry`, + description: item.description, + }; +} + +async function renderSnippets(snippets: RegistrySnippet[]) { + return Promise.all( + snippets.map(async (snippet) => ({ + label: snippet.label, + caption: snippet.caption, + description: snippet.description, + code: snippet.code, + html: await codeToHtml(snippet.code, { + lang: snippet.lang, + themes: { + light: 'github-light-default', + dark: 'github-dark-default', + }, + defaultColor: false, 
+ }), + })) + ); +} + +export default async function RegistryDetailPage({ params }: PageProps) { + const { id } = await params; + const item = getRegistryItem(id); + if (!item) notFound(); + + const [blocks, conceptBlocks] = await Promise.all([ + renderSnippets(item.snippets), + item.conceptSnippets + ? renderSnippets(item.conceptSnippets) + : Promise.resolve([]), + ]); + + const guide = item.guide; + const hasApproachSections = (guide?.approachSections?.length ?? 0) > 0; + + // Index conceptSnippets by label for O(1) lookup in approach sections + const conceptBlocksByLabel = new Map(conceptBlocks.map((b) => [b.label, b])); + + // Build the ToC items dynamically based on which sections exist + const tocItems: RegistryTocItem[] = []; + + const useFlatLayout = hasApproachSections || guide?.flatLayout; + + if (guide?.diagram) + tocItems.push({ + id: 'how-it-fits-together', + title: guide.diagramTitle ?? 'How it fits together', + depth: 2, + }); + if (guide?.whySection) + tocItems.push({ + id: 'why-use-this', + title: guide.whySection.title ?? 'Why use this', + depth: 2, + }); + + if (useFlatLayout) { + // Flat structure: When to use + Choosing an approach as top-level h2s + if (guide?.whenToUse) + tocItems.push({ id: 'when-to-use', title: 'When to use this', depth: 2 }); + if (guide?.approaches) + tocItems.push({ + id: 'choosing-an-approach', + title: guide.approaches.title ?? 'Choosing an approach', + depth: 2, + }); + if (hasApproachSections) { + // Per-approach sections (e.g. agent-cancellation) + for (const section of guide!.approachSections!) 
{ + tocItems.push({ + id: slugify(section.title), + title: section.title, + depth: 2, + }); + } + } else { + // flatLayout without per-approach sections: Installation + Source follow normally + tocItems.push({ id: 'installation', title: 'Installation', depth: 2 }); + if (conceptBlocks.length > 0) + tocItems.push({ id: 'concept', title: 'Concept', depth: 2 }); + tocItems.push({ id: 'source', title: 'Source', depth: 2 }); + } + } else { + // Umbrella Overview for items without flat layout + if (guide && (guide.overview || guide.whenToUse || guide.approaches)) { + tocItems.push({ id: 'overview', title: 'Overview', depth: 2 }); + if (guide.whenToUse) + tocItems.push({ + id: 'when-to-use', + title: 'When to use this', + depth: 3, + }); + if (guide.approaches) + tocItems.push({ + id: 'choosing-an-approach', + title: 'Choosing an approach', + depth: 3, + }); + } + tocItems.push({ id: 'installation', title: 'Installation', depth: 2 }); + if (conceptBlocks.length > 0) + tocItems.push({ id: 'concept', title: 'Concept', depth: 2 }); + tocItems.push({ id: 'source', title: 'Source', depth: 2 }); + } + + if (guide?.howItWorks) + tocItems.push({ id: 'how-it-works', title: 'How it works', depth: 2 }); + if (guide?.adapting && guide.adapting.length > 0) + tocItems.push({ + id: 'adapting-this', + title: guide.adaptingTitle ?? 'Adapting this', + depth: 2, + }); + if (guide?.keyApis && guide.keyApis.length > 0) + tocItems.push({ id: 'key-apis', title: 'Key APIs', depth: 2 }); + + // Plain-text summary for "Copy page" + const pageTextSections: string[] = [ + `# ${item.name}`, + item.description, + item.longDescription ?? '', + ...(guide?.introBullets ?? []).map((b) => `- ${b}`), + ]; + + if (guide?.whenToUse && guide.whenToUse.length > 0) { + pageTextSections.push('## When to use this'); + guide.whenToUse.forEach((t) => pageTextSections.push(`- ${t}`)); + } + + if (guide?.whySection) { + pageTextSections.push(`## ${guide.whySection.title ?? 
'Why'}`); + if (guide.whySection.problem) + pageTextSections.push(guide.whySection.problem); + if (guide.whySection.solution) + pageTextSections.push(guide.whySection.solution); + (guide.whySection.bullets ?? []).forEach((b) => + pageTextSections.push(`- ${b}`) + ); + } + + if (guide?.approaches) { + pageTextSections.push( + `## ${guide.approaches.title ?? 'Choosing an approach'}` + ); + if (guide.approaches.description) + pageTextSections.push(guide.approaches.description); + (guide.approaches.bullets ?? []).forEach((b) => + pageTextSections.push(`- ${b}`) + ); + if (guide.approaches.columns && guide.approaches.rows) { + pageTextSections.push('| ' + guide.approaches.columns.join(' | ') + ' |'); + pageTextSections.push( + '| ' + guide.approaches.columns.map(() => '---').join(' | ') + ' |' + ); + guide.approaches.rows.forEach((row) => + pageTextSections.push(`| ${row.aspect} | ${row.values.join(' | ')} |`) + ); + } + if (guide.approaches.closing) + pageTextSections.push(guide.approaches.closing); + } + + if (guide?.sourceDescription) { + pageTextSections.push('## Source'); + pageTextSections.push(guide.sourceDescription); + } + + if (guide?.howItWorks && guide.howItWorks.length > 0) { + pageTextSections.push('## How it works'); + guide.howItWorks.forEach((s, i) => pageTextSections.push(`${i + 1}. ${s}`)); + if (guide.howItWorksClosing) pageTextSections.push(guide.howItWorksClosing); + } + + if (guide?.adapting && guide.adapting.length > 0) { + pageTextSections.push(`## ${guide.adaptingTitle ?? 
'Adapting this'}`); + if (guide.adaptingIntro) pageTextSections.push(guide.adaptingIntro); + guide.adapting.forEach((t) => pageTextSections.push(`- ${t}`)); + } + + if (guide?.keyApis && guide.keyApis.length > 0) { + pageTextSections.push('## Key APIs'); + guide.keyApis.forEach((api) => + pageTextSections.push(`- [${api.label}](${api.url})`) + ); + } + + if (item.snippets && item.snippets.length > 0) { + pageTextSections.push('## Source code'); + item.snippets.forEach((s) => { + pageTextSections.push( + `### ${s.label}${s.caption ? ` (${s.caption})` : ''}` + ); + if (s.description) pageTextSections.push(s.description); + pageTextSections.push('```'); + pageTextSections.push(s.code); + pageTextSections.push('```'); + }); + } + + const pageText = pageTextSections.filter(Boolean).join('\n\n'); + + return ( +
+
+
+ +
+ +
+ {/* ── Main content ── */} +
+ {/* Long-form intro description + optional feature bullets */} + {(item.longDescription || guide?.introBullets) && ( +
+ {item.longDescription && ( +

+ +

+ )} + {guide?.introBullets && guide.introBullets.length > 0 && ( +
    + {guide.introBullets.map((tip) => ( +
  • + + + + +
  • + ))} +
+ )} +
+ )} + + {/* ── Top-level guide callout (e.g. deprecation / migration notice) ── */} + {guide?.callout && } + + {/* ── Diagram (e.g. mermaid flowchart) + optional context bullets ── */} + {guide?.diagram && ( +
+

+ {guide.diagramTitle ?? 'How it fits together'} +

+
+ +
+ {guide.diagramContext && ( +
+ {guide.diagramContext.prose && ( +

+ +

+ )} + {guide.diagramContext.bullets && + guide.diagramContext.bullets.length > 0 && ( +
    + {guide.diagramContext.bullets.map((b) => ( +
  • + + + + +
  • + ))} +
+ )} +
+ )} +
+ )} + + {/* ── Why use this (problem/solution framing) ── */} + {guide?.whySection && ( +
+

+ {guide.whySection.title ?? 'Why use this'} +

+ {guide.whySection.problemProse && ( +

+ {guide.whySection.problemProse} +

+ )} + {guide.whySection.problemBullets && + guide.whySection.problemBullets.length > 0 && ( +
    + {guide.whySection.problemBullets.map((b) => ( +
  • + + + + +
  • + ))} +
+ )} + {guide.whySection.solutionProse && ( +

+ {guide.whySection.solutionProse} +

+ )} + {guide.whySection.solutionBullets && + guide.whySection.solutionBullets.length > 0 && ( +
    + {guide.whySection.solutionBullets.map((b) => ( +
  • + + + + +
  • + ))} +
+ )} + {guide.whySection.closingProse && ( +

+ +

+ )} +
+ )} + + {useFlatLayout ? ( + <> + {/* ── Flat: When to use + Choosing an approach as top-level h2s ── */} + {guide?.whenToUse && ( +
+

+ When to use this +

+
    + {guide.whenToUse.map((tip) => ( +
  • + + + + +
  • + ))} +
+
+ )} + + {guide?.approaches && ( +
+

+ {guide.approaches.title ?? 'Choosing an approach'} +

+ {guide.approaches.description && ( +

+ {guide.approaches.description} +

+ )} + {guide.approaches.bullets && ( +
    + {guide.approaches.bullets.map((b) => ( +
  • + + + + +
  • + ))} +
+ )} + + {guide.approaches.closing && ( +

+ {guide.approaches.closing} +

+ )} +
+ )} + + {hasApproachSections ? ( + /* ── Per-approach sections (e.g. agent-cancellation) ── */ + <> + {guide!.approachSections!.map((section) => { + const sectionBlocks = section.snippetLabels + .map((label) => conceptBlocksByLabel.get(label)) + .filter( + (b): b is (typeof conceptBlocks)[number] => + b !== undefined + ); + + return ( +
+

+ {section.title} +

+ + {section.description && ( +

+ +

+ )} + + {section.installSlug && ( +
+

+ Install with the shadcn CLI — the code is copied + into your project and you own it from day one. +

+ +
+ )} + + {sectionBlocks.length > 0 && ( + + )} + + {section.afterBullets && + section.afterBullets.length > 0 && ( +
    + {section.afterBullets.map((b) => ( +
  • + + + + +
  • + ))} +
+ )} + + {section.afterProse && ( +

+ {section.afterProse} +

+ )} + + {section.callout && ( + + )} +
+ ); + })} + + ) : ( + /* ── flatLayout without per-approach sections: Installation + Source ── */ + <> +
+

+ Installation +

+

+ The shadcn CLI copies every file in this recipe into + your project — you own the code after install and can + customize it freely. +

+ +
+ + {conceptBlocks.length > 0 && ( +
+

+ Concept +

+

+ A simplified walkthrough of the pattern — good for + understanding the shape before looking at the full + install. +

+ +
+ )} + +
+

+ Source +

+

+ {guide?.sourceDescription ?? + 'A preview of the code that gets copied into your app.'} +

+ +
+ + )} + + ) : ( + <> + {/* ── Umbrella Overview (items without flat layout) ── */} + {guide && + (guide.overview || guide.whenToUse || guide.approaches) && ( +
+

+ Overview +

+ + {guide.overview && ( +

+ {guide.overview} +

+ )} + + {guide.whenToUse && ( +
+

+ When to use this +

+
    + {guide.whenToUse.map((tip) => ( +
  • + + + + +
  • + ))} +
+
+ )} + + {guide.approaches && ( +
+

+ {guide.approaches.title ?? 'Choosing an approach'} +

+ +
+ )} +
+ )} + + {/* ── Installation ── */} +
+

+ Installation +

+

+ The shadcn CLI copies every file in this recipe into your + project — you own the code after install and can customize + it freely. +

+ +
+ + {/* ── Concept snippets (when educational ≠ plug-and-play) ── */} + {conceptBlocks.length > 0 && ( +
+

+ Concept +

+

+ A simplified walkthrough of the pattern — good for + understanding the shape before looking at the full + install. +

+ +
+ )} + + {/* ── Source snippets ── */} +
+

+ Source +

+

+ {guide?.sourceDescription ?? + 'A preview of the code that gets copied into your app.'} +

+ +
+ + )} + + {/* ── GUIDE: How it works ── */} + {guide?.howItWorks && guide.howItWorks.length > 0 && ( +
+

+ How it works +

+
    + {guide.howItWorks.map((step, i) => ( +
  1. + + {i + 1} + + + + +
  2. + ))} +
+ {guide.howItWorksClosing && ( +

+ +

+ )} +
+ )} + + {/* ── GUIDE: Adapting ── */} + {guide?.adapting && guide.adapting.length > 0 && ( +
+

+ {guide.adaptingTitle ?? 'Adapting this'} +

+ {guide.adaptingIntro && ( +

+ {guide.adaptingIntro} +

+ )} +
    + {guide.adapting.map((tip) => ( +
  • + + + + +
  • + ))} +
+
+ )} + + {/* ── GUIDE: Key APIs ── */} + {guide?.keyApis && guide.keyApis.length > 0 && ( +
+

+ Key APIs +

+ +
+ )} +
+ + {/* ── ToC sidebar ── */} + +
+
+
+ ); +} + +// ─── Helpers ────────────────────────────────────────────────────────────────── + +/** Converts an approach section title to a URL-safe anchor id. */ +function slugify(title: string): string { + return title + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-|-$/g, ''); +} + +// ─── Sub-components ─────────────────────────────────────────────────────────── + +/** + * Renders inline **bold** and `code` markers within a string. + * Used for bullet lists and table cells throughout the guide sections. + */ +function InlineMarkdown({ text }: { text: string }) { + const parts = text.split(/(\*\*[^*]+\*\*|`[^`]+`|\[[^\]]+\]\([^)]+\))/g); + if (parts.length === 1) return <>{text}; + return ( + <> + {parts.map((part, i) => { + if (part.startsWith('**') && part.endsWith('**')) { + return ( + + {part.slice(2, -2)} + + ); + } + if (part.startsWith('`') && part.endsWith('`')) { + return ( + + {part.slice(1, -1)} + + ); + } + const linkMatch = part.match(/^\[([^\]]+)\]\(([^)]+)\)$/); + if (linkMatch) { + return ( + + {linkMatch[1]} + + ); + } + return part; + })} + + ); +} + +/** Kept for the ApproachTable which only needs code spans, not bold. */ +const InlineCode = ({ text }: { text: string }) => ( + +); + +function ApproachTable({ + table, +}: { + table: NonNullable; +}) { + // columns[0] is the row-label column header (often empty); rest are approach names + const [, ...approachNames] = table.columns; + return ( +
+ + + + {/* Empty first header — no text, just a spacer */} + + ))} + + + + {table.rows.map((row) => ( + + + {row.values.map((val, ci) => ( + + ))} + + ))} + +
+ {approachNames.map((name) => ( + + {name} +
+ + + +
+
+ ); +} + +const calloutStyles = { + // blue/emerald use Tailwind's default palette (not overridden by Geist), so + // explicit dark: overrides with high-numbered shades work as expected. + info: { + container: 'border-blue-300 bg-blue-100 dark:border-blue-500', + icon: , + text: 'text-blue-1000', + }, + // amber IS overridden by the Geist DS — the scale is inverted in dark mode: + // amber-100 ≈ near-black, amber-1000 ≈ near-white. Use semantic shades and + // let the CSS vars handle the theme switch automatically. + warn: { + container: 'border-amber-300 bg-amber-100 dark:border-amber-500', + icon: , + text: 'text-amber-1000', + }, + tip: { + container: 'border-emerald-300 bg-emerald-100 dark:border-emerald-500', + icon: , + text: 'text-emerald-1000', + }, +}; + +function GuideCallout({ + callout, +}: { + callout: NonNullable; +}) { + const styles = calloutStyles[callout.type]; + return ( +
+ {styles.icon} +

+ +

+
+ ); +} diff --git a/docs/app/[lang]/registry/page.tsx b/docs/app/[lang]/patterns/page.tsx similarity index 93% rename from docs/app/[lang]/registry/page.tsx rename to docs/app/[lang]/patterns/page.tsx index 0adaff91d8..0375c4718b 100644 --- a/docs/app/[lang]/registry/page.tsx +++ b/docs/app/[lang]/patterns/page.tsx @@ -5,12 +5,12 @@ import { RegistryGrid } from '@/components/registry/RegistryGrid'; import { registryItems } from '@/lib/registry/manifest'; export const metadata: Metadata = { - title: 'Registry | Workflow SDK', + title: 'Patterns | Workflow SDK', description: 'Installable Workflow patterns for popular providers — durable, cancellable, replay-safe recipes you drop into your app with one shadcn command.', }; -export default function RegistryPage() { +export default function PatternsPage() { return (
@@ -18,7 +18,7 @@ export default function RegistryPage() {

- Registry + Patterns

Installable Workflow patterns for popular providers. Durable, @@ -48,7 +48,7 @@ export default function RegistryPage() {

- )} -
- ))} -
-
- )} - - {/* Files installed */} -
-

- What gets installed -

-

- These files land in your project. Edit them however you want — the - shadcn CLI never touches them again. -

-
    - {item.files.map((file) => ( -
  • - - {file.path} - -

    - {file.description} -

    -
  • - ))} -
-
- - {/* Source preview */} -
-

Source

-

- A preview of the code that gets copied into your app. -

- -
-
-
-
- ); -} diff --git a/docs/app/r/[name]/route.ts b/docs/app/r/[name]/route.ts new file mode 100644 index 0000000000..b3bdc119dc --- /dev/null +++ b/docs/app/r/[name]/route.ts @@ -0,0 +1,100 @@ +/** + * /r/[name] — shadcn-compatible registry item endpoint. + * + * Returns a single registry item in the shadcn registry-item.json schema so + * the shadcn CLI can install it: + * + * pnpm dlx shadcn@latest add https://workflow-sdk.dev/r/durable-agent + * + * Only workflow source files (captions starting with "workflows/") are + * included in the response. For those files, `installCode` is preferred over + * `code` when present — `installCode` carries the richly-commented version + * with agent-friendly PATTERN / USEFUL WHEN / TO ADAPT sections, while + * `code` is the clean UI display version. + * + * Content negotiation: + * - `Accept: application/json` or `User-Agent: *shadcn*` → JSON response + * - Otherwise → redirect to the human-readable /patterns/[name] page + */ + +import { NextResponse } from 'next/server'; +import { registryItems } from '@/lib/registry/manifest'; + +const WORKFLOW_PATH_PREFIX = 'workflows/'; + +export const dynamic = 'force-dynamic'; + +export async function GET( + _request: Request, + { params }: { params: Promise<{ name: string }> } +) { + const { name } = await params; + + const item = registryItems.find((r) => r.id === name); + if (!item) { + return NextResponse.json( + { error: `Pattern "${name}" not found` }, + { status: 404 } + ); + } + + // Collect workflow files from snippets (installCode > code fallback). + const workflowSnippets = item.snippets.filter((s) => + s.caption?.startsWith(WORKFLOW_PATH_PREFIX) + ); + + // Deduplicate by caption path — multiple tabs may point to the same file. 
+ const seenPaths = new Set(); + const files: Array<{ + path: string; + content: string; + type: 'registry:lib'; + target: string; + }> = []; + + for (const snippet of workflowSnippets) { + const filePath = snippet.caption!; + if (seenPaths.has(filePath)) continue; + seenPaths.add(filePath); + + files.push({ + path: filePath, + content: snippet.installCode ?? snippet.code, + type: 'registry:lib', + // target controls where shadcn places the file in the user's project. + target: filePath, + }); + } + + // If no workflow snippets found, also check conceptSnippets. + if (files.length === 0 && item.conceptSnippets) { + for (const snippet of item.conceptSnippets) { + if (!snippet.caption?.startsWith(WORKFLOW_PATH_PREFIX)) continue; + const filePath = snippet.caption!; + if (seenPaths.has(filePath)) continue; + seenPaths.add(filePath); + files.push({ + path: filePath, + content: snippet.installCode ?? snippet.code, + type: 'registry:lib', + target: filePath, + }); + } + } + + const registryItem = { + $schema: 'https://ui.shadcn.com/schema/registry-item.json', + name: item.id, + type: 'registry:lib' as const, + title: item.name, + description: item.description, + files, + }; + + return NextResponse.json(registryItem, { + headers: { + 'Cache-Control': 'public, max-age=3600, stale-while-revalidate=86400', + 'Access-Control-Allow-Origin': '*', + }, + }); +} diff --git a/docs/app/r/route.ts b/docs/app/r/route.ts new file mode 100644 index 0000000000..9bcccbbd92 --- /dev/null +++ b/docs/app/r/route.ts @@ -0,0 +1,41 @@ +/** + * /r — shadcn-compatible registry index endpoint. + * + * Returns the full registry in the shadcn registry.json schema so the CLI + * can discover all available patterns: + * + * pnpm dlx shadcn@latest add https://workflow-sdk.dev/r + * + * Each item in the index links to /r/[name] for the full file payload. 
+ */ + +import { NextResponse } from 'next/server'; +import { registryItems } from '@/lib/registry/manifest'; + +export const dynamic = 'force-dynamic'; + +export async function GET() { + const items = registryItems.map((item) => ({ + name: item.id, + type: 'registry:lib' as const, + title: item.name, + description: item.description, + registryDependencies: [], + tags: item.tags, + categories: item.categories, + })); + + const registryIndex = { + $schema: 'https://ui.shadcn.com/schema/registry.json', + name: 'workflow-sdk', + homepage: 'https://workflow-sdk.dev', + items, + }; + + return NextResponse.json(registryIndex, { + headers: { + 'Cache-Control': 'public, max-age=3600, stale-while-revalidate=86400', + 'Access-Control-Allow-Origin': '*', + }, + }); +} diff --git a/docs/components/registry/RegistryCard.tsx b/docs/components/registry/RegistryCard.tsx index 5218cd2185..ba9e035c6b 100644 --- a/docs/components/registry/RegistryCard.tsx +++ b/docs/components/registry/RegistryCard.tsx @@ -19,7 +19,7 @@ export function RegistryCard({ item }: RegistryCardProps) { const Logo = getProviderLogo(item.logo); return ( - +
diff --git a/docs/components/registry/RegistryCodeTabs.tsx b/docs/components/registry/RegistryCodeTabs.tsx index 8cebd74615..c6c6d60f69 100644 --- a/docs/components/registry/RegistryCodeTabs.tsx +++ b/docs/components/registry/RegistryCodeTabs.tsx @@ -1,17 +1,34 @@ 'use client'; +import { CheckIcon, CopyIcon } from 'lucide-react'; +import { useState } from 'react'; +import { toast } from 'sonner'; +import { Button } from '@/components/ui/button'; import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; +interface CodeBlock { + label: string; + caption?: string; + /** Optional prose shown between the caption and the code block. */ + description?: string; + /** Raw source — used by the copy button. */ + code: string; + /** Pre-rendered shiki HTML — generated on the server. */ + html: string; +} + interface RegistryCodeTabsProps { - blocks: { - label: string; - caption?: string; - /** Pre-rendered shiki HTML — generated on the server. */ - html: string; - }[]; + blocks: CodeBlock[]; } +const COPY_TIMEOUT = 2000; +// Roughly 18 lines at 1.5rem line-height before we collapse. +const COLLAPSED_MAX_H = '18rem'; + export function RegistryCodeTabs({ blocks }: RegistryCodeTabsProps) { + // Lifted so "View code" persists when switching tabs. + const [expanded, setExpanded] = useState(false); + if (blocks.length === 0) return null; return ( @@ -30,17 +47,82 @@ export function RegistryCodeTabs({ blocks }: RegistryCodeTabsProps) { {blocks.map((b) => ( {b.caption && ( -

+

{b.caption}

)} -
+ {b.description} +

+ )} + setExpanded(true)} /> ))} ); } + +function BlockCode({ + block, + expanded, + onExpand, +}: { + block: CodeBlock; + expanded: boolean; + onExpand: () => void; +}) { + const [copied, setCopied] = useState(false); + + const handleCopy = () => { + navigator.clipboard.writeText(block.code); + toast.success('Copied to clipboard'); + setCopied(true); + setTimeout(() => setCopied(false), COPY_TIMEOUT); + }; + + return ( +
+ {/* Copy button — always visible in the top-right corner */} + + + {/* Code — capped height when collapsed */} +
+ + {/* Gradient + "View code" button when collapsed */} + {!expanded && ( +
+ +
+ )} +
+ ); +} diff --git a/docs/components/registry/RegistryDetailHero.tsx b/docs/components/registry/RegistryDetailHero.tsx index 485960e834..74537724a7 100644 --- a/docs/components/registry/RegistryDetailHero.tsx +++ b/docs/components/registry/RegistryDetailHero.tsx @@ -1,6 +1,5 @@ -import { ChevronRight, ExternalLink, Github, Home } from 'lucide-react'; +import { ChevronRight, ExternalLink, Github } from 'lucide-react'; import Link from 'next/link'; -import { Badge } from '@/components/ui/badge'; import { Breadcrumb, BreadcrumbItem, @@ -25,7 +24,7 @@ export function RegistryDetailHero({ item }: RegistryDetailHeroProps) { - Registry + Patterns @@ -58,40 +57,21 @@ export function RegistryDetailHero({ item }: RegistryDetailHeroProps) {

{item.description}

-
- {item.tags.map((tag) => ( - - {tag} - - ))} -
- - - Homepage - - {item.docsUrl && ( - - - Provider docs - - )} + {item.docsUrl && + !item.docsUrl.startsWith('https://workflow-sdk.dev') && ( + + + Provider docs + + )} {item.sourceUrl && ( (items[0]?.id ?? ''); + + useEffect(() => { + const observer = new IntersectionObserver( + (entries) => { + for (const entry of entries) { + if (entry.isIntersecting) { + setActiveId(entry.target.id); + } + } + }, + { rootMargin: '-80px 0px -80% 0px', threshold: 0 } + ); + + for (const item of items) { + const element = document.getElementById(item.id); + if (element) observer.observe(element); + } + + return () => observer.disconnect(); + }, [items]); + + if (items.length === 0) return null; + + const githubEditUrl = githubPath + ? `https://github.com/vercel/workflow/edit/main/docs/lib/registry/${githubPath}` + : undefined; + + return ( + + ); +} diff --git a/docs/components/registry/logos/index.tsx b/docs/components/registry/logos/index.tsx index 4c05964bbf..098c47301c 100644 --- a/docs/components/registry/logos/index.tsx +++ b/docs/components/registry/logos/index.tsx @@ -17,6 +17,7 @@ import { LogoScheduling } from './logo-scheduling'; import { LogoSequentialAndParallel } from './logo-sequential-and-parallel'; import { LogoTimeouts } from './logo-timeouts'; import { LogoWebhooks } from './logo-webhooks'; +import { LogoUpgradingWorkflows } from './logo-upgrading-workflows'; import { LogoWorkflowComposition } from './logo-workflow-composition'; export interface ProviderLogoProps { @@ -50,6 +51,7 @@ export const providerLogos: Record< webhooks: LogoWebhooks, 'child-workflows': LogoChildWorkflows, 'distributed-abort-controller': LogoDistributedAbortController, + 'upgrading-workflows': LogoUpgradingWorkflows, }; export function getProviderLogo( diff --git a/docs/components/registry/logos/logo-upgrading-workflows.tsx b/docs/components/registry/logos/logo-upgrading-workflows.tsx new file mode 100644 index 0000000000..6bd25fe396 --- /dev/null +++ 
b/docs/components/registry/logos/logo-upgrading-workflows.tsx @@ -0,0 +1,40 @@ +/** + * Upgrading Workflows brand mark. + * + * A circular refresh arrow with an upward-pointing bolt at the top, + * representing a workflow that respawns itself on the latest deployment. + * All `currentColor`. + */ +export function LogoUpgradingWorkflows({ + size = 20, + className, +}: { + size?: number; + className?: string; +}) { + return ( + + ); +} diff --git a/docs/content/docs/cookbook/advanced/child-workflows.mdx b/docs/content/docs/cookbook/advanced/child-workflows.mdx deleted file mode 100644 index 68a64496c4..0000000000 --- a/docs/content/docs/cookbook/advanced/child-workflows.mdx +++ /dev/null @@ -1,372 +0,0 @@ ---- -title: Child Workflows -description: Spawn child workflows from a parent and poll their progress for batch processing, report generation, and other multi-workflow orchestration scenarios. -type: guide -summary: Orchestrate independent child workflows from a parent workflow using start(), sleep(), and getRun() to fan out work with isolated failure boundaries. ---- - -Use child workflows when a single workflow needs to orchestrate many independent units of work. Each child runs as its own workflow with a separate event log, retry boundary, and failure scope -- if one child fails, it doesn't take down the parent or siblings. - -## When to use child workflows - -Child workflows are the right choice when: - -- **Work units are independent.** Each child can run without knowing about the others (e.g., processing individual documents, generating separate reports). -- **You need isolated failure boundaries.** A failing child should not abort unrelated work. The parent decides how to handle failures. -- **You want massive fan-out.** Spawning 50 or 500 children is practical because each runs on its own infrastructure. -- **You need per-item observability.** Each child workflow has its own run ID, status, and event log for monitoring. 
- -For simpler cases where steps share a single event log, use [direct await composition](/cookbook/common-patterns/workflow-composition#direct-await-flattening) instead. - -## Basic pattern: spawn and poll - -The core pattern has three parts: - -1. A **step** that calls `start()` to spawn a child workflow and returns the run ID -2. A **polling loop** in the parent workflow that checks child status with `getRun()` -3. A **step** that retrieves the child's return value once it completes - -```typescript -import { sleep } from "workflow"; -import { getRun, start } from "workflow/api"; - -declare function pollUntilComplete(runIds: string[]): Promise; // @setup -declare function collectResults(runIds: string[]): Promise>; // @setup - -// Child workflow -- processes a single document -export async function processDocument(documentId: string) { - "use workflow"; - - const content = await fetchDocument(documentId); - const analysis = await analyzeContent(content); - const summary = await generateSummary(analysis); - - return { documentId, summary }; -} - -async function fetchDocument(documentId: string): Promise { - "use step"; - const res = await fetch(`https://docs.example.com/api/${documentId}`); - return res.text(); -} - -async function analyzeContent(content: string): Promise { - "use step"; - // Call analysis API - return `analysis of ${content.length} chars`; -} - -async function generateSummary(analysis: string): Promise { - "use step"; - // Generate summary from analysis - return `Summary: ${analysis}`; -} - -// Parent workflow -- orchestrates document processing -export async function processDocumentBatch(documentIds: string[]) { - "use workflow"; - - // Spawn a child workflow for each document - const runIds = await spawnChildren(documentIds); - - // Poll until all children complete - await pollUntilComplete(runIds); - - // Collect results - const results = await collectResults(runIds); - - return { processed: results.length, results }; -} - -async function 
spawnChildren( - documentIds: string[] -): Promise { - "use step"; // [!code highlight] - - const runIds: string[] = []; - for (const docId of documentIds) { - const run = await start(processDocument, [docId]); // [!code highlight] - runIds.push(run.runId); - } - return runIds; -} -``` - -### Polling loop - -The parent workflow polls child statuses in a loop, sleeping between checks. This is durable -- if the parent replays, the sleep and status checks replay from the event log. - -```typescript -import { sleep } from "workflow"; -import { getRun } from "workflow/api"; - -const POLL_INTERVAL = "30s"; -const MAX_POLL_ITERATIONS = 120; // 60 minutes at 30s intervals - -async function pollUntilComplete(runIds: string[]): Promise { - let iteration = 0; - - while (iteration < MAX_POLL_ITERATIONS) { - const status = await checkStatuses(runIds); // [!code highlight] - - if (status.running === 0) { - if (status.failed > 0) { - throw new Error( - `${status.failed} of ${runIds.length} children failed` - ); - } - return; // All completed successfully - } - - iteration += 1; - await sleep(POLL_INTERVAL); // [!code highlight] - } - - throw new Error("Timed out waiting for children to complete"); -} - -async function checkStatuses( - runIds: string[] -): Promise<{ running: number; completed: number; failed: number }> { - "use step"; // [!code highlight] - - let running = 0; - let completed = 0; - let failed = 0; - - for (const runId of runIds) { - const run = getRun(runId); // [!code highlight] - const status = await run.status; // [!code highlight] - - if (status === "completed") completed += 1; - else if (status === "failed" || status === "cancelled") failed += 1; - else running += 1; // pending, running - } - - return { running, completed, failed }; -} - -async function collectResults( - runIds: string[] -): Promise> { - "use step"; - - const results = []; - for (const runId of runIds) { - const run = getRun(runId); - const value = await run.returnValue; - results.push(value 
as { documentId: string; summary: string }); - } - return results; -} -``` - -## Fan-out pattern: chunked spawning - -When spawning hundreds of children, batch the `start()` calls to avoid overwhelming the system. Use multiple spawn steps, each launching a chunk of children. - -```typescript -import { start } from "workflow/api"; - -declare function pollUntilComplete(runIds: string[]): Promise; // @setup - -const CHUNK_SIZE = 10; - -export async function largeReportBatch(reportConfigs: Array<{ id: string; query: string }>) { - "use workflow"; - - // Spawn children in chunks - const allRunIds: string[] = []; - for (let i = 0; i < reportConfigs.length; i += CHUNK_SIZE) { - const chunk = reportConfigs.slice(i, i + CHUNK_SIZE); - const runIds = await spawnReportChunk(chunk); // [!code highlight] - allRunIds.push(...runIds); - } - - // Poll until all complete - await pollUntilComplete(allRunIds); - - const results = await collectReportResults(allRunIds); - return { total: results.length, results }; -} - -async function spawnReportChunk( - configs: Array<{ id: string; query: string }> -): Promise { - "use step"; - - const runIds: string[] = []; - for (const config of configs) { - const run = await start(generateReport, [config.id, config.query]); - runIds.push(run.runId); - } - return runIds; -} - -async function generateReport(reportId: string, query: string) { - "use workflow"; - - const data = await queryDatabase(reportId, query); - const formatted = await formatReport(reportId, data); - return { reportId, formatted }; -} - -declare function queryDatabase(reportId: string, query: string): Promise; // @setup -declare function formatReport(reportId: string, data: string): Promise; // @setup - -declare function collectReportResults( - runIds: string[] -): Promise>; // @setup -``` - -## Error handling - -### Tolerating partial failures - -Not every batch requires 100% success. 
Use `allowFailures` logic to let the parent continue when some children fail, while still surfacing the failures. - -```typescript -import { sleep } from "workflow"; -import { getRun } from "workflow/api"; - -const POLL_INTERVAL = "30s"; -const MAX_POLL_ITERATIONS = 120; - -async function pollWithPartialFailures( - runIds: string[], - maxFailureRate: number -): Promise<{ completed: string[]; failed: string[] }> { - let iteration = 0; - const completedIds: string[] = []; - const failedIds: string[] = []; - - while (iteration < MAX_POLL_ITERATIONS) { - const status = await checkDetailedStatuses(runIds); - - completedIds.length = 0; - failedIds.length = 0; - - for (const entry of status) { - if (entry.status === "completed") completedIds.push(entry.runId); - else if (entry.status === "failed" || entry.status === "cancelled") - failedIds.push(entry.runId); - } - - const active = runIds.length - completedIds.length - failedIds.length; - - // Check if failure rate exceeds threshold - const failureRate = failedIds.length / Math.max(1, runIds.length); // [!code highlight] - if (failureRate > maxFailureRate) { // [!code highlight] - throw new Error( // [!code highlight] - `Failure rate ${(failureRate * 100).toFixed(1)}% exceeds ` + // [!code highlight] - `threshold of ${(maxFailureRate * 100).toFixed(1)}%` // [!code highlight] - ); // [!code highlight] - } // [!code highlight] - - if (active === 0) { - return { completed: completedIds, failed: failedIds }; - } - - iteration += 1; - await sleep(POLL_INTERVAL); - } - - throw new Error("Timed out waiting for children"); -} - -async function checkDetailedStatuses( - runIds: string[] -): Promise> { - "use step"; - - const statuses = []; - for (const runId of runIds) { - const run = getRun(runId); - const status = await run.status; - statuses.push({ runId, status }); - } - return statuses; -} -``` - -### Retrying failed children - -When a child fails, the parent can spawn a replacement and continue polling. 
Track restart counts to prevent infinite retry loops. - -```typescript -import { sleep } from "workflow"; - -declare function checkDetailedStatuses(runIds: string[]): Promise>; // @setup - -const POLL_INTERVAL = "30s"; -const MAX_POLL_ITERATIONS = 120; - -async function pollWithRetries( - initialRunIds: string[], - maxRestartsPerChild: number, - spawnReplacement: (index: number) => Promise -): Promise { - const activeRuns = new Map(); - const restartCounts = new Map(); - - initialRunIds.forEach((runId, index) => activeRuns.set(index, runId)); - - let iteration = 0; - - while (iteration < MAX_POLL_ITERATIONS) { - const statuses = await checkDetailedStatuses( - Array.from(activeRuns.values()) - ); - const statusByRunId = new Map( - statuses.map((s) => [s.runId, s.status]) - ); - - for (const [index, runId] of activeRuns.entries()) { - const status = statusByRunId.get(runId) ?? "running"; - - if (status === "completed") { - activeRuns.delete(index); - continue; - } - - if (status === "failed" || status === "cancelled") { - const restarts = (restartCounts.get(index) ?? 0) + 1; // [!code highlight] - restartCounts.set(index, restarts); // [!code highlight] - - if (restarts > maxRestartsPerChild) { // [!code highlight] - throw new Error( // [!code highlight] - `Child ${index} exceeded restart limit (${maxRestartsPerChild})` // [!code highlight] - ); // [!code highlight] - } // [!code highlight] - - const newRunId = await spawnReplacement(index); // [!code highlight] - activeRuns.set(index, newRunId); // [!code highlight] - } - } - - if (activeRuns.size === 0) return; - - iteration += 1; - await sleep(POLL_INTERVAL); - } - - throw new Error("Timed out waiting for children"); -} -``` - -## Tips - -- **`start()` must be called from a step**, not directly from a workflow function. Wrap it in a `"use step"` function. -- **`getRun()` must also be called from a step.** The polling loop lives in the workflow, but the actual status check is a step. 
-- **Set a max iteration count on polling loops** to prevent runaway workflows. Calculate the count from your expected max duration and poll interval. -- **Use chunked spawning for large batches.** Spawning 500 children in a single step can time out. Break it into chunks of 10-50. -- **Each child has its own retry semantics.** Steps inside child workflows retry independently. The parent only sees the child's final status. -- **Use `deploymentId: "latest"`** if children should run on the most recent deployment. See the [`start()` API reference](/docs/api-reference/workflow-api/start#using-deploymentid-latest) for compatibility considerations. - -## Key APIs - -- [`start()`](/docs/api-reference/workflow-api/start) -- spawn a new workflow run and get its run ID -- [`getRun()`](/docs/api-reference/workflow-api/get-run) -- retrieve a workflow run's status and return value -- [`sleep()`](/docs/api-reference/workflow/sleep) -- durably pause between polling iterations -- [`"use workflow"`](/docs/foundations/workflows-and-steps) -- marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) -- marks functions with full Node.js access diff --git a/docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx b/docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx deleted file mode 100644 index ebe3108cab..0000000000 --- a/docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: Distributed Abort Controller -description: A distributed AbortController that uses durable workflows for cross-process cancellation signaling. -type: guide -summary: Build a distributed abort controller that uses workflow streams and hooks to propagate cancellation signals across process boundaries. ---- - -Use this pattern when you need an `AbortController`-like interface that works across distributed systems. 
The controller uses a durable workflow to coordinate cancellation — calling `.abort()` on one machine triggers the `.signal` on any other machine. - -## When to use this - -- **Cross-process cancellation** — Cancel a long-running operation from a different server, worker, or edge function -- **Durable cancellation** — The abort signal persists even if the process that created it crashes -- **UI stop buttons** — Let users cancel operations running on the server from the browser -- **Timeout coordination** — The built-in TTL auto-expires stale controllers - -## Pattern - -The `DistributedAbortController` class encapsulates a workflow that: -1. Accepts a user-provided unique ID (like a chat ID or task ID) -2. Creates or reconnects to an existing workflow using that ID -3. Waits for a hook signal OR TTL expiration -4. Writes a cancellation message to the run's stream when triggered - -### Core Implementation - -```typescript lineNumbers -import { defineHook, getWritable, sleep } from "workflow"; -import { start, getRun, getHookByToken } from "workflow/api"; - -// Default TTL: 24 hours -const DEFAULT_TTL_MS = 24 * 60 * 60 * 1000; -// Default grace period: 1 hour (keeps hook alive after abort for late subscribers) -const DEFAULT_GRACE_MS = 60 * 60 * 1000; - -// Hook to trigger the abort signal -export const abortHook = defineHook<{ reason?: string }>(); - -// The abort message written to the stream -export type AbortMessage = { - type: "abort"; - reason?: string; - expired?: boolean; -}; - -// Helper to create a consistent hook token from the user ID -function getAbortToken(id: string): string { - return `abort:${id}`; -} - -// Step function that writes the abort message to the stream -async function writeAbortSignal(reason?: string, expired?: boolean) { - "use step"; - - const writable = getWritable(); - const writer = writable.getWriter(); - try { - await writer.write({ type: "abort", reason, expired }); - } finally { - writer.releaseLock(); - } - await 
writable.close(); -} - -// Workflow that waits for abort or TTL expiration -export async function abortControllerWorkflow( - id: string, - ttlMs: number, - graceMs: number -) { - "use workflow"; - - const startTime = Date.now(); - const hook = abortHook.create({ token: getAbortToken(id) }); - - // Race: manual abort OR TTL expiration // [!code highlight] - const result = await Promise.race([ - hook.then((payload) => ({ - reason: payload.reason, - expired: false, - })), - sleep(`${ttlMs}ms`).then(() => ({ - reason: "Controller expired", - expired: true, - })), - ]); - - await writeAbortSignal(result.reason, result.expired); - - // Only sleep through grace period on TTL expiration (keeps hook alive for late subscribers). // [!code highlight] - // Manual aborts complete immediately. - if (result.expired) { - const elapsed = Date.now() - startTime; - const remainingTime = graceMs - (elapsed - ttlMs); - if (remainingTime > 0) { - await sleep(`${remainingTime}ms`); // [!code highlight] - } - } - - return { aborted: true, reason: result.reason, expired: result.expired }; -} - -/** - * A distributed abort controller that works across process boundaries. - * Uses a semantically meaningful ID (like a chat ID or task ID) to coordinate. - */ -export class DistributedAbortController { - private id: string; - readonly runId: string; - - private constructor(id: string, runId: string) { - this.id = id; - this.runId = runId; - } - - /** - * Creates or reconnects to a distributed abort controller. - * If a controller with this ID already exists, reconnects to it. - * Otherwise, starts a new workflow. 
- * - * @param id - A unique, semantically meaningful ID (e.g., "chat:123") - * @param options.ttlMs - Time-to-live in ms (default: 24 hours) - * @param options.graceMs - Grace period after abort (default: 1 hour) - */ - static async create( // [!code highlight] - id: string, - options: { ttlMs?: number; graceMs?: number } = {} - ): Promise { - const { ttlMs = DEFAULT_TTL_MS, graceMs = DEFAULT_GRACE_MS } = options; - const token = getAbortToken(id); - - // Try to find an existing run with this hook token - const existingHook = await getHookByToken(token).catch(() => null); // [!code highlight] - - if (existingHook) { - // Reconnect to existing controller - return new DistributedAbortController(id, existingHook.runId); - } - - // Create a new workflow - const run = await start(abortControllerWorkflow, [id, ttlMs, graceMs]); // [!code highlight] - return new DistributedAbortController(id, run.runId); - } - - /** - * Triggers the abort signal. - * Idempotent: safe to call multiple times or after the workflow has completed. - */ - async abort(reason?: string): Promise { // [!code highlight] - try { - await abortHook.resume(getAbortToken(this.id), { reason }); - } catch (error) { - const msg = error instanceof Error ? error.message.toLowerCase() : ''; - if (msg.includes('not found') || msg.includes('expired')) { - return; - } - throw error; - } - } - - /** - * Returns an AbortSignal that fires when abort() is called or TTL expires. - * The signal fires with a reason indicating what triggered it. - */ - get signal(): AbortSignal { // [!code highlight] - const run = getRun<{ aborted: boolean; reason?: string; expired?: boolean }>(this.runId); - const controller = new AbortController(); - const readable = run.getReadable(); - - (async () => { - const reader = readable.getReader(); - try { - while (true) { - const { done, value } = await reader.read(); - if (done) break; - if (value.type === "abort") { - const reason = value.expired - ? 
`${value.reason} (expired)` - : value.reason; - controller.abort(reason); - break; - } - } - } catch (error) { - if (!controller.signal.aborted) { - controller.abort( - error instanceof Error ? error.message : "Stream read failed" - ); - } - } finally { - reader.releaseLock(); - } - })(); - - return controller.signal; - } -} -``` - -### Usage: Single Process - -```typescript lineNumbers -import { DistributedAbortController } from "./distributed-abort-controller"; - -// Create a controller with a meaningful ID -const controller = await DistributedAbortController.create("chat:user-123"); - -// Get the signal and use it with fetch -const signal = controller.signal; -const response = await fetch("https://api.example.com/long-operation", { - signal, -}); - -// Later: abort the operation -await controller.abort("User cancelled"); -``` - -### Usage: Cross-Process Coordination - -```typescript lineNumbers -import { DistributedAbortController } from "./distributed-abort-controller"; - -// Process A: Create the controller -const controller = await DistributedAbortController.create("task:build-123"); -// start long operation using controller.signal... - -// Process B: Reconnect and abort (no run ID sharing needed!) 
-const sameController = await DistributedAbortController.create("task:build-123"); // [!code highlight] -await sameController.abort("Cancelled by admin"); - -// Process C: Reconnect and listen -const anotherRef = await DistributedAbortController.create("task:build-123"); -anotherRef.signal.addEventListener("abort", (e) => { - console.log("Task was cancelled:", (e.target as AbortSignal).reason); -}); -``` - -### Custom TTL - -```typescript lineNumbers -import { DistributedAbortController } from "./distributed-abort-controller"; - -// Short-lived controller for a quick operation (5 minutes) -const shortLived = await DistributedAbortController.create("quick-task", { - ttlMs: 5 * 60 * 1000, -}); - -// Long-lived controller for batch jobs (7 days) -const longLived = await DistributedAbortController.create("batch-job", { - ttlMs: 7 * 24 * 60 * 60 * 1000, -}); - -// When TTL expires, the signal fires with expired reason -shortLived.signal.addEventListener("abort", (e) => { - const reason = (e.target as AbortSignal).reason; - if (reason?.includes("expired")) { - console.log("Controller expired, cleaning up..."); - } -}); -``` - -### API Route for Remote Abort - -```typescript lineNumbers -import { DistributedAbortController } from "@/lib/distributed-abort-controller"; - -export async function POST( - request: Request, - { params }: { params: Promise<{ id: string }> } -) { - const { id } = await params; - const { reason } = await request.json(); - - const controller = await DistributedAbortController.create(id); - await controller.abort(reason || "Cancelled via API"); - - return Response.json({ success: true }); -} -``` - -### Client Cancel Button - -```tsx lineNumbers -"use client"; - -export function CancelButton({ taskId }: { taskId: string }) { - const handleCancel = async () => { - await fetch(`/api/abort/${taskId}`, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ reason: "User clicked cancel" }), - }); - }; - - return ( 
- - ); -} -``` - -## Tips - -- **Use semantic IDs** — Use meaningful IDs like `chat:123` or `task:abc` instead of random UUIDs -- **Create is idempotent** — Calling `create()` with the same ID reconnects to the existing controller -- **TTL auto-cleanup** — Workflows self-terminate after TTL expires; no manual cleanup needed -- **Signal is a getter** — Each access to `.signal` creates a new listener; cache it if needed -- **One-shot** — Once aborted or expired, the workflow completes; create a new controller for new operations - -## Key APIs - -- [`defineHook()`](/docs/api-reference/workflow/define-hook) — type-safe hook for the abort trigger -- [`getWritable()`](/docs/api-reference/workflow/get-writable) — write abort messages to the stream -- [`sleep()`](/docs/api-reference/workflow/sleep) — TTL timer for auto-expiration -- [`start()`](/docs/api-reference/workflow-api/start) — start the abort controller workflow -- [`getHookByToken()`](/docs/api-reference/workflow-api/get-hook-by-token) — find existing run by hook token -- [`getRun()`](/docs/api-reference/workflow-api/get-run) — reconnect to the workflow's readable stream diff --git a/docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx b/docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx deleted file mode 100644 index b793ab731a..0000000000 --- a/docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx +++ /dev/null @@ -1,205 +0,0 @@ ---- -title: Agent Cancellation -description: Cancel a running agent from the outside — either immediately via run.cancel() or gracefully via a stop signal hook. -type: guide -summary: Two patterns for cancelling a running agent — Hard Cancellation via getRun(runId).cancel() for forced termination, or Stop Signal via a hook + Promise.race for a clean exit with cleanup and final stream notification. ---- - -Cancel a running agent from the outside — for example, a "Stop" button in a chat UI, an admin cancellation endpoint, or a timeout fallback. 
Two patterns are available depending on whether you need the agent to exit cleanly or just need the run to stop: **Hard Cancellation** via `getRun(runId).cancel()` for immediate forced termination, or **Stop Signal** via a hook + `Promise.race` for a graceful exit that runs cleanup and notifies streaming clients before returning. - -## When to use this - -* **Chat stop buttons** — let users cancel a long-running agent from the browser -* **Admin cancellation** — stop an agent from a different process or API -* **Timeout fallback** — combine with `sleep()` to auto-stop after a deadline - -## Choosing an approach - -Pick the option that matches what your endpoint needs to deliver to the caller: - -* **Hard Cancellation** — terminates the run immediately with no opportunity for cleanup or client notification. A single line of code, but the workflow throws `WorkflowRunCancelledError` and any streaming clients see an abrupt connection close. -* **Stop Signal** — the workflow exits as soon as the hook fires, runs any pending cleanup, emits a final `data-stopped` part to the stream so the client can render cleanly, and returns a real result. 
- -The trade-offs at a glance: - -| | Hard Cancellation | Stop Signal | -| --- | --- | --- | -| Mechanism | `getRun(runId).cancel()` | Hook + `Promise.race` | -| Speed to terminate | Immediate | At the next `await` boundary in the workflow | -| Runs `finally` / cleanup | No | Yes | -| Final stream notification | No (abrupt close) | Yes (`data-stopped` part) | -| `run.returnValue` | Throws `WorkflowRunCancelledError` | Returns the workflow's result | -| Code complexity | One line | Hook + race + signal step | -| Best for | Stuck or unresponsive runs, forced termination | User-facing stop, admin cancel, timeouts | - -## Hard Cancellation - -Call `.cancel()` on a run to terminate it immediately: - -```typescript lineNumbers -import { getRun } from "workflow/api"; - -export async function POST( - _request: Request, - { params }: { params: Promise<{ runId: string }> } -) { - const { runId } = await params; - await getRun(runId).cancel(); // [!code highlight] - return Response.json({ success: true }); -} -``` - -This is an abrupt termination — the run is stopped mid-step with no opportunity to exit cleanly: - -* **No cleanup runs** — `finally` blocks, defer-style step cleanup, and any logic after the current step are all skipped -* **No final notification to the client** — the writable closes abruptly, so a streaming UI just sees the connection drop with no `data-stopped` part to render a clean ending -* **`run.returnValue` throws** — anyone awaiting the result receives [`WorkflowRunCancelledError`](/docs/api-reference/workflow-errors/workflow-run-cancelled-error) instead of a meaningful payload -* **Underlying step keeps running** — same caveat as the Stop Signal pattern below: the model stream or HTTP call inside the current step continues to completion in the background - -Hard Cancellation is the appropriate choice when the run is stuck or unresponsive, has exceeded its expected runtime, or you don't need a clean exit. 
For everything else — chat stop buttons, admin "stop" actions, timeout fallbacks — you typically want the Stop Signal pattern: the agent finishes its current step, emits a final stream part so the client renders a clean ending, and returns a real result. - -## Stop Signal - - -**Limitation:** This pattern does not cancel the underlying model stream. The agent step writing to the writable continues running in the background until it completes — tokens generated after the stop signal are still produced (and billed by your model provider). What this pattern *does* is exit the workflow function as soon as the hook fires and emit a `data-stopped` part so the client can stop rendering. For hard cross-process cancellation that signals the inner step to bail out, see [Distributed Abort Controller](/cookbook/advanced/distributed-abort-controller). - - -### Example - -```typescript lineNumbers -import { DurableAgent } from "@workflow/ai/agent"; -import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; -import { z } from "zod"; -import type { ModelMessage, UIMessageChunk } from "ai"; - -export const stopHook = defineHook({ - schema: z.object({ reason: z.string().optional() }), -}); - -async function searchWeb({ query }: { query: string }) { - "use step"; - await new Promise((r) => setTimeout(r, 1500)); - return { results: [{ title: `${query} - Wikipedia`, snippet: `Overview of ${query}...` }] }; -} - -async function analyzeData({ topic }: { topic: string }) { - "use step"; - await new Promise((r) => setTimeout(r, 1200)); - return { summary: `Analysis of ${topic}: significant developments found.`, confidence: 0.85 }; -} - -async function emitStopSignal(details: { reason?: string }) { // [!code highlight] - "use step"; - const writer = getWritable().getWriter(); - try { - await writer.write({ type: "data-stopped", id: "stop-signal", data: details } as UIMessageChunk); - } finally { - writer.releaseLock(); - } -} - -export async function stoppableAgent(messages: 
ModelMessage[]) { - "use workflow"; - - const { workflowRunId } = getWorkflowMetadata(); - const hook = stopHook.create({ token: `stop:${workflowRunId}` }); // [!code highlight] - - const agent = new DurableAgent({ - model: "anthropic/claude-haiku-4.5", - instructions: "You are a research assistant. Search and analyze data as needed.", - tools: { - searchWeb: { - description: "Search the web for information", - inputSchema: z.object({ query: z.string() }), - execute: searchWeb, - }, - analyzeData: { - description: "Analyze a piece of data", - inputSchema: z.object({ topic: z.string() }), - execute: analyzeData, - }, - }, - }); - - const result = await Promise.race([ // [!code highlight] - agent - .stream({ messages, writable: getWritable(), maxSteps: 15 }) - .then((r) => ({ type: "complete" as const, messages: r.messages })), - hook.then(({ reason }) => ({ type: "stopped" as const, reason })), // [!code highlight] - ]); - - if (result.type === "stopped") { - await emitStopSignal({ reason: result.reason }); // [!code highlight] - } - - return result; -} -``` - -### API Route to Trigger Stop - -```typescript lineNumbers -import { stopHook } from "@/workflows/stoppable-agent"; - -export async function POST( - request: Request, - { params }: { params: Promise<{ runId: string }> } -) { - const { runId } = await params; - const { reason } = await request.json(); - - await stopHook.resume(`stop:${runId}`, { // [!code highlight] - reason: reason || "User requested stop", - }); - - return Response.json({ success: true }); -} -``` - -### Client Stop Button - -```tsx lineNumbers -"use client"; - -export function StopButton({ runId }: { runId: string }) { - const handleStop = async () => { - await fetch(`/api/chat/${runId}/stop`, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ reason: "User clicked stop" }), - }); - }; - - return ( - - ); -} -``` - -## How it works - -1. 
A hook is created with token `stop:${workflowRunId}` when the workflow starts -2. `Promise.race` runs the agent stream and the stop hook concurrently -3. When the stop API resumes the hook, the race resolves immediately — the workflow exits -4. Before returning, `emitStopSignal` writes a `data-stopped` part to the stream so the client knows the agent was stopped (not just disconnected) -5. The client detects `data-stopped` and updates the UI accordingly - -This is the same pattern used by the [Distributed Abort Controller](/cookbook/advanced/distributed-abort-controller) — race a long-running operation against a hook signal. - -## Adapting this - -* **Add a timeout** — race a third `sleep()` promise to auto-stop after a deadline -* **Audit logging** — include a `reason` field in the stop schema to record who stopped and why -* **Cross-process** — the hook token is deterministic, so any process can call `stopHook.resume()` with the run ID -* **Step limits** — combine with `maxSteps` on the agent to cap execution even without manual stop -* **Hard Cancellation as a fallback** — wire your stop endpoint to fall back to `getRun(runId).cancel()` if the hook resume errors with `not found` / `expired` (for example, the hook was already consumed). This guarantees the run is terminated even when the Stop Signal path is unavailable. 
- -## Key APIs - -* [`defineHook()`](/docs/api-reference/workflow/define-hook) — type-safe hook for the stop signal -* [`getWorkflowMetadata()`](/docs/api-reference/workflow/get-workflow-metadata) — access the run ID for deterministic hook tokens -* [`getWritable()`](/docs/api-reference/workflow/get-writable) — stream a stop notification to the client -* [`DurableAgent`](/docs/api-reference/workflow-ai/durable-agent) — the agent that gets raced against the stop hook -* [`getRun()`](/docs/api-reference/workflow-api/get-run) — entry point for Hard Cancellation: `getRun(runId).cancel()` diff --git a/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx b/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx deleted file mode 100644 index e4e4783aa5..0000000000 --- a/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Durable Agent -description: Replace a stateless AI agent with a durable one that survives crashes, retries tool calls, and streams output. -type: guide -summary: Convert an AI SDK Agent into a DurableAgent backed by a workflow, with tools as retryable steps. ---- - -Use this pattern to make any AI SDK agent durable. The agent becomes a workflow, tools become steps, and the framework handles retries, streaming, and state persistence automatically. - -## When to use this - -- Any AI agent with tool calls that should survive crashes and restarts -- Agents where tool calls hit external APIs that need automatic retries -- Long-running agent sessions where losing progress is unacceptable -- Agents that need per-step observability in the workflow event log - -## Pattern - -Replace `Agent` with `DurableAgent`, wrap the function in `"use workflow"`, mark each tool with `"use step"`, and stream output through `getWritable()`. 
- -### Workflow - -```typescript -import { DurableAgent } from "@workflow/ai/agent"; -import { getWritable } from "workflow"; -import { z } from "zod"; -import type { ModelMessage, UIMessageChunk } from "ai"; - -async function searchFlights({ from, to, date }: { - from: string; - to: string; - date: string; -}) { - "use step"; // [!code highlight] - const res = await fetch( - `https://api.example.com/flights?from=${from}&to=${to}&date=${date}` - ); - if (!res.ok) throw new Error(`Search failed: ${res.status}`); - return res.json(); -} - -async function bookFlight({ flightId, passenger }: { - flightId: string; - passenger: string; -}) { - "use step"; // [!code highlight] - const res = await fetch("https://api.example.com/bookings", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ flightId, passenger }), - }); - if (!res.ok) throw new Error(`Booking failed: ${res.status}`); - return res.json(); -} - -async function checkWeather({ city }: { city: string }) { - "use step"; // [!code highlight] - const res = await fetch(`https://api.weather.com/forecast?city=${city}`); - return res.json(); -} - -export async function flightAgent(messages: ModelMessage[]) { - "use workflow"; - - const agent = new DurableAgent({ // [!code highlight] - model: "anthropic/claude-haiku-4.5", - instructions: "You are a helpful flight booking assistant.", - tools: { - searchFlights: { - description: "Search for available flights between two airports", - inputSchema: z.object({ - from: z.string().describe("Departure airport code"), - to: z.string().describe("Arrival airport code"), - date: z.string().describe("Travel date (YYYY-MM-DD)"), - }), - execute: searchFlights, - }, - bookFlight: { - description: "Book a specific flight for a passenger", - inputSchema: z.object({ - flightId: z.string().describe("Flight ID from search results"), - passenger: z.string().describe("Passenger full name"), - }), - execute: bookFlight, - }, - checkWeather: { - 
description: "Check the weather forecast for a city", - inputSchema: z.object({ - city: z.string().describe("City name"), - }), - execute: checkWeather, - }, - }, - }); - - const result = await agent.stream({ // [!code highlight] - messages, - writable: getWritable(), // [!code highlight] - maxSteps: 10, - }); - - return { messages: result.messages }; -} -``` - -### API route - -```typescript -import type { UIMessage } from "ai"; -import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; -import { start } from "workflow/api"; -import { flightAgent } from "@/app/workflows/flight-agent"; - -export async function POST(req: Request) { - const { messages }: { messages: UIMessage[] } = await req.json(); - const modelMessages = await convertToModelMessages(messages); // [!code highlight] - - const run = await start(flightAgent, [modelMessages]); // [!code highlight] - - return createUIMessageStreamResponse({ // [!code highlight] - stream: run.readable, - headers: { - "x-workflow-run-id": run.runId, - }, - }); -} -``` - -## How it works - -1. **DurableAgent wraps Agent** — same API as AI SDK's `Agent`, but backed by a workflow. If the process crashes, the agent resumes from the last completed step on replay. -2. **Tools as steps** — each tool's `execute` function uses `"use step"`, giving it automatic retries, full Node.js access, and an entry in the workflow event log. -3. **Streaming** — `getWritable()` streams the agent's output (text chunks, tool calls, tool results) to the client in real time via `createUIMessageStreamResponse`. -4. **maxSteps** — limits the total number of LLM calls the agent can make, preventing runaway tool loops. - -## Adapting to your use case - -- **Change the model** — replace `"anthropic/claude-haiku-4.5"` with any AI Gateway model string (e.g. `"openai/gpt-4o"`, `"anthropic/claude-sonnet-4-5"`). -- **Add tools** — define a new `"use step"` function with a Zod schema. Each tool automatically gets retries and persistence. 
-- **Workflow-level tools** — if a tool needs workflow primitives like `sleep()` or `createHook()`, omit `"use step"` so it runs in the workflow context instead. -- **Multi-turn** — pass `result.messages` plus new user messages to subsequent `agent.stream()` calls for multi-turn conversations. -- **Client integration** — use `useChat()` from `@ai-sdk/react` with `WorkflowChatTransport` from `@workflow/ai` for a full chat UI with reconnection support. - -## Key APIs - -- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function -- [`"use step"`](/docs/api-reference/workflow/use-step) — declares step functions with retries and full Node.js access -- [`DurableAgent`](/docs/api-reference/workflow-ai/durable-agent) — durable wrapper around AI SDK's Agent -- [`getWritable()`](/docs/api-reference/workflow/get-writable) — streams agent output to the client -- [`start()`](/docs/api-reference/workflow-api/start) — starts a workflow run from an API route diff --git a/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx b/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx deleted file mode 100644 index f3c4de9054..0000000000 --- a/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Human-in-the-Loop -description: Pause an AI agent to wait for human approval, then resume based on the decision. -type: guide -summary: Use defineHook with the tool call ID to suspend an agent for human approval, with an optional timeout. ---- - -Use this pattern when an AI agent needs human confirmation before performing a consequential action like booking, purchasing, or publishing. The workflow suspends without consuming resources until the human responds. 
- -## When to use this - -- Booking confirmations where users must approve before charges are made -- Content publishing gates where an editor must sign off -- Any agent action where the cost of getting it wrong justifies a human check -- Actions with side effects that can't be easily undone - -## Pattern - -Create a typed hook using `defineHook()`. When the agent calls the approval tool, the tool emits a custom data part to the stream so the client can render approval controls, then creates a hook and suspends. An API route resumes the hook with the decision. - -### Workflow - -```typescript -import { DurableAgent } from "@workflow/ai/agent"; -import { defineHook, sleep, getWritable } from "workflow"; -import { z } from "zod"; -import type { ModelMessage, UIMessageChunk } from "ai"; - -// Exported so the approval API route can call .resume() -export const bookingApprovalHook = defineHook({ // [!code highlight] - schema: z.object({ - approved: z.boolean(), - comment: z.string().optional(), - }), -}); - -async function searchFlights({ from, to, date }: { - from: string; - to: string; - date: string; -}) { - "use step"; - const res = await fetch( - `https://api.example.com/flights?from=${from}&to=${to}&date=${date}` - ); - return res.json(); -} - -async function confirmBooking({ flightId, passenger }: { - flightId: string; - passenger: string; -}) { - "use step"; - const res = await fetch("https://api.example.com/bookings", { - method: "POST", - body: JSON.stringify({ flightId, passenger }), - }); - return res.json(); -} - -// Stream a custom data part so the client can render the approval UI. -// This MUST run before the hook suspends the workflow — otherwise -// the tool-invocation won't appear in the stream until the tool returns, -// and the client would have no way to show approval buttons. 
-async function emitApprovalRequest(details: { - flightId: string; - passenger: string; - price: number; - toolCallId: string; -}) { - "use step"; - const writer = getWritable().getWriter(); - try { - await writer.write({ - type: "data-approval-needed", // [!code highlight] - id: details.toolCallId, - data: details, - } as UIMessageChunk); - } finally { - writer.releaseLock(); - } -} - -// Stream the resolution so the client can update the approval card. -async function emitApprovalResolved(details: { - toolCallId: string; - result: string; -}) { - "use step"; - const writer = getWritable().getWriter(); - try { - await writer.write({ - type: "data-approval-resolved", // [!code highlight] - id: details.toolCallId, - data: details, - } as UIMessageChunk); - } finally { - writer.releaseLock(); - } -} - -// No "use step" — hooks are workflow-level primitives -async function requestBookingApproval( - { flightId, passenger, price }: { - flightId: string; - passenger: string; - price: number; - }, - { toolCallId }: { toolCallId: string } -) { - // Emit to the stream before suspending so the UI can show buttons - await emitApprovalRequest({ flightId, passenger, price, toolCallId }); // [!code highlight] - - const hook = bookingApprovalHook.create({ token: toolCallId }); - - // Race: human decision vs. timeout - const result = await Promise.race([ - hook.then((payload) => ({ type: "decision" as const, ...payload })), - sleep("24h").then(() => ({ type: "timeout" as const, approved: false as const })), - ]); - - if (result.type === "timeout") { - const msg = "Booking request expired."; - await emitApprovalResolved({ toolCallId, result: msg }); // [!code highlight] - return msg; - } - if (!result.approved) { - const msg = `Rejected: ${result.comment || "No reason given"}`; - await emitApprovalResolved({ toolCallId, result: msg }); // [!code highlight] - return msg; - } - - const booking = await confirmBooking({ flightId, passenger }); - const msg = `Booked! 
Confirmation: ${booking.confirmationId}`; - await emitApprovalResolved({ toolCallId, result: msg }); // [!code highlight] - return msg; -} - -export async function bookingAgent(messages: ModelMessage[]) { - "use workflow"; - - const agent = new DurableAgent({ - model: "anthropic/claude-haiku-4.5", - instructions: "You help book flights. Always request approval before booking.", - tools: { - searchFlights: { - description: "Search for available flights", - inputSchema: z.object({ - from: z.string().describe("Departure airport code"), - to: z.string().describe("Arrival airport code"), - date: z.string().describe("Travel date (YYYY-MM-DD)"), - }), - execute: searchFlights, - }, - requestBookingApproval: { - description: "Request human approval before booking a flight", - inputSchema: z.object({ - flightId: z.string().describe("Flight ID to book"), - passenger: z.string().describe("Passenger name"), - price: z.number().describe("Total price"), - }), - execute: requestBookingApproval, - }, - }, - }); - - await agent.stream({ - messages, - writable: getWritable(), - }); -} -``` - -### Approval API route - -The approval route imports the hook definition and calls `.resume()` with the tool call ID as the token: - -```typescript -import { bookingApprovalHook } from "@/app/workflows/booking-agent"; - -export async function POST(req: Request) { - const { toolCallId, approved, comment } = await req.json(); - - await bookingApprovalHook.resume(toolCallId, { approved, comment }); // [!code highlight] - - return Response.json({ success: true }); -} -``` - -### Client rendering - -Listen for `data-approval-needed` and `data-approval-resolved` custom data parts in the message stream. The approval tool invocation itself won't appear until the tool returns, so the custom data parts are the mechanism for showing and updating the approval UI. 
- -```tsx -// Scan all messages for the resolution -const approvalResult = messages - .flatMap((m) => m.parts) - .find((p) => p.type === "data-approval-resolved") - ?.data?.result; - -// In your message parts loop: -{message.parts.map((part, i) => { - if (part.type === "data-approval-needed") { // [!code highlight] - const { flightId, passenger, price, toolCallId } = part.data; - if (approvalResult) { - return
Result: {approvalResult}
; - } - return ( -
-
-
Flight: {flightId}
-
Passenger: {passenger}
-
Price: ${price}
-
-
- {/* [!code highlight] */} - {/* [!code highlight] */} -
-
- ); - } - // Hide the requestBookingApproval tool-invocation part - if (part.type === "tool-invocation" && - part.toolInvocation.toolName === "requestBookingApproval") { - return null; - } - // ... other part types -})} -``` - -## How it works - -1. **`defineHook()` with schema** — creates a typed hook with Zod validation. The approval payload is validated before the workflow receives it. -2. **`toolCallId` as token** — the approval tool uses the tool call ID as the hook token, naturally linking the hook to the specific tool invocation. -3. **`emitApprovalRequest` step** — writes a `data-approval-needed` custom data part to the stream *before* the hook suspends. Without this, the client would never see the approval controls because tool invocations don't stream until the tool returns. -4. **No `"use step"` on the approval tool** — the tool runs at the workflow level because `defineHook().create()` is a workflow primitive. It calls step functions (`emitApprovalRequest`, `emitApprovalResolved`, `confirmBooking`) for I/O. -5. **`Promise.race` with sleep** — the approval races against a durable timeout. If nobody responds, the workflow continues with an expiration message. -6. **`emitApprovalResolved` step** — writes the outcome to the stream so the client can update the card immediately, without waiting for the tool-invocation result. - -## Adapting to your use case - -- **Change the approval schema** — add fields like `reason`, `amount`, `reviewerEmail` to match your domain. -- **Multiple approval gates** — the pattern works for any number of tools. Each tool creates its own hook with its own `toolCallId`. -- **Escalation** — if the first approver doesn't respond, use `sleep()` + another hook to escalate to a backup reviewer. -- **Adjust timeout** — use `"24h"` for production, shorter durations for demos. -- **Workflow-level vs step tools** — tools that use `sleep()`, `defineHook()`, or other workflow primitives must NOT use `"use step"`. 
Tools with only I/O (API calls, DB queries) should use `"use step"` for retries. - -## Key APIs - -- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function -- [`"use step"`](/docs/api-reference/workflow/use-step) — declares step functions with retries -- [`defineHook()`](/docs/api-reference/workflow/define-hook) — type-safe hook with schema validation -- [`sleep()`](/docs/api-reference/workflow/sleep) — durable timeout for approval expiry -- [`getWritable()`](/docs/api-reference/workflow/get-writable) — stream custom data parts from steps -- [`DurableAgent`](/docs/api-reference/workflow-ai/durable-agent) — durable agent with tool definitions diff --git a/docs/content/docs/cookbook/common-patterns/batching.mdx b/docs/content/docs/cookbook/common-patterns/batching.mdx deleted file mode 100644 index a4ed596916..0000000000 --- a/docs/content/docs/cookbook/common-patterns/batching.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Batching & Parallel Processing -description: Process large collections in parallel batches with failure isolation between groups. -type: guide -summary: Split items into fixed-size batches, process each batch concurrently with Promise.allSettled, and pace batches with sleep to avoid overloading downstream services. ---- - -Use batching when you need to process a large list of items in parallel while controlling concurrency. Items are split into fixed-size batches, each batch runs concurrently, and failures in one batch don't affect others. - -## When to use this - -- Bulk data imports (contacts, orders, products from a CSV) -- Processing hundreds or thousands of items against external APIs -- Calling rate-limited APIs where you need to control concurrency -- Any fan-out where you want failure isolation between groups - -## How it works - -1. Records are split into fixed-size batches. -2. Each batch runs in parallel via `Promise.allSettled` — failures in one record don't affect others. -3. 
A `sleep()` between batches paces requests to avoid overloading downstream services. -4. After all batches, a summary is returned with succeeded/failed counts. - -## Pattern - -The workflow splits records into chunks, processes each chunk concurrently, tracks results per batch, and returns a final tally. - -```typescript -import { sleep } from "workflow"; - -type Record = { name: string; email: string; role: string }; - -declare function processRecord(record: Record): Promise; // @setup - -export async function batchImport(records: Record[], batchSize: number) { - "use workflow"; - - let totalSucceeded = 0; - let totalFailed = 0; - - for (let i = 0; i < records.length; i += batchSize) { - const batch = records.slice(i, i + batchSize); - - // Run batch in parallel — failures are isolated per record - const outcomes = await Promise.allSettled( // [!code highlight] - batch.map((record) => processRecord(record)) - ); - - for (let j = 0; j < outcomes.length; j++) { - if (outcomes[j].status === "fulfilled") { - totalSucceeded++; - } else { - totalFailed++; - } - } - - // Pace between batches to avoid overloading downstream - if (i + batchSize < records.length) { - await sleep("1s"); // [!code highlight] - } - } - - return { total: records.length, succeeded: totalSucceeded, failed: totalFailed }; -} -``` - -### Step function - -Each record is processed in its own step with full Node.js access and automatic retries. - -```typescript -type Record = { name: string; email: string; role: string }; - -async function processRecord(record: Record): Promise { - "use step"; - const res = await fetch(`https://api.example.com/contacts`, { - method: "POST", - body: JSON.stringify(record), - }); - if (!res.ok) throw new Error(`Failed to import ${record.email}`); - const { id } = await res.json(); - return id; -} -``` - -## Adapting to your use case - -- Replace the `Record` type with your actual data shape (orders, images, products, etc.). 
-- Replace `processRecord()` with your real import logic — DB upserts, API calls, file processing. -- Tune `batchSize` and the `sleep()` duration to match your downstream rate limits. -- Add or remove tracking as needed — the pattern works with any item type. - -## Tips - -- **Use `Promise.allSettled` over `Promise.all`** when you want to continue even if some items fail. `Promise.all` rejects on the first failure; `allSettled` waits for everything and tells you what failed. -- **Tune batch size to your downstream API limits.** If the API allows 10 concurrent requests, use `batchSize: 10`. -- **Add pacing with `sleep()`** between batches to respect rate limits. The sleep is durable — it survives cold starts. -- **Each `processRecord` call is an independent step.** If one fails, it retries up to 3 times without affecting other items in the batch. - -## Key APIs - -- [`"use workflow"`](/docs/foundations/workflows-and-steps) -- marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) -- marks functions that run with full Node.js access -- [`sleep()`](/docs/api-reference/workflow/sleep) -- pacing delay between batches -- [`Promise.allSettled()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/allSettled) -- runs items in parallel, isolating failures diff --git a/docs/content/docs/cookbook/common-patterns/idempotency.mdx b/docs/content/docs/cookbook/common-patterns/idempotency.mdx deleted file mode 100644 index 7362c656cc..0000000000 --- a/docs/content/docs/cookbook/common-patterns/idempotency.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Idempotency -description: Ensure external side effects happen exactly once, even when steps are retried or workflows are replayed. -type: guide -summary: Use step IDs as idempotency keys for external APIs like Stripe so that retries and replays don't create duplicate charges. ---- - -Workflow steps can be retried (on failure) and replayed (on cold start). 
If a step calls an external API that isn't idempotent, retries could create duplicate charges, send duplicate emails, or double-process records. Use idempotency keys to make these operations safe. - -## When to use this - -- Charging a payment (Stripe, PayPal) -- Sending transactional emails or SMS -- Creating records in external systems where duplicates are harmful -- Any step that has side effects in systems you don't control - -## Pattern: Step ID as idempotency key - -Every step has a unique, deterministic `stepId` available via `getStepMetadata()`. Pass this as the idempotency key to external APIs: - -```typescript -import { getStepMetadata } from "workflow"; - -declare function createCharge(customerId: string, amount: number): Promise<{ id: string }>; // @setup -declare function sendReceipt(customerId: string, chargeId: string): Promise; // @setup - -export async function chargeCustomer(customerId: string, amount: number) { - "use workflow"; - - const charge = await createCharge(customerId, amount); - await sendReceipt(customerId, charge.id); - - return { customerId, chargeId: charge.id, status: "completed" }; -} -``` - -### Step function with idempotency key - -```typescript -import { getStepMetadata } from "workflow"; - -async function createCharge( - customerId: string, - amount: number -): Promise<{ id: string }> { - "use step"; - - const { stepId } = getStepMetadata(); // [!code highlight] - - // Stripe uses the idempotency key to deduplicate requests. - // If this step is retried, Stripe returns the same charge. 
- const charge = await fetch("https://api.stripe.com/v1/charges", { - method: "POST", - headers: { - Authorization: `Bearer ${process.env.STRIPE_SECRET_KEY}`, - "Idempotency-Key": stepId, // [!code highlight] - }, - body: new URLSearchParams({ - amount: String(amount), - currency: "usd", - customer: customerId, - }), - }); - - if (!charge.ok) { - const error = await charge.json(); - throw new Error(`Charge failed: ${error.message}`); - } - - return charge.json(); -} - -async function sendReceipt(customerId: string, chargeId: string): Promise { - "use step"; - - const { stepId } = getStepMetadata(); - - await fetch("https://api.example.com/receipts", { - method: "POST", - headers: { "Idempotency-Key": stepId }, - body: JSON.stringify({ customerId, chargeId }), - }); -} -``` - -## Race condition caveats - -Workflow does not currently provide distributed locking or true exactly-once delivery across concurrent runs. If two workflow runs could process the same entity concurrently: - -- **Rely on the external API's idempotency** (like Stripe's `Idempotency-Key`) rather than checking a local flag. -- **Don't use check-then-act patterns** like "read a flag, then write if not set" -- another run could read the same flag between your read and write. - -If your external API doesn't support idempotency keys natively, consider adding a deduplication layer (e.g., a database unique constraint on the operation ID). - -## Tips - -- **`stepId` is deterministic.** It's the same value across retries and replays of the same step, making it a reliable idempotency key. -- **Always provide idempotency keys for non-idempotent external calls.** Even if you think a step won't be retried, cold-start replay will re-execute it. -- **Handle 409/conflict as success.** If an external API returns "already processed," treat that as a successful result, not an error. -- **Make your own APIs idempotent** where possible. Accept an idempotency key and return the cached result on duplicate requests. 
- -## Key APIs - -- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) -- declares the orchestrator function -- [`"use step"`](/docs/api-reference/workflow/use-step) -- declares step functions with full Node.js access -- [`getStepMetadata()`](/docs/api-reference/step/get-step-metadata) -- provides the deterministic `stepId` for idempotency keys -- [`start()`](/docs/api-reference/workflow-api/start) -- starts a new workflow run diff --git a/docs/content/docs/cookbook/common-patterns/rate-limiting.mdx b/docs/content/docs/cookbook/common-patterns/rate-limiting.mdx deleted file mode 100644 index 5cdc700d0e..0000000000 --- a/docs/content/docs/cookbook/common-patterns/rate-limiting.mdx +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Rate Limiting & Retries -description: Handle 429 responses and transient failures with RetryableError and exponential backoff. -type: guide -summary: When an external API returns 429, throw RetryableError with the Retry-After value so the workflow runtime automatically reschedules the step after the specified delay. ---- - -Use this pattern when calling external APIs that enforce rate limits. Instead of writing manual retry loops, throw `RetryableError` with a `retryAfter` value and let the workflow runtime handle rescheduling. - -## When to use this - -- Calling APIs that return 429 (Too Many Requests) with `Retry-After` headers -- Any step that hits transient failures and needs backoff -- Syncing data with third-party services (Stripe, CRMs, scrapers) - -## Pattern: RetryableError with Retry-After - -A step function calls an external API. On 429, it reads the `Retry-After` header and throws `RetryableError`. The runtime reschedules the step automatically. 
- -```typescript -import { RetryableError } from "workflow"; - -declare function fetchFromCrm(contactId: string): Promise; // @setup -declare function upsertToWarehouse(contactId: string, contact: unknown): Promise; // @setup - -export async function syncContact(contactId: string) { - "use workflow"; - - const contact = await fetchFromCrm(contactId); - await upsertToWarehouse(contactId, contact); - - return { contactId, status: "synced" }; -} -``` - -### Step function with rate limit handling - -```typescript -import { RetryableError } from "workflow"; - -async function fetchFromCrm(contactId: string) { - "use step"; - - const res = await fetch(`https://crm.example.com/contacts/${contactId}`); - - if (res.status === 429) { // [!code highlight] - const retryAfter = res.headers.get("Retry-After"); - throw new RetryableError("Rate limited by CRM", { // [!code highlight] - retryAfter: retryAfter ? parseInt(retryAfter) * 1000 : "1m", - }); - } - - if (!res.ok) throw new Error(`CRM returned ${res.status}`); - return res.json(); -} - -async function upsertToWarehouse(contactId: string, contact: unknown) { - "use step"; - await fetch(`https://warehouse.example.com/contacts/${contactId}`, { - method: "PUT", - body: JSON.stringify(contact), - }); -} -``` - -## Pattern: Exponential backoff - -Use `getStepMetadata()` to access the current attempt number and calculate increasing delays: - -```typescript -import { RetryableError, getStepMetadata } from "workflow"; - -async function callFlakeyApi(endpoint: string) { - "use step"; - - const { attempt } = getStepMetadata(); // [!code highlight] - const res = await fetch(endpoint); - - if (res.status === 429 || res.status >= 500) { - throw new RetryableError(`Request failed (${res.status})`, { // [!code highlight] - retryAfter: (attempt ** 2) * 1000, // 1s, 4s, 9s... 
// [!code highlight] - }); - } - - return res.json(); -} -``` - -## Pattern: Circuit breaker with sleep - -When a dependency is completely down, stop hitting it for a cooldown period using `sleep()`, then probe with a single test request: - -```typescript -import { sleep } from "workflow"; - -export async function circuitBreaker(maxRequests: number = 10) { - "use workflow"; - - let state: "closed" | "open" | "half-open" = "closed"; - let consecutiveFailures = 0; - const FAILURE_THRESHOLD = 3; - - for (let i = 1; i <= maxRequests; i++) { - if (state === "open") { - await sleep("30s"); // Durable cooldown // [!code highlight] - state = "half-open"; - } - - const success = await callService(i); - - if (success) { - consecutiveFailures = 0; - if (state === "half-open") state = "closed"; - } else { - consecutiveFailures++; - if (consecutiveFailures >= FAILURE_THRESHOLD) { - state = "open"; - consecutiveFailures = 0; - } - } - } - - return { status: state === "closed" ? "recovered" : "failed" }; -} - -async function callService(requestNum: number): Promise { - "use step"; - try { - const res = await fetch("https://payment-gateway.example.com/charge"); - return res.ok; - } catch { - return false; - } -} -``` - -## Pattern: Custom max retries - -Override the default retry count (3) for steps that need more or fewer attempts: - -```typescript -async function fetchWithRetries(url: string) { - "use step"; - const res = await fetch(url); - if (!res.ok) throw new Error(`Failed: ${res.status}`); - return res.json(); -} - -// Allow up to 10 retry attempts -fetchWithRetries.maxRetries = 10; // [!code highlight] -``` - -## Application-level retry - -Sometimes you need retry logic at the workflow level -- wrapping a step call with your own backoff instead of relying on the framework's built-in `RetryableError`. This is useful when you want full control over retry conditions, delays, and error filtering. 
- -```typescript -interface RetryOptions { - maxRetries?: number; - baseDelay?: number; - maxDelay?: number; - shouldRetry?: (error: Error, attempt: number) => boolean; -} - -async function withRetry( - fn: () => Promise, - options: RetryOptions = {}, -): Promise { - const { maxRetries = 3, baseDelay = 2000, maxDelay = 10000, shouldRetry } = options; - let lastError: Error | undefined; - - for (let attempt = 0; attempt <= maxRetries; attempt++) { - try { - return await fn(); - } catch (error) { - lastError = error instanceof Error ? error : new Error(String(error)); - const isLastAttempt = attempt === maxRetries; - if (isLastAttempt || (shouldRetry && !shouldRetry(lastError, attempt + 1))) { - throw lastError; - } - // Exponential backoff with jitter - const delay = Math.min(baseDelay * 2 ** attempt * (0.5 + Math.random() * 0.5), maxDelay); - await new Promise(resolve => setTimeout(resolve, delay)); - } - } - - throw lastError; -} -``` - -Use it in a workflow to wrap step calls: - -```typescript -declare function withRetry(fn: () => Promise, options?: { maxRetries?: number; shouldRetry?: (error: Error) => boolean }): Promise; // @setup -declare function downloadFile(url: string): Promise; // @setup - -export async function downloadWithRetry(url: string) { - "use workflow"; - - const result = await withRetry(() => downloadFile(url), { // [!code highlight] - maxRetries: 5, - shouldRetry: (error) => error.message.includes("Timeout"), - }); - - return result; -} -``` - -**When to use this vs `RetryableError`/`FatalError`:** -- **`RetryableError`** runs inside a step -- the framework reschedules the step after the delay. Use it for transient HTTP errors (429, 503) where the runtime should handle backoff. -- **Application-level retry** wraps the step call from the workflow. Use it when you need custom retry conditions, want to retry across different steps, or when you're building a library and prefer not to depend on workflow-specific error classes. 
- -## Tips - -- **`RetryableError` is for transient failures.** Use it when the request might succeed on a later attempt (429, 503, network timeout). -- **`FatalError` is for permanent failures.** Use it when retrying won't help (404, 401, invalid input). This skips all remaining retries. -- **The `retryAfter` option accepts** a millisecond number, a duration string (`"1m"`, `"30s"`), or a `Date` object. -- **Steps retry up to 3 times by default.** Set `fn.maxRetries = N` to change this per step function. -- **Don't write manual sleep-retry loops.** The runtime handles scheduling natively with `RetryableError` -- it's more efficient and survives cold starts. - -## Key APIs - -- [`"use workflow"`](/docs/foundations/workflows-and-steps) -- marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) -- marks functions that run with full Node.js access -- [`RetryableError`](/docs/api-reference/workflow/retryable-error) -- signals the runtime to retry after a delay -- [`FatalError`](/docs/api-reference/workflow/fatal-error) -- signals a permanent failure, skipping retries -- [`getStepMetadata()`](/docs/api-reference/step/get-step-metadata) -- provides the current attempt number and step ID -- [`sleep()`](/docs/api-reference/workflow/sleep) -- durable pause for circuit breaker cooldowns diff --git a/docs/content/docs/cookbook/common-patterns/saga.mdx b/docs/content/docs/cookbook/common-patterns/saga.mdx deleted file mode 100644 index b200d402e9..0000000000 --- a/docs/content/docs/cookbook/common-patterns/saga.mdx +++ /dev/null @@ -1,247 +0,0 @@ ---- -title: Transactions & Rollbacks (Saga) -description: Coordinate multi-step transactions with automatic rollback when a step fails. -type: guide -summary: Run a sequence of steps where each registers a compensation. If any step throws a FatalError, compensations execute in reverse order to restore consistency. 
---- - -Use the saga pattern when a business transaction spans multiple services and you need automatic rollback if any step fails. Each forward step registers a compensation, and on failure the workflow unwinds them in reverse order. - -## When to use this - -- Multi-service transactions (reserve inventory, charge payment, provision access) -- Any sequence where partial completion leaves the system in an inconsistent state -- Operations that need "all or nothing" semantics across external APIs - -## How it works - -1. Each forward step does work and registers a compensation function. -2. If any step throws `FatalError`, the catch block runs compensations in reverse (LIFO) order to restore consistency. -3. Regular errors are retried automatically (up to 3x by default). Use `FatalError` only for permanent failures where retrying won't help. - -## Pattern - -Each step returns a result and pushes a compensation handler onto a stack. If a later step throws a `FatalError`, the workflow catches it and executes compensations in LIFO order. 
- -```typescript -import { FatalError } from "workflow"; - -declare function reserveSeats(accountId: string, seats: number): Promise; // @setup -declare function releaseSeats(accountId: string, reservationId: string): Promise; // @setup -declare function captureInvoice(accountId: string, seats: number): Promise; // @setup -declare function refundInvoice(accountId: string, invoiceId: string): Promise; // @setup -declare function provisionSeats(accountId: string, seats: number): Promise; // @setup -declare function deprovisionSeats(accountId: string, entitlementId: string): Promise; // @setup -declare function sendConfirmation(accountId: string, invoiceId: string, entitlementId: string): Promise; // @setup - -export async function subscriptionUpgradeSaga(accountId: string, seats: number) { - "use workflow"; - - const compensations: Array<() => Promise> = []; - - try { - const reservationId = await reserveSeats(accountId, seats); - compensations.push(() => releaseSeats(accountId, reservationId)); // [!code highlight] - - const invoiceId = await captureInvoice(accountId, seats); - compensations.push(() => refundInvoice(accountId, invoiceId)); // [!code highlight] - - const entitlementId = await provisionSeats(accountId, seats); - compensations.push(() => deprovisionSeats(accountId, entitlementId)); // [!code highlight] - - // No compensation — notifications are fire-and-forget - await sendConfirmation(accountId, invoiceId, entitlementId); - - return { status: "completed" }; - } catch (error) { - // Unwind compensations in reverse (LIFO) order - for (const compensate of compensations.reverse()) { // [!code highlight] - await compensate(); // [!code highlight] - } - - return { status: "rolled_back" }; - } -} -``` - -### Step functions - -Each step is a `"use step"` function with full Node.js access (fetch, fs, npm packages). 
Forward steps do the work and throw `FatalError` on permanent failure; compensation steps undo it and must be idempotent — safe to call multiple times if the workflow restarts mid-rollback. - -```typescript -import { FatalError } from "workflow"; - -// Forward steps - -async function reserveSeats(accountId: string, seats: number): Promise { - "use step"; - const res = await fetch(`https://api.example.com/seats/reserve`, { - method: "POST", - body: JSON.stringify({ accountId, seats }), - }); - if (!res.ok) throw new FatalError("Seat reservation failed"); // [!code highlight] - const { reservationId } = await res.json(); - return reservationId; -} - -async function captureInvoice(accountId: string, seats: number): Promise { - "use step"; - const res = await fetch(`https://api.example.com/invoices`, { - method: "POST", - body: JSON.stringify({ accountId, seats }), - }); - if (!res.ok) throw new FatalError("Invoice capture failed"); // [!code highlight] - const { invoiceId } = await res.json(); - return invoiceId; -} - -async function provisionSeats(accountId: string, seats: number): Promise { - "use step"; - const res = await fetch(`https://api.example.com/entitlements`, { - method: "POST", - body: JSON.stringify({ accountId, seats }), - }); - if (!res.ok) throw new FatalError("Provisioning failed"); // [!code highlight] - const { entitlementId } = await res.json(); - return entitlementId; -} - -async function sendConfirmation( - accountId: string, - invoiceId: string, - entitlementId: string -): Promise { - "use step"; - await fetch(`https://api.example.com/notifications`, { - method: "POST", - body: JSON.stringify({ accountId, invoiceId, entitlementId, template: "upgrade-complete" }), - }); -} - -// Compensation steps — must be idempotent - -async function releaseSeats(accountId: string, reservationId: string): Promise { - "use step"; - await fetch(`https://api.example.com/seats/release`, { - method: "POST", - body: JSON.stringify({ accountId, reservationId }), - 
}); -} - -async function refundInvoice(accountId: string, invoiceId: string): Promise { - "use step"; - await fetch(`https://api.example.com/invoices/${invoiceId}/refund`, { - method: "POST", - body: JSON.stringify({ accountId }), - }); -} - -async function deprovisionSeats(accountId: string, entitlementId: string): Promise { - "use step"; - await fetch(`https://api.example.com/entitlements/${entitlementId}`, { - method: "DELETE", - body: JSON.stringify({ accountId }), - }); -} -``` - -### Streaming step progress (optional) - -Use `getWritable()` to stream progress events to a UI so users can see each step execute in real time. - -```typescript -import { FatalError } from "workflow"; -import { getWritable } from "workflow"; - -type SagaEvent = - | { type: "step_start"; step: string } - | { type: "step_done"; step: string; detail: string } - | { type: "step_failed"; step: string; error: string } - | { type: "compensating"; step: string } - | { type: "compensated"; step: string } - | { type: "result"; status: "completed" | "rolled_back" }; - -async function emit(event: SagaEvent) { - "use step"; - const writer = getWritable().getWriter(); - try { - await writer.write(event); - } finally { - writer.releaseLock(); - } -} - -declare function reserveSeats(accountId: string, seats: number): Promise; // @setup -declare function releaseSeats(accountId: string, reservationId: string): Promise; // @setup -declare function captureInvoice(accountId: string, seats: number): Promise; // @setup -declare function refundInvoice(accountId: string, invoiceId: string): Promise; // @setup -declare function provisionSeats(accountId: string, seats: number): Promise; // @setup -declare function deprovisionSeats(accountId: string, entitlementId: string): Promise; // @setup -declare function sendConfirmation(accountId: string, invoiceId: string, entitlementId: string): Promise; // @setup - -export async function subscriptionUpgradeSaga(accountId: string, seats: number) { - "use workflow"; - 
- const compensations: Array<{ name: string; execute: () => Promise }> = []; - - try { - await emit({ type: "step_start", step: "Reserve Seats" }); - const reservationId = await reserveSeats(accountId, seats); - compensations.push({ name: "Release Seats", execute: () => releaseSeats(accountId, reservationId) }); - await emit({ type: "step_done", step: "Reserve Seats", detail: reservationId }); - - await emit({ type: "step_start", step: "Capture Invoice" }); - const invoiceId = await captureInvoice(accountId, seats); - compensations.push({ name: "Refund Invoice", execute: () => refundInvoice(accountId, invoiceId) }); - await emit({ type: "step_done", step: "Capture Invoice", detail: invoiceId }); - - await emit({ type: "step_start", step: "Provision Seats" }); - const entitlementId = await provisionSeats(accountId, seats); - compensations.push({ name: "Deprovision Seats", execute: () => deprovisionSeats(accountId, entitlementId) }); - await emit({ type: "step_done", step: "Provision Seats", detail: entitlementId }); - - // No compensation — notifications are fire-and-forget - await emit({ type: "step_start", step: "Send Confirmation" }); - await sendConfirmation(accountId, invoiceId, entitlementId); - await emit({ type: "step_done", step: "Send Confirmation", detail: "sent" }); - - await emit({ type: "result", status: "completed" }); - return { status: "completed" }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : "Unknown error"; - await emit({ type: "step_failed", step: "failed", error: errorMessage }); - - // Unwind compensations in reverse (LIFO) order - for (const comp of compensations.reverse()) { - await emit({ type: "compensating", step: comp.name }); - await comp.execute(); - await emit({ type: "compensated", step: comp.name }); - } - - await emit({ type: "result", status: "rolled_back" }); - return { status: "rolled_back" }; - } -} -``` - -## Adapting to your use case - -- Replace the step functions with real API calls. 
Each `"use step"` function has full Node.js access. -- Add or remove steps as needed — the pattern scales to any number of steps. -- Make compensations idempotent — they may be retried if the workflow restarts mid-rollback. -- The `emit()` calls and `SagaEvent` type are optional — remove them if you don't need real-time UI progress. - -## Tips - -- **Use `FatalError` for permanent failures.** Regular errors trigger automatic retries (up to 3 by default). Throw `FatalError` when retrying won't help (e.g., insufficient funds, invalid input). -- **Make compensations idempotent.** If a compensation step is retried, it should produce the same result. Check whether the resource was already released before releasing it again. -- **Compensation steps are also `"use step"` functions.** This makes them durable — if the workflow restarts mid-rollback, it resumes where it left off. -- **Capture values in closures carefully.** Use block-scoped variables or copy values before pushing compensations to avoid referencing stale state. -- **Notifications don't need compensations.** Fire-and-forget steps like sending emails or Slack messages typically don't register a compensation. 
- -## Key APIs - -- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) -- declares the orchestrator function -- [`"use step"`](/docs/api-reference/workflow/use-step) -- declares step functions with full Node.js access -- [`FatalError`](/docs/api-reference/workflow/fatal-error) -- non-retryable error that triggers compensation -- [`getWritable()`](/docs/api-reference/workflow/get-writable) -- streams data from workflows for real-time UI updates diff --git a/docs/content/docs/cookbook/common-patterns/scheduling.mdx b/docs/content/docs/cookbook/common-patterns/scheduling.mdx deleted file mode 100644 index 988040a75a..0000000000 --- a/docs/content/docs/cookbook/common-patterns/scheduling.mdx +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Sleep, Scheduling & Timed Workflows -description: Use durable sleep to schedule actions minutes, hours, days, or weeks into the future. -type: guide -summary: Schedule future actions with durable sleep that survives cold starts, and race sleeps against hooks to let external events cancel the workflow early. ---- - -Workflow's `sleep()` is durable — it survives cold starts, restarts, and deployments. Combined with `defineHook()` and `Promise.race()`, it becomes the foundation for interruptible scheduled workflows like drip campaigns, reminders, and timed sequences. - -## When to use this - -- Sending emails on a schedule (drip campaigns, onboarding sequences, reminders) -- Waiting for a deadline but allowing early cancellation -- Any pattern where "do X, wait N hours, then do Y" needs to be both reliable and interruptible - -## Drip campaign with cancellation - -A drip campaign sends emails at intervals, sleeping between each. Each sleep races against a cancellation hook — if an external event fires the hook (e.g. user converts, unsubscribes), the campaign stops immediately. 
- -```typescript -import { defineHook, sleep } from "workflow"; - -// Hook that any API route can fire to cancel the drip -export const cancelDrip = defineHook<{ reason?: string }>(); // [!code highlight] - -async function sendEmail(email: string, template: string): Promise { - "use step"; - await fetch("https://api.sendgrid.com/v3/mail/send", { - method: "POST", - headers: { Authorization: `Bearer ${process.env.SENDGRID_KEY}` }, - body: JSON.stringify({ to: [{ email }], template_id: template }), - }); -} - -export async function emailSequence(email: string) { - "use workflow"; - - await sendEmail(email, "welcome"); - - // Race durable sleep against the cancellation hook - const hook = cancelDrip.create({ token: `cancel-drip:${email}` }); // [!code highlight] - const cancelled = await Promise.race([ // [!code highlight] - sleep("2d").then(() => false), // [!code highlight] - hook.then(() => true), // [!code highlight] - ]); // [!code highlight] - if (cancelled) return { status: "cancelled", email }; - - await sendEmail(email, "getting-started-tips"); - - // Create a fresh hook for the next sleep window - const hook2 = cancelDrip.create({ token: `cancel-drip:${email}` }); // [!code highlight] - const cancelled2 = await Promise.race([ // [!code highlight] - sleep("2d").then(() => false), // [!code highlight] - hook2.then(() => true), // [!code highlight] - ]); // [!code highlight] - if (cancelled2) return { status: "cancelled", email }; - - await sendEmail(email, "feature-highlights"); - - return { status: "drip-complete", email }; -} -``` - -### Cancelling from an API route - -Any server-side code can fire the hook by calling `.resume()` with the same token: - -```typescript -import { cancelDrip } from "@/workflows/email-sequence"; - -export async function POST(req: Request) { - const { email, reason } = await req.json(); - - if (!email) { - return Response.json({ error: "email is required" }, { status: 400 }); - } - - try { - await 
cancelDrip.resume(`cancel-drip:${email}`, { // [!code highlight] - reason: reason ?? "User completed action", // [!code highlight] - }); // [!code highlight] - } catch (error) { - const msg = error instanceof Error ? error.message.toLowerCase() : ""; - if (msg.includes("not found") || msg.includes("expired")) { - return Response.json({ - success: true, - email, - note: "No active drip found (already completed or cancelled)", - }); - } - throw error; - } - - return Response.json({ success: true, email }); -} -``` - -## How it works - -1. **Durable sleep** — `sleep("2d")` persists through restarts at zero compute cost. The workflow resumes precisely when the timer fires. -2. **Hook creation** — `cancelDrip.create({ token })` registers a hook that resolves when any external system calls `.resume()` with the same token. -3. **Race** — `Promise.race([sleep(...), hook])` blocks until either the timer fires or the hook is resumed, whichever comes first. -4. **Fresh hooks per window** — after a sleep completes normally, the previous hook instance is consumed. A new `.create()` call registers a fresh hook for the next sleep window, reusing the same token. - -## Adapting to your use case - -- **Change durations** — replace `"2d"` with any duration string (`"1h"`, `"7d"`, `"30m"`) or a `Date` object for absolute times. -- **Add more steps** — the pattern scales to any number of email-then-sleep pairs. -- **Snooze instead of cancel** — resolve the hook with a `snooze` payload and sleep again: `sleep(new Date(Date.now() + payload.snoozeMs))`. -- **Timeout any operation** — the same `Promise.race(sleep, work)` pattern works for adding deadlines to slow steps. -- **Real providers** — swap the `sendEmail` step body for Resend, Postmark, or any HTTP API. The `"use step"` function has full Node.js access. - -## Tips - -- **`sleep()` accepts** duration strings (`"1d"`, `"2h"`, `"30s"`), milliseconds, or `Date` objects for sleeping until a specific time. 
-- **Durable means durable.** A `sleep("7d")` workflow costs nothing while sleeping — no compute, no memory. -- **Use `sleep()` in workflow context only.** Step functions cannot call `sleep()` directly. If a step needs a delay, use `setTimeout` inside the step. - -## Key APIs - -- [`"use workflow"`](/docs/foundations/workflows-and-steps) — marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) — marks functions that run with full Node.js access -- [`sleep()`](/docs/api-reference/workflow/sleep) — durable wait (survives restarts, zero compute cost) -- [`defineHook()`](/docs/api-reference/workflow/define-hook) — creates a typed hook that external systems can fire -- [`Promise.race()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race) — races sleep against hooks for interruptible waits diff --git a/docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx b/docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx deleted file mode 100644 index adb3e535c9..0000000000 --- a/docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Sequential & Parallel Execution -description: Compose steps with familiar async/await patterns — sequential await, Promise.all, and Promise.race. -type: guide -summary: Workflows are just async functions, so all the standard composition primitives (await, Promise.all, Promise.race) apply unchanged — including racing webhooks against durable sleeps. -related: - - /docs/foundations/workflows-and-steps - - /cookbook/common-patterns/timeouts - - /cookbook/common-patterns/scheduling ---- - -Workflows are written in plain async/await — there's no new control-flow API to learn. Sequential awaits chain steps that depend on each other, `Promise.all` runs independent steps in parallel, and `Promise.race` returns whichever finishes first. 
These compose with workflow primitives like [`sleep()`](/docs/api-reference/workflow/sleep) and [`createWebhook()`](/docs/api-reference/workflow/create-webhook) since those are also just promises. - -## When to use this - -- **Pipelines** — each step depends on the previous step's output (validate → process → store) -- **Independent fan-out** — fetch multiple resources or perform multiple actions that don't depend on each other -- **Race conditions** — return as soon as one of N operations completes (timeout, first-responder, deadline) -- **Mixing primitives** — running steps, sleeps, and webhooks side-by-side in the same control-flow expression - -## Pattern - -### Sequential - -The simplest way to orchestrate steps is to execute them one after another, where each step depends on the previous step's output. - -```typescript lineNumbers -declare function validateData(data: unknown): Promise; // @setup -declare function processData(data: string): Promise; // @setup -declare function storeData(data: string): Promise; // @setup - -export async function dataPipelineWorkflow(data: unknown) { - "use workflow"; - - const validated = await validateData(data); - const processed = await processData(validated); - const stored = await storeData(processed); - - return stored; -} -``` - -### Parallel with `Promise.all` - -When steps don't depend on each other, run them concurrently with `Promise.all`. The workflow waits until all of them resolve. 
- -```typescript lineNumbers -declare function fetchUser(userId: string): Promise<{ name: string }>; // @setup -declare function fetchOrders(userId: string): Promise<{ items: string[] }>; // @setup -declare function fetchPreferences(userId: string): Promise<{ theme: string }>; // @setup - -export async function fetchUserData(userId: string) { - "use workflow"; - - const [user, orders, preferences] = await Promise.all([ // [!code highlight] - fetchUser(userId), // [!code highlight] - fetchOrders(userId), // [!code highlight] - fetchPreferences(userId), // [!code highlight] - ]); // [!code highlight] - - return { user, orders, preferences }; -} -``` - -### Race with `Promise.race` - -`Promise.race` resolves as soon as the first promise settles. Since [`sleep()`](/docs/api-reference/workflow/sleep) and [`createWebhook()`](/docs/api-reference/workflow/create-webhook) return promises, they compose naturally — for example, waiting for a webhook callback with a deadline: - -```typescript lineNumbers -import { sleep, createWebhook } from "workflow"; - -declare function executeExternalTask(webhookUrl: string): Promise; // @setup - -export async function runExternalTask(userId: string) { - "use workflow"; - - const webhook = createWebhook(); - await executeExternalTask(webhook.url); - - await Promise.race([ // [!code highlight] - webhook, // [!code highlight] - sleep("1 day"), // [!code highlight] - ]); // [!code highlight] - - console.log("Done"); -} -``` - -For racing operations against deadlines specifically (timeouts), see the dedicated [Timeouts](/cookbook/common-patterns/timeouts) recipe — it covers result discrimination, `FatalError` semantics, and the "loser keeps running" caveat. - -### Combining sequential, parallel, and durable primitives - -Most real workflows combine all three. 
Here's a simplified version of the [birthday card generator demo](https://github.com/vercel/workflow-examples/tree/main/birthday-card-generator) — sequential card generation, parallel RSVP fan-out, non-blocking webhook collection, and a durable sleep until the birthday: - -```typescript lineNumbers -import { createWebhook, sleep, type Webhook } from "workflow"; - -declare function makeCardText(prompt: string): Promise; // @setup -declare function makeCardImage(text: string): Promise; // @setup -declare function sendRSVPEmail(friend: string, webhook: Webhook): Promise; // @setup -declare function sendBirthdayCard(text: string, image: string, rsvps: unknown[], email: string): Promise; // @setup - -export async function birthdayWorkflow( - prompt: string, - email: string, - friends: string[], - birthday: Date -) { - "use workflow"; - - const text = await makeCardText(prompt); // [!code highlight] - const image = await makeCardImage(text); // [!code highlight] - - const webhooks = friends.map(() => createWebhook()); - - await Promise.all( // [!code highlight] - friends.map((friend, i) => sendRSVPEmail(friend, webhooks[i])) // [!code highlight] - ); // [!code highlight] - - const rsvps: unknown[] = []; - webhooks.map((webhook) => - webhook.then((req) => req.json()).then(({ rsvp }) => rsvps.push(rsvp)) - ); - - await sleep(birthday); // [!code highlight] - - await sendBirthdayCard(text, image, rsvps, email); - - return { text, image, status: "Sent" }; -} -``` - -## How it works - -1. **`await` is durable.** When the workflow awaits a step, the runtime persists the step's input, suspends the workflow, runs the step, and replays the workflow with the step's result on resume. The same applies to `sleep()` and `createWebhook()`. -2. **`Promise.all` runs steps concurrently.** Each promise in the array is suspended on its own and the workflow resumes only when all have settled. Failures propagate — if any promise rejects, the whole `Promise.all` rejects. -3. 
**`Promise.race` resolves on the first settle.** The losing promises keep running in the background but their results are discarded by the workflow. -4. **All primitives are promises.** `sleep("1 day")` and `createWebhook()` return promises, so they compose with `Promise.all` / `Promise.race` exactly like steps do — this is what makes patterns like "race a webhook against a 24-hour deadline" a one-liner. - -## Adapting to your use case - -- **Replace `Promise.all` with `Promise.allSettled`** when partial failures should not abort the rest. You'll get an array of `{ status, value | reason }` instead of throwing on the first rejection. -- **Bound the parallelism** — `Promise.all` over 1000 items will fan out 1000 concurrent steps. If your downstream APIs can't handle that, batch the array into chunks (see [Batching](/cookbook/common-patterns/batching)). -- **Add a deadline to any race** — pair the operation with `sleep("30s").then(() => "timeout" as const)` and check the discriminated result. See [Timeouts](/cookbook/common-patterns/timeouts). -- **Mix steps and hooks in a race** — wait for an external signal *or* a deadline *or* a step result, all in the same `Promise.race`. The first one to resolve wins. 
- -## Key APIs - -- [`"use workflow"`](/docs/foundations/workflows-and-steps) — marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) — marks functions with full Node.js access -- [`sleep()`](/docs/api-reference/workflow/sleep) — durable sleep that survives restarts -- [`createWebhook()`](/docs/api-reference/workflow/create-webhook) — webhook URL the workflow can race against -- [`Promise.all()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/all) — wait for all promises -- [`Promise.race()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race) — wait for the first to settle -- [`Promise.allSettled()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/allSettled) — wait for all, including failures diff --git a/docs/content/docs/cookbook/common-patterns/timeouts.mdx b/docs/content/docs/cookbook/common-patterns/timeouts.mdx deleted file mode 100644 index f5f05c7f16..0000000000 --- a/docs/content/docs/cookbook/common-patterns/timeouts.mdx +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Timeouts -description: Add deadlines to slow operations by racing them against a durable sleep. -type: guide -summary: Use `Promise.race` with `sleep()` to bound the time any step, hook, or webhook is allowed to take — and recover gracefully when the deadline fires first. -related: - - /docs/api-reference/workflow/sleep - - /docs/foundations/hooks - - /cookbook/common-patterns/scheduling - - /cookbook/common-patterns/webhooks ---- - -A common requirement is bounding how long a workflow waits for something to finish — a slow step, an external webhook, a human approval. Race the operation against a durable `sleep()` with `Promise.race()` — whichever finishes first wins, and the loser keeps running but its result is ignored. 
- -## When to use this - -- **Slow steps** — bound the time spent waiting on third-party APIs, model calls, or expensive computation -- **External callbacks** — give webhooks a deadline so the workflow doesn't hang forever waiting for an event that may never arrive -- **Human approvals** — auto-decline or escalate when a hook isn't resumed within a window -- **Polling loops** — give an outer poll-until-ready loop an overall budget - -## Pattern - -### Timeout on a slow step - -```typescript lineNumbers -import { sleep } from "workflow"; - -declare function processData(data: string): Promise; // @setup - -export async function processWithTimeout(data: string) { - "use workflow"; - - const result = await Promise.race([ // [!code highlight] - processData(data), // [!code highlight] - sleep("30s").then(() => "timeout" as const), // [!code highlight] - ]); // [!code highlight] - - if (result === "timeout") { - throw new Error("Processing timed out after 30 seconds"); - } - - return result; -} -``` - -### Timeout on a webhook - -The same pattern works for any promise — including hooks and webhooks. Here a webhook waits for an external service to call back, with a hard deadline of 7 days: - -```typescript lineNumbers -import { sleep, createWebhook } from "workflow"; - -declare function sendApprovalRequest(requestId: string, webhookUrl: string): Promise; // @setup - -export async function waitForApproval(requestId: string) { - "use workflow"; - - const webhook = createWebhook<{ approved: boolean }>(); - await sendApprovalRequest(requestId, webhook.url); - - const result = await Promise.race([ // [!code highlight] - webhook.then((req) => req.json()), // [!code highlight] - sleep("7 days").then(() => ({ timedOut: true }) as const), // [!code highlight] - ]); // [!code highlight] - - if ("timedOut" in result) { - throw new Error("Approval request expired after 7 days"); - } - - return result.approved; -} -``` - -## How it works - -1. 
**Durable sleep** — `sleep("30s")` persists through restarts at zero compute cost. The workflow resumes precisely when the timer fires. -2. **Race** — `Promise.race([work, sleep(...)])` returns the value of whichever promise resolves first. The loser keeps running in the background but its result is ignored by the workflow. -3. **Discriminated result** — tagging the sleep branch with a sentinel value (`"timeout" as const`, `{ timedOut: true }`) lets TypeScript narrow the result and pick the right branch. -4. **Throw to fail the workflow** — inside a workflow function, throwing an `Error` exits the run with that error. Use `FatalError` inside steps; throw plain errors inside workflows. - - -**The losing operation keeps running.** `Promise.race` doesn't cancel — when the sleep wins, the underlying step (or model call, or HTTP request) continues to completion in the background. This is fine for idempotent reads but matters when the operation has side effects or costs money. For hard cancellation across processes, see [Distributed Abort Controller](/cookbook/advanced/distributed-abort-controller). - - -## Adapting to your use case - -- **Different durations** — `sleep()` accepts duration strings (`"30s"`, `"5m"`, `"7 days"`), milliseconds, or `Date` objects for absolute deadlines. -- **Soft timeout (retry)** — instead of throwing, loop and retry with a fresh `Promise.race` and a backoff. -- **Soft timeout (fallback)** — return a default value when the timer wins instead of throwing: `if (result === "timeout") return cachedFallback`. -- **Combine with cancellation** — race three promises: the operation, a deadline `sleep()`, and a cancellation hook. See the [Scheduling cookbook](/cookbook/common-patterns/scheduling) for the cancellation half of this pattern. -- **Per-step deadlines** — wrap each step in its own `Promise.race` for independent budgets, or use a single outer race for an overall workflow deadline. 
- -## Key APIs - -- [`sleep()`](/docs/api-reference/workflow/sleep) — durable wait (survives restarts, zero compute cost) -- [`createWebhook()`](/docs/api-reference/workflow/create-webhook) — create a webhook URL the workflow can race against -- [`defineHook()`](/docs/api-reference/workflow/define-hook) — typed hook for in-process cancellation -- [`Promise.race()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race) — race operations against deadlines diff --git a/docs/content/docs/cookbook/common-patterns/webhooks.mdx b/docs/content/docs/cookbook/common-patterns/webhooks.mdx deleted file mode 100644 index e327ad7246..0000000000 --- a/docs/content/docs/cookbook/common-patterns/webhooks.mdx +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: Webhooks & External Callbacks -description: Receive HTTP callbacks from external services, process them durably, and respond inline. -type: guide -summary: Create webhook endpoints that your workflow can await, process incoming requests in steps, and respond to the caller — all within durable workflow context. ---- - -Use webhooks when external services push events to your application via HTTP callbacks. The workflow creates a webhook URL, suspends with zero compute cost, and resumes when a request arrives. 
- -## When to use this - -- Accepting callbacks from payment processors (Stripe, PayPal) -- Waiting for third-party verification or processing results -- Any integration where an external system calls you back asynchronously - -## Pattern: Processing webhook events - -Create a webhook with manual response control, then iterate over incoming requests: - -```typescript -import { createWebhook, type RequestWithResponse } from "workflow"; - -declare function processEvent(request: RequestWithResponse): Promise<{ type: string }>; // @setup - -export async function paymentWebhook(orderId: string) { - "use workflow"; - - const webhook = createWebhook({ respondWith: "manual" }); // [!code highlight] - // webhook.url is the URL to give to the external service - - const ledger: { type: string }[] = []; - - for await (const request of webhook) { // [!code highlight] - const entry = await processEvent(request); - ledger.push(entry); - - // Break when we've received a terminal event - if (entry.type === "payment.succeeded" || entry.type === "refund.created") { - break; - } - } - - return { orderId, webhookUrl: webhook.url, ledger, status: "settled" }; -} -``` - -### Step function for processing - -Each webhook request is processed in its own step, giving you full Node.js access for validation, database writes, and responding to the caller: - -```typescript -import { type RequestWithResponse } from "workflow"; - -async function processEvent( - request: RequestWithResponse -): Promise<{ type: string }> { - "use step"; - - const body = await request.json().catch(() => ({})); - const type = body?.type ?? 
"unknown"; - - // Validate, process, and respond inline - if (type === "payment.succeeded") { - // Record the payment in your database - await request.respondWith(Response.json({ ack: true, action: "captured" })); // [!code highlight] - } else if (type === "payment.failed") { - await request.respondWith(Response.json({ ack: true, action: "flagged" })); - } else { - await request.respondWith(Response.json({ ack: true, action: "ignored" })); - } - - return { type }; -} -``` - -## Pattern: Async request-reply with timeout - -Submit a request to an external service, pass it your webhook URL, then race the callback against a deadline: - -```typescript -import { createWebhook, sleep, FatalError, type RequestWithResponse } from "workflow"; - -export async function asyncVerification(documentId: string) { - "use workflow"; - - const webhook = createWebhook({ respondWith: "manual" }); - - // Submit to vendor, passing our webhook URL for the callback - await submitToVendor(documentId, webhook.url); - - // Race: wait for callback OR timeout after 30 seconds - const result = await Promise.race([ // [!code highlight] - (async () => { - for await (const request of webhook) { - const body = await processCallback(request); - return body; - } - throw new FatalError("Webhook closed without callback"); - })(), - sleep("30s").then(() => ({ status: "timed_out" as const })), // [!code highlight] - ]); - - return { documentId, ...result }; -} - -async function submitToVendor(documentId: string, callbackUrl: string): Promise { - "use step"; - await fetch("https://vendor.example.com/verify", { - method: "POST", - body: JSON.stringify({ documentId, callbackUrl }), - }); -} - -async function processCallback( - request: RequestWithResponse -): Promise<{ status: string; details: string }> { - "use step"; - const body = await request.json(); - await request.respondWith(Response.json({ ack: true })); - return { - status: body.approved ? "verified" : "rejected", - details: body.details ?? 
body.reason ?? "", - }; -} -``` - -## Pattern: Large payload by reference - -When payloads are too large to serialize into the event log, pass a lightweight reference (a "claim check") instead. Use a hook to signal when the data is ready: - -```typescript -import { defineHook } from "workflow"; - -export const blobReady = defineHook<{ blobToken: string }>(); // [!code highlight] - -export async function importLargeFile(importId: string) { - "use workflow"; - - // Suspend until the external system signals the blob is uploaded - const { blobToken } = await blobReady.create({ token: `upload:${importId}` }); // [!code highlight] - - // Process by reference -- the full payload never enters the event log - await processBlob(blobToken); - - return { importId, blobToken, status: "indexed" }; -} - -async function processBlob(blobToken: string): Promise { - "use step"; - // Fetch the blob using the token, process it - const res = await fetch(`https://storage.example.com/blobs/${blobToken}`); - const data = await res.arrayBuffer(); - // Index, transform, or store the data -} -``` - -Resume from an API route when the upload completes: - -```typescript -import { resumeHook } from "workflow/api"; - -// POST /api/upload-complete -export async function POST(request: Request) { - const { importId, blobToken } = await request.json(); - await resumeHook(`upload:${importId}`, { blobToken }); // [!code highlight] - return Response.json({ ok: true }); -} -``` - -## Tips - -- **`respondWith: "manual"`** gives you control over the HTTP response from inside a step. Use this when you need to validate the request before responding. -- **`for await` on a webhook** lets you process multiple events from the same URL. Use `break` to stop listening after a terminal event. -- **Webhooks auto-generate URLs** at `/.well-known/workflow/v1/webhook/:token`. Pass this URL to external services. -- **Race webhooks against `sleep()`** for deadlines. 
If the callback doesn't arrive in time, the workflow can take a fallback action. -- **For large payloads**, use a hook + reference token instead of passing the data through the workflow. The event log serializes all step inputs/outputs, so large payloads hurt performance. - -## Key APIs - -- [`"use workflow"`](/docs/foundations/workflows-and-steps) -- marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) -- marks functions with full Node.js access -- [`createWebhook()`](/docs/api-reference/workflow/create-webhook) -- creates an HTTP endpoint the workflow can await -- [`defineHook()`](/docs/api-reference/workflow/define-hook) -- creates a typed hook for signal-based patterns -- [`sleep()`](/docs/api-reference/workflow/sleep) -- durable timer for deadlines -- [`FatalError`](/docs/api-reference/workflow/fatal-error) -- prevents retry on permanent failures diff --git a/docs/content/docs/cookbook/common-patterns/workflow-composition.mdx b/docs/content/docs/cookbook/common-patterns/workflow-composition.mdx deleted file mode 100644 index ca2b6751e9..0000000000 --- a/docs/content/docs/cookbook/common-patterns/workflow-composition.mdx +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Workflow Composition -description: Call workflows from other workflows by direct await (flatten into the parent) or background spawn via start() (separate run). -type: guide -summary: Compose workflows two ways — direct await flattens the child into the parent's event log, while background spawn via start() runs the child as an independent run. -related: - - /cookbook/advanced/child-workflows - - /docs/api-reference/workflow-api/start - - /docs/api-reference/workflow-api/get-run ---- - -Workflows can call other workflows. Choose between two composition modes depending on whether the parent needs the child's result inline (direct await) or wants to fire the child off as an independent run (background spawn). 
For massive fan-out with polling and partial-failure handling, see [Child Workflows](/cookbook/advanced/child-workflows). - -## When to use this - -- **Direct await** — the parent needs the child's result before continuing, and you want a single unified event log -- **Background spawn** — the parent doesn't need to wait, and you want the child to be observable as a separate run with its own `runId` - -## Pattern - -### Direct await (flattening) - -Call a child workflow with `await` and the child's steps execute inline within the parent — they appear in the parent's event log as if you'd called them directly. - -```typescript lineNumbers -declare function sendEmail(userId: string): Promise; // @setup -declare function sendPushNotification(userId: string): Promise; // @setup -declare function createAccount(userId: string): Promise; // @setup -declare function setupPreferences(userId: string): Promise; // @setup - -// Child workflow -export async function sendNotifications(userId: string) { - "use workflow"; - - await sendEmail(userId); - await sendPushNotification(userId); - return { notified: true }; -} - -// Parent workflow calls the child directly -export async function onboardUser(userId: string) { - "use workflow"; - - await createAccount(userId); - await sendNotifications(userId); // [!code highlight] - await setupPreferences(userId); - - return { userId, status: "onboarded" }; -} -``` - -The parent waits for the child to finish before continuing. Both functions share a single workflow run, a single retry boundary, and a single event log. - -### Background spawn via `start()` - -To run a child workflow independently without blocking the parent, call [`start()`](/docs/api-reference/workflow-api/start) from a step. This launches the child as a separate workflow run with its own `runId`. 
- -```typescript lineNumbers -import { start } from "workflow/api"; - -declare function generateReport(reportId: string): Promise; // @setup -declare function fulfillOrder(orderId: string): Promise<{ id: string }>; // @setup -declare function sendConfirmation(orderId: string): Promise; // @setup - -async function triggerReportGeneration(reportId: string) { - "use step"; // [!code highlight] - - const run = await start(generateReport, [reportId]); // [!code highlight] - return run.runId; -} - -export async function processOrder(orderId: string) { - "use workflow"; - - const order = await fulfillOrder(orderId); - - const reportRunId = await triggerReportGeneration(orderId); // [!code highlight] - - await sendConfirmation(orderId); - - return { orderId, reportRunId }; -} -``` - -The parent continues immediately after `start()` returns. The child runs independently and can be monitored separately using the returned `runId` (e.g., via [`getRun()`](/docs/api-reference/workflow-api/get-run)). - - -If you want the child workflow to run on the latest deployment rather than the current one, pass [`deploymentId: "latest"`](/docs/api-reference/workflow-api/start#using-deploymentid-latest) in the `start()` options. This is currently a Vercel-specific feature. Be aware that the child workflow's function name, file path, argument types, and return type must remain compatible across deployments — renaming the function or changing its location will change the workflow ID, and modifying expected inputs or outputs can cause serialization failures. - - -## How it works - -1. **Direct await flattens.** When a workflow function awaits another workflow function, the child's `"use workflow"` directive is treated as inline — the child's steps emit into the parent's event log and share the parent's run ID. -2. **`start()` mints a new run.** The child gets its own `runId`, its own event log, and its own retry boundary. The parent only sees the `runId` returned by `start()`. -3. 
**`start()` must be called from a step.** Calling `start()` directly from a workflow function is not allowed — wrap it in a `"use step"` function. This keeps the spawn deterministic across replays. - -## Choosing between the two modes - -| | Direct await | Background spawn (`start()`) | -| --- | --- | --- | -| Parent waits for child | Yes | No | -| Has its own `runId` | No (shares parent's) | Yes | -| Has its own event log | No | Yes | -| Has its own retry boundary | No | Yes | -| Best for | Sequential composition, helper workflows | Independent work, fire-and-forget, fan-out | - -## Adapting to your use case - -- **Spawn many children at once** — call `start()` in a loop inside a step. For more advanced fan-out (chunking, polling, partial-failure handling), graduate to the [Child Workflows](/cookbook/advanced/child-workflows) recipe. -- **Wait for a background child to finish** — combine `start()` with `getRun()` polling. The [Child Workflows](/cookbook/advanced/child-workflows) page covers the full polling loop. -- **Pass results back from background children** — the spawn step returns the `runId`; later, a poll step uses `getRun(runId).returnValue` to fetch the final result. - -## Key APIs - -- [`"use workflow"`](/docs/foundations/workflows-and-steps) — marks the orchestrator function -- [`"use step"`](/docs/foundations/workflows-and-steps) — marks functions with full Node.js access -- [`start()`](/docs/api-reference/workflow-api/start) — spawn a child workflow as a separate run -- [`getRun()`](/docs/api-reference/workflow-api/get-run) — retrieve a workflow run's status and return value diff --git a/docs/content/docs/cookbook/index.mdx b/docs/content/docs/cookbook/index.mdx deleted file mode 100644 index 94cb6d9d18..0000000000 --- a/docs/content/docs/cookbook/index.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Cookbook -description: Best-practice workflow patterns with copy-paste code examples. 
-type: overview ---- - -A curated collection of workflow patterns with clean, copy-paste code examples for real use cases. - -## Agent Patterns - -- [**Durable Agent**](/cookbook/agent-patterns/durable-agent) — Replace a stateless AI agent with one that survives crashes and retries tool calls -- [**Human-in-the-Loop**](/cookbook/agent-patterns/human-in-the-loop) — Pause an agent for human approval, then resume based on the decision -- [**Agent Cancellation**](/cookbook/agent-patterns/agent-cancellation) — Stop a running agent immediately via `run.cancel()` or gracefully via a hook + `Promise.race` - -## Common Patterns - -- [**Sequential & Parallel Execution**](/cookbook/common-patterns/sequential-and-parallel) — Compose steps with `await`, `Promise.all`, and `Promise.race` against durable sleeps and webhooks -- [**Workflow Composition**](/cookbook/common-patterns/workflow-composition) — Call workflows from other workflows by direct await or background spawn via `start()` -- [**Saga**](/cookbook/common-patterns/saga) — Coordinate multi-step transactions with automatic rollback when a step fails -- [**Batching**](/cookbook/common-patterns/batching) — Process large collections in parallel batches with failure isolation -- [**Rate Limiting**](/cookbook/common-patterns/rate-limiting) — Handle 429 responses and transient failures with RetryableError and backoff -- [**Scheduling**](/cookbook/common-patterns/scheduling) — Use durable sleep to schedule actions minutes, hours, or weeks ahead -- [**Timeouts**](/cookbook/common-patterns/timeouts) — Add deadlines to slow steps, hooks, and webhooks by racing them against a durable sleep -- [**Idempotency**](/cookbook/common-patterns/idempotency) — Ensure side effects happen exactly once, even when steps retry -- [**Webhooks**](/cookbook/common-patterns/webhooks) — Receive HTTP callbacks from external services and process them durably - -## Integrations - -- [**AI SDK**](/cookbook/integrations/ai-sdk) — Use streamText() directly 
inside a workflow for lower-level control over model calls and tool execution -- [**Chat SDK**](/cookbook/integrations/chat-sdk) — Build durable chat sessions with workflow persistence and AI SDK chat primitives -- [**Sandbox**](/cookbook/integrations/sandbox) — Orchestrate Vercel Sandbox lifecycle inside durable workflows - -## Advanced - -- [**Child Workflows**](/cookbook/advanced/child-workflows) — Spawn and orchestrate child workflows from a parent -- [**Distributed Abort Controller**](/cookbook/advanced/distributed-abort-controller) — Build a cross-process abort controller using workflow streams and hooks -- [**Serializable Steps**](/cookbook/advanced/serializable-steps) — Wrap non-serializable third-party objects so they cross the workflow boundary -- [**Publishing Libraries**](/cookbook/advanced/publishing-libraries) — Ship npm packages that export reusable workflow functions diff --git a/docs/content/docs/cookbook/integrations/ai-sdk.mdx b/docs/content/docs/cookbook/integrations/ai-sdk.mdx deleted file mode 100644 index 7f5c41e49e..0000000000 --- a/docs/content/docs/cookbook/integrations/ai-sdk.mdx +++ /dev/null @@ -1,360 +0,0 @@ ---- -title: AI SDK -description: Use AI SDK's streamText directly inside durable workflows for lower-level control over model calls and tool execution. -type: guide -summary: Use streamText() inside a workflow for full control over model options, stop conditions, and output schemas — while tools remain durable steps. -related: - - /docs/ai - - /docs/ai/chat-session-modeling - - /docs/ai/defining-tools - - /docs/ai/resumable-streams - - /docs/api-reference/workflow-ai/durable-agent ---- - -[AI SDK](https://ai-sdk.dev/) is Vercel's framework-agnostic TypeScript toolkit for building AI-powered apps and agents — unified provider access, streaming, tool calling, structured output, and UI hooks. 
Workflow SDK complements it by making those calls durable: the model request, the tool loop, and the multi-turn conversation all survive restarts and timeouts. - -For the full AI SDK reference (providers, `streamText`, `generateObject`, `useChat`, tool calling, etc.) see the [AI SDK docs](https://ai-sdk.dev/docs). This page covers the Workflow-specific integration points. - - -For most agent use cases, prefer [`DurableAgent`](/cookbook/agent-patterns/durable-agent) which wraps [`streamText`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text) and manages the tool loop automatically. This page covers using `streamText()` directly when you need lower-level control. - - -## When to use streamText directly - -Use [`streamText()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text) instead of `DurableAgent` when you need: - -* **Custom stop conditions** — [`stopWhen`](https://ai-sdk.dev/docs/ai-sdk-core/agents#stop-conditions), [`prepareStep`](https://ai-sdk.dev/docs/ai-sdk-core/agents#prepare-step), or [`onStepFinish`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text#on-step-finish) callbacks -* **Structured output** — [`Output.object()`](https://ai-sdk.dev/docs/ai-sdk-core/generating-structured-data) or `Output.array()` alongside tool calling -* **Step-level callbacks** — `onStepFinish` for logging, metrics, or branching logic -* **Provider options** — per-step model switching, reasoning budgets, or custom [provider options](https://ai-sdk.dev/docs/ai-sdk-core/provider-options) - -## Multi-turn pattern - -One workflow run = one full conversation. The workflow suspends between turns on a hook and resumes when the next user message arrives. Conversation state, tool history, and intermediate computation all live inside the run. 
- - - - - -```typescript title="workflows/support.ts" lineNumbers -import { streamText, stepCountIs } from "ai"; -import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; -import type { ModelMessage, UIMessageChunk } from "ai"; -import { z } from "zod"; - -const MAX_TURNS = 20; - -export const turnHook = defineHook({ // [!code highlight] - schema: z.object({ message: z.string() }), -}); - -async function lookupOrder({ orderId }: { orderId: string }) { - "use step"; - const res = await fetch(`https://api.store.com/orders/${orderId}`); - return res.json(); -} - -async function processRefund({ orderId, reason }: { orderId: string; reason: string }) { - "use step"; - const res = await fetch("https://api.store.com/refunds", { - method: "POST", - body: JSON.stringify({ orderId, reason }), - }); - return res.json(); -} - -const TOOLS = { - lookupOrder: { - description: "Look up an order by ID", - inputSchema: z.object({ orderId: z.string() }), - execute: lookupOrder, - }, - processRefund: { - description: "Process a refund", - inputSchema: z.object({ orderId: z.string(), reason: z.string() }), - execute: processRefund, - }, -}; - -// Per-turn step — streams one agent response to the durable writable // [!code highlight] -async function runTurn(messages: ModelMessage[]) { - "use step"; - - const result = streamText({ - model: "anthropic/claude-haiku-4.5", - system: "You are a customer support agent.", - messages, - tools: TOOLS, - stopWhen: stepCountIs(8), - }); - - const writable = getWritable(); - // preventClose keeps the durable writable open so the next turn can // write to it. Each turn still emits its own start + finish chunks. 
- await result.toUIMessageStream().pipeTo(writable, { preventClose: true }); // [!code highlight] - - const response = await result.response; - return { responseMessages: response.messages }; -} - -export async function supportWorkflow(initialMessages: ModelMessage[]) { - "use workflow"; - - const { workflowRunId } = getWorkflowMetadata(); - // Create the hook once, outside the loop — same token = HookConflictError // [!code highlight] - const hook = turnHook.create({ token: workflowRunId }); // [!code highlight] - let allMessages = initialMessages; - - for (let turn = 0; turn < MAX_TURNS; turn++) { - const { responseMessages } = await runTurn(allMessages); - allMessages = [...allMessages, ...responseMessages]; - - const { message } = await hook; // [!code highlight] suspend until next user message - if (message === "/done") break; - - allMessages = [...allMessages, { role: "user", content: message }]; - } - - return { turns: MAX_TURNS }; -} -``` - - - - - -One endpoint handles first turn, follow-ups, and the `/done` exit. The client sends `runId` in the body to distinguish first vs follow-up. - -```typescript title="app/api/support/route.ts" lineNumbers -import type { UIMessage, UIMessageChunk } from "ai"; -import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; -import { start, getRun } from "workflow/api"; -import { supportWorkflow, turnHook } from "@/workflows/support"; - -// Pump the durable stream until this turn's `finish` chunk, then close // the HTTP response. The source reader is released (not cancelled) so the -// workflow's durable stream keeps flowing for the next turn. 
-function sliceUntilFinish( // [!code highlight] - source: ReadableStream -): ReadableStream { - return new ReadableStream({ - async start(controller) { - const reader = source.getReader(); - try { - while (true) { - const { done, value } = await reader.read(); - if (done) break; - controller.enqueue(value); - if (value.type === "finish") break; // [!code highlight] - } - controller.close(); - } catch (e) { - controller.error(e); - } finally { - reader.releaseLock(); - } - }, - }); -} - -// `/done` exits the workflow without emitting chunks. Return a synthetic -// start+finish so useChat's lifecycle terminates cleanly. -function emptyTurnStream(): ReadableStream { - return new ReadableStream({ - start(controller) { - controller.enqueue({ type: "start", messageId: crypto.randomUUID() }); - controller.enqueue({ type: "finish" }); - controller.close(); - }, - }); -} - -export async function POST(req: Request) { - const { messages, runId }: { messages: UIMessage[]; runId?: string } = - await req.json(); - const modelMessages = await convertToModelMessages(messages); - - // Follow-up turn: resume hook, return stream starting AFTER the last turn // [!code highlight] - if (runId) { - try { - const run = getRun(runId); - - // Snapshot tail before resuming so our slice only contains this turn // [!code highlight] - const probe = run.getReadable(); - const tailIndex = await probe.getTailIndex(); - await probe.cancel(); - - const lastUser = modelMessages.filter((m) => m.role === "user").at(-1); - const text = - typeof lastUser?.content === "string" - ? lastUser.content - : Array.isArray(lastUser?.content) - ? 
lastUser.content - .filter((p): p is { type: "text"; text: string } => - "type" in p && p.type === "text" - ) - .map((p) => p.text) - .join("") - : ""; - - await turnHook.resume(runId, { message: text }); // [!code highlight] - - if (text === "/done") { - return createUIMessageStreamResponse({ - stream: emptyTurnStream(), - headers: { "x-workflow-run-id": runId }, - }); - } - - const stream = sliceUntilFinish( - run.getReadable({ startIndex: tailIndex + 1 }) // [!code highlight] - ); - - return createUIMessageStreamResponse({ - stream, - headers: { "x-workflow-run-id": runId }, - }); - } catch (e: unknown) { - const msg = e instanceof Error ? e.message.toLowerCase() : ""; - if (!msg.includes("not found") && !msg.includes("expired")) throw e; - // Stale runId — fall through to start fresh - } - } - - // First turn: start a new workflow // [!code highlight] - const run = await start(supportWorkflow, [modelMessages]); - const stream = sliceUntilFinish(run.readable); - - return createUIMessageStreamResponse({ - stream, - headers: { "x-workflow-run-id": run.runId }, - }); -} -``` - - - - - -Store the `runId` in a ref and pass it in the body of every follow-up. `WorkflowChatTransport` forwards it for you. 
- -```tsx title="components/support-chat.tsx" lineNumbers -"use client"; - -import { useChat } from "@ai-sdk/react"; -import { WorkflowChatTransport } from "@workflow/ai"; -import { useMemo, useRef, useState } from "react"; - -export function SupportChat() { - const [input, setInput] = useState(""); - const runIdRef = useRef(null); // [!code highlight] - - const transport = useMemo( - () => - new WorkflowChatTransport({ - api: "/api/support", - prepareSendMessagesRequest: ({ messages, body }) => ({ - body: { ...body, messages, runId: runIdRef.current }, // [!code highlight] - }), - onChatSendMessage: (response) => { - const id = response.headers.get("x-workflow-run-id"); - if (id) runIdRef.current = id; // [!code highlight] - }, - }), - [] - ); - - const { messages, sendMessage, status } = useChat({ transport }); - const busy = status === "streaming" || status === "submitted"; - - return ( -
{ - e.preventDefault(); - if (busy || !input.trim()) return; - sendMessage({ text: input }); - setInput(""); - }} - > - {messages.map((m) => ( -
{m.role}: {m.parts.map((p) => p.type === "text" ? p.text : "").join("")}
- ))} - setInput(e.target.value)} disabled={busy} /> -
- ); -} -``` - -
- -
- -## How it works - -1. **One workflow = one conversation.** The workflow loops on a hook, keeping `allMessages`, tool history, and state alive across turns. -2. **Hook is created once.** `turnHook.create({ token: workflowRunId })` outside the loop — calling it twice with the same token throws `HookConflictError`. -3. **`preventClose: true`** on `pipeTo` keeps the durable writable open so the next turn can write to it. -4. **`sliceUntilFinish`** in the API reads chunks until `type === "finish"`, then closes the HTTP response. The source reader is released — not cancelled — so the workflow stream keeps flowing. -5. **`startIndex: tailIndex + 1`** gives each follow-up response only the new chunks, avoiding replay of previous turns. -6. **`/done`** resumes the hook so the workflow exits cleanly, then returns a synthetic `start` + `finish` so `useChat` transitions out of "streaming". - -## Pitfalls - -Non-obvious correctness details worth knowing before adapting this pattern. - -### Snapshot `tailIndex` *before* resuming the hook - -{/* @skip-typecheck - fragment referencing variables from the surrounding multi-turn pattern */} -```typescript -const tailIndex = await probe.getTailIndex(); // [!code highlight] FIRST -await probe.cancel(); -await turnHook.resume(runId, { message: text }); // [!code highlight] THEN -const stream = run.getReadable({ startIndex: tailIndex + 1 }); -``` - -Reversing the order races the workflow: by the time you read `tailIndex`, the next turn has already written its `start` chunk, and your `startIndex + 1` skips past it. - -### Don't call `writable.close()` inside a workflow function - -I/O operations like closing streams must happen inside a `"use step"` function. Calling `writable.close()` directly in the workflow body throws `Not supported in workflow functions`. When the workflow returns, the runtime closes the underlying writable for you. 
- -### Don't use `TransformStream.terminate()` to slice the stream - -A `TransformStream` with `controller.terminate()` on the `finish` chunk seems like the obvious fit for `sliceUntilFinish`, but throws `Invalid state: TransformStream has been terminated` when late-arriving chunks hit the transform callback. Manual pumping through a custom `ReadableStream` (as shown above) sidesteps the problem entirely. - -### Release the source reader, don't cancel it - -In `sliceUntilFinish`, use `reader.releaseLock()` in the `finally` block rather than `source.cancel()`. Cancelling propagates upstream and closes the durable writable, breaking the next turn. Releasing the lock just detaches our reader; the durable stream keeps flowing. - -### Handle stale `runId` gracefully - -Clients can send a `runId` from a long-gone workflow (localStorage, back button, server restart). Wrap the follow-up path in a try/catch for `not found` / `expired` and fall through to the first-turn code path to start a fresh workflow. - -## streamText vs DurableAgent - -| | `streamText()` | `DurableAgent` | -|---|---|---| -| **Tool loop** | AI SDK handles via `stopWhen` | DurableAgent handles internally | -| **LLM call durability** | Re-executes on replay | Each LLM call is a durable step | -| **Stop conditions** | `stopWhen`, `prepareStep` | `prepareStep` only | -| **Structured output** | `Output.object()`, `Output.array()` | Not available | -| **Step callbacks** | `onStepFinish`, `onChunk` | Not available | -| **Setup** | Manual stream piping | Automatic | - -Use `DurableAgent` for most agent use cases. Use `streamText` when you need the additional control. 
- -## Key APIs - -**AI SDK** ([docs](https://ai-sdk.dev/docs)) - -* [`streamText()`](https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text) — core streaming function; `toUIMessageStream()` pipes into the durable writable -* [`tool()` / tool calling](https://ai-sdk.dev/docs/ai-sdk-core/tools-and-tool-calling) — tools wrap `"use step"` functions so each tool call is replayed from the log, not re-executed -* [`stepCountIs()` / `stopWhen`](https://ai-sdk.dev/docs/ai-sdk-core/agents#stop-conditions) — bound the agent loop inside each turn -* [`convertToModelMessages()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/convert-to-model-messages) / [`createUIMessageStreamResponse()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/create-ui-message-stream-response) — UI ↔ model message conversion at the API boundary -* [`useChat()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat) — React hook that consumes the UI message stream on the client - -**Workflow SDK** - -* [`"use step"`](/docs/api-reference/workflow/use-step) — makes tool executions durable -* [`defineHook()`](/docs/api-reference/workflow/define-hook) — suspension point for follow-up messages -* [`getWritable()`](/docs/api-reference/workflow/get-writable) — resumable stream output -* [`getRun()`](/docs/api-reference/workflow-api/get-run) — `run.getReadable({ startIndex })` for slicing per-turn streams -* [`WorkflowChatTransport`](/docs/api-reference/workflow-ai/workflow-chat-transport) — passes `runId` between turns diff --git a/docs/content/docs/cookbook/integrations/chat-sdk.mdx b/docs/content/docs/cookbook/integrations/chat-sdk.mdx deleted file mode 100644 index 42128a7a17..0000000000 --- a/docs/content/docs/cookbook/integrations/chat-sdk.mdx +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: Chat SDK -description: Make Chat SDK bot sessions durable — one workflow run per conversation thread, with hooks bridging inbound platform events into long-running agent logic. 
-type: guide -summary: Chat SDK normalizes Slack, Teams, Discord, Telegram and friends into one thread/message model. Workflow SDK gives each thread a durable run that owns multi-turn state, can sleep for hours, and survives restarts. -related: - - /docs/cookbook/integrations/ai-sdk - - /docs/cookbook/integrations/sandbox - - /docs/api-reference/workflow/define-hook - - /docs/api-reference/workflow-api/start - - /docs/api-reference/workflow-api/get-run ---- - -[Chat SDK](https://chat-sdk.dev/) is a unified TypeScript SDK for building bots across Slack, Microsoft Teams, Google Chat, Discord, Telegram, GitHub, Linear, and WhatsApp. Write the bot once, deploy to every platform. It handles webhook verification, event normalization, subscriptions, and cross-platform features like cards and modals. - -Workflow SDK complements it by making bot **sessions** durable. Each conversation thread maps to a long-running workflow run that: - -- Owns multi-turn state in the durable event log instead of Redis-by-hand bookkeeping -- Can `sleep()` for hours or days waiting for a user reply, an approval, or a scheduled follow-up -- Survives deploys, cold starts, and crashes — the session picks up from the last step on replay -- Receives follow-up messages via hooks, so the bot stays responsive while the workflow is still running - -The rest of this page covers the integration pattern. For a full Slack + Next.js + Redis walkthrough, see the [Durable chat sessions guide](https://chat-sdk.dev/docs/guides/durable-chat-sessions-nextjs) on chat-sdk.dev. - -## How It Fits Together - -Chat SDK owns the edge — webhook verification, event routing, `thread.post()` / `thread.stream()`. Workflow owns the session — state, loops, sleeps, retries. They meet at exactly two points: - -```mermaid -flowchart TD - A["Platform webhook"] --> B["Chat SDK event handler
(onNewMention, onSubscribedMessage, …)"] - B -->|"no runId in thread state"| C["start(durableChatSession, …)"] - B -->|"runId in thread state"| D["resumeHook(runId, { message })"] - C --> E["Workflow run (durable)
one per thread; suspends between turns"] - D --> E - E --> F[""use step" helpers
thread.post(), thread.subscribe(), thread.setState(), …"] -``` - -- **Inbound** — Chat SDK handlers decide whether to `start(workflow, [thread, message])` or `resumeHook(runId, { message })`. The `runId` lives in Chat SDK's thread state (Redis, Postgres, or any state adapter). -- **Outbound** — the workflow calls Chat SDK APIs (`thread.post()`, `thread.subscribe()`, `thread.setState()`) from inside step functions. Never from the top level of a workflow file — adapter packages use Node-only modules that aren't available in the workflow sandbox. - -## Why Workflow + Chat SDK - -Without Workflow, a long-running bot session usually means one of: -- Holding a webhook request open while the agent runs (doesn't survive restarts, blows past platform timeouts) -- Writing session state to Redis manually, plus a scheduler for timeouts and retries, plus custom reconnection logic - -Workflow replaces all of that with a single durable function. The bot can: - -- Run a tool loop for minutes while the user watches typing indicators -- Wait for a human approval in another thread before continuing -- Schedule a follow-up message 24 hours later via `sleep("24h")` -- Pause on sandbox snapshot, resume when the user sends the next command (see the [Sandbox integration](/docs/cookbook/integrations/sandbox)) - -Because the session *is* a workflow run, its history is recoverable from the event log — no separate message store to keep in sync. - -## The Pattern: One Thread = One Workflow Run - -Three files. The bot definition is separate from the workflow so adapter packages stay out of the workflow sandbox. 
- - - - - -Register the `Chat` instance as a singleton so step functions can dynamically import it and resolve adapters + state: - -```typescript title="lib/bot.ts" lineNumbers -import { Chat } from "chat"; -import { createSlackAdapter } from "@chat-adapter/slack"; -import { createRedisState } from "@chat-adapter/state-redis"; - -const adapters = { - slack: createSlackAdapter(), -}; - -export interface ThreadState { - runId?: string; // [!code highlight] -} - -export const bot = new Chat({ - userName: "durable-bot", - adapters, - state: createRedisState(), - dedupeTtlMs: 600_000, -}).registerSingleton(); // [!code highlight] -``` - -`registerSingleton()` is important: Chat SDK re-hydrates `Thread` objects inside step functions, and it needs a registered singleton to resolve adapters and state for those rehydrated instances. - - - - - -The workflow is a plain loop over a hook. It receives the serialized thread + first message from the handler, revives them via Chat SDK's standalone `reviver`, and every platform-side effect goes inside a `"use step"` helper: - -```typescript title="workflows/durable-chat-session.ts" lineNumbers -import { Message, reviver, type Thread } from "chat"; -import { defineHook, getWorkflowMetadata } from "workflow"; -import type { ThreadState } from "@/lib/bot"; - -// Hook payload lives in its own file so the webhook side can import it without -// pulling in the workflow module. -import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; - -const chatTurnHook = defineHook(); // [!code highlight] - -async function postAssistantMessage( - thread: Thread, - text: string -) { - "use step"; - // Dynamic import keeps adapter packages out of the workflow sandbox. - const { bot } = await import("@/lib/bot"); // [!code highlight] - await bot.initialize(); - await thread.post(text); -} - -async function runTurn(text: string) { - "use step"; - // Your AI SDK call, database lookup, tool loop, etc. 
- return `You said: ${text}`; -} - -async function handleMessage( - thread: Thread, - message: Message -) { - const text = message.text.trim(); - if (text.toLowerCase() === "done") return false; - - const reply = await runTurn(text); - await postAssistantMessage(thread, reply); - return true; -} - -export async function durableChatSession(payload: string) { - "use workflow"; - - const { workflowRunId } = getWorkflowMetadata(); - const { thread, message } = JSON.parse(payload, reviver) as { // [!code highlight] - thread: Thread; - message: Message; - }; - - const hook = chatTurnHook.create({ token: workflowRunId }); - - await postAssistantMessage(thread, "Session started. Reply here; send `done` to stop."); - - if (!(await handleMessage(thread, message))) return; - - // Each hook resumption is one turn. The workflow stays suspended between - // messages — zero compute cost while idle. - while (true) { - const { message: nextRaw } = await hook; // [!code highlight] - const next = Message.fromJSON(nextRaw); - if (!(await handleMessage(thread, next))) return; - } -} -``` - -```typescript title="workflows/chat-turn-hook.ts" lineNumbers -import type { SerializedMessage } from "chat"; - -export type ChatTurnPayload = { - message: SerializedMessage; -}; -``` - - - - - -Handlers live outside the workflow file so adapter dependencies don't leak in. 
They decide whether to start a new workflow or resume an existing one, then store the `runId` in thread state: - -```typescript title="lib/chat-session-handlers.ts" lineNumbers -import type { Message, Thread } from "chat"; -import { getRun, resumeHook, start } from "workflow/api"; -import { bot, type ThreadState } from "@/lib/bot"; -import { durableChatSession } from "@/workflows/durable-chat-session"; -import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; - -async function startSession(thread: Thread, message: Message) { - const run = await start(durableChatSession, [ // [!code highlight] - JSON.stringify({ - thread: thread.toJSON(), - message: message.toJSON(), - }), - ]); - await thread.setState({ runId: run.runId }); -} - -async function routeTurn(thread: Thread, message: Message) { - const state = await thread.state; - - // No run yet, or the previous run finished — start fresh. - if (!state?.runId || !(await getRun(state.runId).exists)) { - await startSession(thread, message); - return; - } - - try { - await resumeHook(state.runId, { // [!code highlight] - message: message.toJSON(), - }); - } catch (err) { - const msg = err instanceof Error ? err.message.toLowerCase() : ""; - if (msg.includes("not found") || msg.includes("expired")) { - // Stale runId — start a new session rather than dropping the message. - await startSession(thread, message); - return; - } - throw err; - } -} - -bot.onNewMention(async (thread, message) => { - await thread.subscribe(); - await routeTurn(thread, message); -}); - -bot.onSubscribedMessage(async (thread, message) => { - await routeTurn(thread, message); -}); -``` - -Wire Chat SDK's webhook handler into a catch-all route. 
Importing `chat-session-handlers` for side effects registers the event handlers before the first webhook arrives: - -```typescript title="app/api/webhooks/[platform]/route.ts" lineNumbers -import "@/lib/chat-session-handlers"; -import { after } from "next/server"; -import { bot } from "@/lib/bot"; - -type Platform = keyof typeof bot.webhooks; - -export async function POST( - req: Request, - { params }: { params: Promise<{ platform: string }> } -) { - const { platform } = await params; - const handler = bot.webhooks[platform as Platform]; - if (!handler) return new Response(`Unknown platform: ${platform}`, { status: 404 }); - - return handler(req, { waitUntil: (task) => after(() => task) }); // [!code highlight] -} -``` - - - - - -## How It Works - -1. **Thread state stores the `runId`.** Chat SDK's state adapter (Redis, Postgres, memory) holds `{ runId }` per thread. That's the only piece of glue between the two SDKs. -2. **First mention → `start()`.** Handler serializes `thread` + `message` with `toJSON()`, passes them through `start(durableChatSession, [payload])`, stashes the returned `runId` in thread state. -3. **Subsequent messages → `resumeHook()`.** Handler looks up the `runId`, serializes the new message, and resumes the workflow's hook. The workflow picks up on the next `await hook` iteration. -4. **Workflow posts back via steps.** All Chat SDK side effects (`thread.post`, `thread.subscribe`, `thread.setState`) happen inside `"use step"` helpers that dynamically import the bot. This keeps adapter packages outside the workflow sandbox. -5. **Session ends — two ways.** The workflow returns normally (user said `done`, approval granted, etc.), or the workflow throws. Either way the run completes; the next inbound message with the stale `runId` falls through to `startSession()`. 
- -The workflow is fully durable between turns: `await hook` suspends with zero compute cost, and platform webhooks can fire from anywhere without concern for which server instance handled the previous turn. - -## Extending the Pattern - -Because the session is just a workflow, everything else from the cookbook composes naturally: - -- **Stream AI SDK responses into the thread.** Use the [AI SDK integration](/docs/cookbook/integrations/ai-sdk) pattern inside a step, then pass `result.fullStream` to `thread.post()` — Chat SDK handles platform-specific streaming (Slack edit-in-place, Telegram message-per-chunk, etc.). -- **Give the bot a sandbox.** Combine with the [Sandbox integration](/docs/cookbook/integrations/sandbox): each thread gets its own persistent sandbox session, snapshots on idle, resumes on the next message. That's effectively a coding-agent bot. -- **Human-in-the-loop approvals.** `Promise.race([hook, approvalHook])` inside the workflow, post buttons in the thread via [cards](https://chat-sdk.dev/docs/cards), resume `approvalHook` from `bot.onAction(...)`. -- **Scheduled follow-ups.** `sleep("24h")` before a proactive check-in. Surviving restarts is free. - -## Pitfalls - -### Don't import the bot at the top of workflow files - -Adapter packages (`@chat-adapter/slack`, `@chat-adapter/telegram`, etc.) depend on Node-only modules that aren't available in the workflow bundler's sandbox. Keep `import { bot } from "@/lib/bot"` inside `"use step"` functions with `await import(...)`. Use `reviver` from `chat` for deserialization inside the workflow — it's standalone and has no adapter dependencies. - -### Register the bot as a singleton - -`new Chat({...}).registerSingleton()`. Chat SDK rehydrates `Thread` objects inside step functions via `reviver`, and it looks up adapters + state from the registered singleton. Without it, thread methods throw when called from step contexts. 
- -### Hook payloads must be JSON-serializable - -`Message` and `Thread` have methods, so pass them through `.toJSON()` / `Message.fromJSON()` across the hook boundary. Define a `ChatTurnPayload` type in its own file so both the webhook handler (in the Node bundle) and the workflow (in the workflow sandbox) can share it without dragging in adapter code. - -### Handle stale `runId`s - -A workflow run ends but its `runId` is still cached in thread state. The next message calls `resumeHook` on a dead run and throws `not found` / `expired`. Gate on `getRun(runId).exists` before resuming, or catch the error and fall through to `startSession`. Either way the user's message must not be dropped. - -### Keep the hook outside the loop - -One `chatTurnHook.create({ token: workflowRunId })` per workflow run, reused every iteration. Creating a new hook with the same token throws `HookConflictError`. This is the same rule as the [AI SDK](/docs/cookbook/integrations/ai-sdk) and [Sandbox](/docs/cookbook/integrations/sandbox) session patterns. - -### Platform timeouts are separate from workflow timeouts - -Slack wants a 200 within 3 seconds. The webhook handler returns immediately after `resumeHook` (which is fast) — the workflow then runs in the background and posts back via `thread.post`. Don't try to `await` the whole turn inside the webhook handler; that's what breaks in the naive integration. - -## Key APIs - -- [`Chat`](https://chat-sdk.dev/docs/api/chat) / [`Thread`](https://chat-sdk.dev/docs/api/thread) / [`Message`](https://chat-sdk.dev/docs/api/message) — Chat SDK primitives. `toJSON()` / `fromJSON()` / `reviver` are the serialization layer. -- [`start()`](/docs/api-reference/workflow-api/start) — start a new session workflow. Store the returned `runId` in thread state. -- [`resumeHook()`](/docs/api-reference/workflow-api/resume-hook) — forward a new platform message to the running workflow. 
-- [`getRun()`](/docs/api-reference/workflow-api/get-run) — `run.exists` before resuming, to detect stale `runId`s. -- [`defineHook()`](/docs/api-reference/workflow/define-hook) — per-turn suspension point inside the workflow. -- [`registerSingleton()`](https://chat-sdk.dev/docs/api/chat) — makes the bot resolvable from inside step functions. diff --git a/docs/content/docs/cookbook/integrations/sandbox.mdx b/docs/content/docs/cookbook/integrations/sandbox.mdx deleted file mode 100644 index 942f110f5b..0000000000 --- a/docs/content/docs/cookbook/integrations/sandbox.mdx +++ /dev/null @@ -1,516 +0,0 @@ ---- -title: Sandbox -description: Model one Vercel Sandbox per workflow run — durable, idle-efficient, and not bound by the 5-hour sandbox hard cap. -type: guide -summary: Own a sandbox for the lifetime of a workflow run. Hibernate on idle via snapshot(), proactively refresh before the sandbox hard cap, and reconnect by runId — so one logical session can run effectively forever. -related: - - /docs/ai/defining-tools - - /docs/foundations/errors-and-retries - - /docs/cookbook/common-patterns/scheduling - - /docs/cookbook/agent-patterns/durable-agent ---- - -[Vercel Sandbox](https://vercel.com/docs/sandbox) provides isolated code execution environments. The `@vercel/sandbox` package has first-class support for the Workflow SDK — the `Sandbox` class is serializable, and its methods (`create`, `runCommand`, `stop`, `snapshot`) implicitly run as steps. You can use `Sandbox` directly inside a workflow function without wrapping each call in a separate `"use step"` function. - -## Why Workflow + Sandbox - -A sandbox alone gets you an isolated VM. A workflow around it gets you a **durable controller** for that VM's entire lifetime: - -- **One workflow run = one sandbox session.** The `runId` is the only state you need to persist on the client. Close the tab, come back a week later, POST the same `runId` and you're back in the same session. 
-- **Efficient resource use.** Active sandboxes cost money; hibernated workflows cost nothing. The workflow races a command hook against a `sleep()` timer — when idle, it calls `sandbox.snapshot()` (which also stops the VM) and waits indefinitely. Next command → spin a new sandbox from the snapshot with filesystem, installed packages, and git history intact. -- **Beyond the 5-hour hard cap.** Every Vercel Sandbox has a maximum lifetime. The workflow tracks that deadline and proactively snapshots + recreates *before* the cap, so the logical session outlives any one VM. Effectively unbounded session duration on top of time-bounded infrastructure. -- **Automatic cleanup.** `try/finally` in the workflow guarantees the VM is stopped on failure or destroy. - -## Use Case: Coding Agents - -This is the pattern [Open Agents](https://open-agents.dev/) uses to spawn coding agents that run "infinitely in the cloud." Each agent session gets its own sandbox — full filesystem, network, and runtime access — and the durable workflow keeps the agent loop resumable across restarts, auto-hibernates when the user walks away, and reconnects instantly when they return. - -Most coding-agent workloads look like this: - -- User sends a task → agent plans, reads files, runs shell commands, commits. -- User walks away mid-run → agent keeps going, eventually goes idle waiting for input. -- User comes back days later → same branch, same filesystem, same conversation history. - -Without durable workflows you'd need a separate state store for the agent loop, a separate job queue for retries, a separate scheduler for idle cleanup, and bespoke reconnection logic. With the pattern below, all of it is one file. - -## Quickstart: One-shot Pipeline - -Before the full session pattern, the simplest shape. Each sandbox method is an implicit step, so the event log records every command and the workflow replays from the last completed call on restart. 
- -```typescript title="workflows/sandbox-pipeline.ts" lineNumbers -import { Sandbox } from "@vercel/sandbox"; - -export async function sandboxPipeline(input: { commands: string[] }) { - "use workflow"; - - const sandbox = await Sandbox.create({ runtime: "node22" }); // [!code highlight] - - try { - const results = []; - for (const command of input.commands) { - const result = await sandbox.runCommand({ // [!code highlight] - cmd: "bash", - args: ["-c", command], - }); - results.push({ - command, - exitCode: result.exitCode, - stdout: await result.stdout(), - stderr: await result.stderr(), - }); - } - return { status: "completed", results }; - } finally { - await sandbox.stop(); // [!code highlight] - } -} -``` - -## Session Pattern: Persistent Sandbox Beyond the Hard Cap - -One workflow run owns a sandbox for its whole lifetime. The workflow's loop does two jobs simultaneously: - -1. **Command pipeline** — await a hook, run the next user command, stream output, loop. -2. **Sandbox lifecycle** — race the hook against a `sleep()` timer armed for whichever comes first: the idle deadline or the sandbox's refresh deadline (a safety margin before its hard cap). - -When the timer wins: - -- **Idle** → `sandbox.snapshot()` and wait indefinitely for the next command. No compute while asleep. -- **Near sandbox hard cap** → `sandbox.snapshot()` and immediately create a new sandbox from the snapshot. The session appears continuous; the underlying VM just rotated. - -The only way out is an explicit `/destroy` command. 
- - - - - -```typescript title="workflows/sandbox-session.ts" lineNumbers -import { defineHook, sleep, getWritable, getWorkflowMetadata } from "workflow"; -import { Sandbox, type Snapshot } from "@vercel/sandbox"; -import { z } from "zod"; - -export const commandHook = defineHook({ // [!code highlight] - schema: z.object({ command: z.string() }), -}); - -const RUNTIME = "node22"; -const HIBERNATE_AFTER_MS = 30 * 60_000; // 30 min idle → hibernate -const SANDBOX_TIMEOUT_MS = 5 * 60 * 60_000; // sandbox hard cap (5h) -const REFRESH_SAFETY_MS = 5 * 60_000; // refresh 5 min before the cap - -export type SandboxEvent = - | { - type: "created"; - sandboxId: string; - runtime: string; - startedAt: number; - sandboxExpiresAt: number; - hibernateAfterMs: number; - } - | { - type: "status"; - state: - | "active" - | "hibernating" - | "hibernated" - | "resuming" - | "refreshing" - | "destroyed"; - at: number; - sandboxId?: string; - sandboxExpiresAt?: number; - snapshotId?: string; - } - | { type: "activity"; at: number } - | { type: "command_start"; id: string; command: string; at: number } - | { type: "command_output"; id: string; stream: "stdout" | "stderr"; data: string } - | { type: "command_end"; id: string; exitCode: number | null; durationMs: number } - | { type: "result"; status: "destroyed"; durationMs: number }; - -async function emit(event: SandboxEvent) { - "use step"; - const writer = getWritable().getWriter(); - try { - await writer.write(event); - } finally { - writer.releaseLock(); - } -} - -async function runCommandAndStream(sandbox: Sandbox, id: string, command: string) { - "use step"; - const writer = getWritable().getWriter(); - const startedAt = Date.now(); - try { - await writer.write({ type: "command_start", id, command, at: startedAt }); - const result = await sandbox.runCommand({ cmd: "bash", args: ["-c", command] }); - const stdout = await result.stdout(); - if (stdout) await writer.write({ type: "command_output", id, stream: "stdout", data: stdout 
}); - const stderr = await result.stderr(); - if (stderr) await writer.write({ type: "command_output", id, stream: "stderr", data: stderr }); - await writer.write({ - type: "command_end", id, - exitCode: result.exitCode, - durationMs: Date.now() - startedAt, - }); - } finally { - writer.releaseLock(); - } -} - -export async function sandboxSessionWorkflow() { - "use workflow"; - - const { workflowRunId } = getWorkflowMetadata(); - // Create the hook once, outside the loop — reusing the same token from inside // [!code highlight] - // the loop would throw HookConflictError. // [!code highlight] - const hook = commandHook.create({ token: workflowRunId }); - - const startedAt = Date.now(); - - let sandbox: Sandbox = await Sandbox.create({ - runtime: RUNTIME, - timeout: SANDBOX_TIMEOUT_MS, - }); - let sandboxCreatedAt = Date.now(); - let sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; - - await emit({ - type: "created", - sandboxId: sandbox.sandboxId, - runtime: RUNTIME, - startedAt, - sandboxExpiresAt, - hibernateAfterMs: HIBERNATE_AFTER_MS, - }); - await emit({ - type: "status", state: "active", at: Date.now(), - sandboxId: sandbox.sandboxId, sandboxExpiresAt, - }); - - let snapshot: Snapshot | null = null; - let hibernated = false; - let lastActivityAt = startedAt; - let counter = 0; - let destroyed = false; - - try { - while (!destroyed) { - if (hibernated && snapshot) { - // While hibernated, the VM is already stopped. Just wait for the next - // command — no idle timer, no compute cost. 
- const payload = await hook; - if (payload.command === "/destroy") { destroyed = true; break; } - - await emit({ type: "status", state: "resuming", at: Date.now() }); - sandbox = await Sandbox.create({ // [!code highlight] - source: { type: "snapshot", snapshotId: snapshot.snapshotId }, // [!code highlight] - timeout: SANDBOX_TIMEOUT_MS, // [!code highlight] - }); - sandboxCreatedAt = Date.now(); - sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; - hibernated = false; - snapshot = null; - await emit({ - type: "status", state: "active", at: Date.now(), - sandboxId: sandbox.sandboxId, sandboxExpiresAt, - }); - - counter += 1; - await runCommandAndStream(sandbox, `cmd-${counter}`, payload.command); - lastActivityAt = Date.now(); - await emit({ type: "activity", at: lastActivityAt }); - continue; - } - - // Active — wake at whichever comes first: idle-deadline or refresh-deadline. - const idleDeadline = lastActivityAt + HIBERNATE_AFTER_MS; - const refreshDeadline = sandboxExpiresAt - REFRESH_SAFETY_MS; - const wakeAt = Math.min(idleDeadline, refreshDeadline); - const sleepMs = Math.max(0, wakeAt - Date.now()); - - const outcome = await Promise.race([ // [!code highlight] - hook.then((p) => ({ type: "command" as const, command: p.command })), - sleep(`${sleepMs}ms`).then(() => ({ type: "timer" as const })), - ]); - - if (outcome.type === "timer") { - const nearExpiry = Date.now() >= refreshDeadline; - - if (nearExpiry) { - // Proactive refresh — snapshot and immediately recreate so the - // session outlives the sandbox hard cap. 
- await emit({ type: "status", state: "refreshing", at: Date.now() }); - const snap = await sandbox.snapshot(); // [!code highlight] - sandbox = await Sandbox.create({ // [!code highlight] - source: { type: "snapshot", snapshotId: snap.snapshotId }, // [!code highlight] - timeout: SANDBOX_TIMEOUT_MS, // [!code highlight] - }); - sandboxCreatedAt = Date.now(); - sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; - await emit({ - type: "status", state: "active", at: Date.now(), - sandboxId: sandbox.sandboxId, sandboxExpiresAt, - snapshotId: snap.snapshotId, - }); - lastActivityAt = Date.now(); - } else { - // Idle — snapshot and hibernate indefinitely. - await emit({ type: "status", state: "hibernating", at: Date.now() }); - snapshot = await sandbox.snapshot(); // [!code highlight] - hibernated = true; - await emit({ - type: "status", state: "hibernated", at: Date.now(), - snapshotId: snapshot.snapshotId, - }); - } - continue; - } - - if (outcome.command === "/destroy") { destroyed = true; break; } - - counter += 1; - await runCommandAndStream(sandbox, `cmd-${counter}`, outcome.command); - lastActivityAt = Date.now(); - await emit({ type: "activity", at: lastActivityAt }); - } - } finally { - if (!hibernated) { - try { - if (sandbox.status === "running") await sandbox.stop(); - } catch { /* best-effort */ } - } - await emit({ type: "status", state: "destroyed", at: Date.now() }); - await emit({ - type: "result", - status: "destroyed", - durationMs: Date.now() - startedAt, - }); - } -} -``` - - - - - -Two endpoints. `/start` accepts an optional `{ runId }` — if the run still exists, it replays the event log from index 0 so a returning client fully rehydrates. `/command` resumes the hook and returns immediately; command output lands on the `/start` stream. 
- -```typescript title="app/api/sandbox/start/route.ts" lineNumbers -import { start, getRun } from "workflow/api"; -import { sandboxSessionWorkflow } from "@/workflows/sandbox-session"; - -export async function POST(req: Request) { - let body: { runId?: string } = {}; - try { - const text = await req.text(); - if (text) body = JSON.parse(text); - } catch { /* ignore malformed body */ } - - // Reconnect path: if the client sends a known runId, stream the durable - // event log from the beginning so the UI can rehydrate. - if (body.runId) { - const run = getRun(body.runId); - if (await run.exists) { // [!code highlight] - const readable = run.getReadable({ startIndex: 0 }); // [!code highlight] - return new Response(readable.pipeThrough(ndjson()), { - headers: { - "Content-Type": "application/x-ndjson", - "x-workflow-run-id": body.runId, - "x-workflow-reconnected": "true", - "Cache-Control": "no-cache, no-transform", - }, - }); - } - // Stale runId — fall through to start fresh. - } - - const run = await start(sandboxSessionWorkflow, []); - return new Response(run.readable.pipeThrough(ndjson()), { - headers: { - "Content-Type": "application/x-ndjson", - "x-workflow-run-id": run.runId, - "Cache-Control": "no-cache, no-transform", - }, - }); -} - -function ndjson() { - return new TransformStream({ - transform(chunk, controller) { - controller.enqueue(JSON.stringify(chunk) + "\n"); - }, - }); -} -``` - -```typescript title="app/api/sandbox/command/route.ts" lineNumbers -import { commandHook } from "@/workflows/sandbox-session"; - -export async function POST(req: Request) { - const { runId, command } = (await req.json()) as { runId?: string; command?: string }; - - if (!runId || typeof command !== "string") { - return Response.json({ error: "runId and command are required" }, { status: 400 }); - } - - try { - await commandHook.resume(runId, { command }); // [!code highlight] - return Response.json({ ok: true }); - } catch (error) { - const msg = error instanceof Error ? 
error.message.toLowerCase() : ""; - if (msg.includes("not found") || msg.includes("expired")) { - return Response.json({ ok: false, note: "session expired" }, { status: 410 }); - } - throw error; - } -} -``` - - - - - -On mount, if a `runId` is stashed in `localStorage`, reconnect to the existing run. Otherwise start fresh. Commands are POSTed to `/command` — output lands on the `/start` stream. - -```tsx title="components/sandbox-runner.tsx" lineNumbers -"use client"; - -import { useCallback, useEffect, useRef, useState } from "react"; -import type { SandboxEvent } from "@/workflows/sandbox-session"; - -const RUN_ID_KEY = "sandbox.runId"; - -export function SandboxRunner() { - const [events, setEvents] = useState([]); - const runIdRef = useRef(null); - const didReconnectRef = useRef(false); - - const consume = useCallback(async (res: Response) => { - if (!res.ok || !res.body) return; - runIdRef.current = res.headers.get("x-workflow-run-id"); - if (runIdRef.current) { - localStorage.setItem(RUN_ID_KEY, runIdRef.current); // [!code highlight] - } - - const reader = res.body.getReader(); - const decoder = new TextDecoder(); - let buffer = ""; - - while (true) { - const { done, value } = await reader.read(); - if (done) break; - buffer += decoder.decode(value, { stream: true }); - const lines = buffer.split("\n"); - buffer = lines.pop() ?? ""; - for (const line of lines) { - if (!line.trim()) continue; - try { - setEvents((prev) => [...prev, JSON.parse(line) as SandboxEvent]); - } catch { /* malformed line */ } - } - } - }, []); - - const openStream = useCallback( - async (runId?: string) => { - setEvents([]); - const res = await fetch("/api/sandbox/start", { - method: "POST", - headers: runId ? { "Content-Type": "application/json" } : undefined, - body: runId ? JSON.stringify({ runId }) : undefined, - }); - await consume(res); - }, - [consume] - ); - - // Auto-reconnect on mount if a runId is stashed. 
- useEffect(() => { - if (didReconnectRef.current) return; - didReconnectRef.current = true; - const stored = localStorage.getItem(RUN_ID_KEY); - if (stored) openStream(stored); // [!code highlight] - }, [openStream]); - - const start = useCallback(() => { - localStorage.removeItem(RUN_ID_KEY); - runIdRef.current = null; - openStream(); - }, [openStream]); - - const sendCommand = useCallback(async (command: string) => { - if (!runIdRef.current) return; - const res = await fetch("/api/sandbox/command", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ runId: runIdRef.current, command }), - }); - if (res.status === 410) localStorage.removeItem(RUN_ID_KEY); - }, []); - - const destroy = useCallback(async () => { - await sendCommand("/destroy"); - localStorage.removeItem(RUN_ID_KEY); - }, [sendCommand]); - - // Render events as a terminal-style log. Drive UI state from `status` events - // (active / hibernating / hibernated / resuming / refreshing / destroyed). - return null; -} -``` - - - - - -## How It Works - -1. **One workflow = one session.** The workflow owns a sandbox for its entire lifetime. The `runId` is the only state the client has to remember. -2. **Hook created once.** `commandHook.create({ token: workflowRunId })` outside the loop. Creating it twice with the same token throws `HookConflictError`. -3. **Two timer branches.** The active-state race wakes on the earlier of `idleDeadline` and `refreshDeadline`. The hibernated state awaits the hook alone — no timer, no compute. -4. **Proactive refresh.** `refreshDeadline = sandboxExpiresAt - REFRESH_SAFETY_MS`. Hitting this triggers a snapshot + immediate new sandbox from that snapshot, rolling over the hard cap without user intervention. -5. **`sandbox.snapshot()` stops the VM.** It's documented as part of the snapshot process — don't call `stop()` separately. -6. 
**Resume = new sandbox.** `Sandbox.create({ source: { type: "snapshot", snapshotId } })` creates a fresh VM from the snapshot. The new sandbox has a different `sandboxId`; filesystem, installed packages, and git history are preserved. -7. **Reconnect by runId.** `getRun(runId).getReadable({ startIndex: 0 })` replays the durable event log to a returning client, who rebuilds UI state from the replay. -8. **Exit only on `/destroy`.** The workflow loop has no hard deadline of its own. Individual sandboxes time out; the session doesn't. - -## Pitfalls - -### `sandbox.stop()` is terminal - -A stopped sandbox cannot be restarted — you have to create a new one. Hibernation is only possible via `snapshot()` + new-sandbox-from-snapshot. Don't try to "pause" an active sandbox with `stop()` and resume later. - -### `snapshot()` already stops the VM - -Calling `stop()` after `snapshot()` either errors or is a no-op depending on timing. Snapshot takes care of it. - -### New `sandboxId` after resume and refresh - -Both `resuming` (idle → command) and `refreshing` (near-hard-cap rotation) create a new sandbox with a new `sandboxId`. Emit it on the subsequent `status: "active"` event and have the UI read from there, not from the initial `created` event. - -### Keep the refresh margin generous - -`snapshot()` + `Sandbox.create({ source })` takes real time (typically tens of seconds). If `REFRESH_SAFETY_MS` is too small, the old sandbox hits its hard cap mid-snapshot. Leave at least 60–90 seconds; 5 minutes is comfortable. - -### Don't call `writable.close()` inside a workflow function - -Stream closure must happen inside a `"use step"` function. Calling `writable.close()` directly in the workflow body throws `Not supported in workflow functions`. The runtime closes the underlying writable when the workflow returns. - -### Handle stale `runId` gracefully - -Clients can hold `runId`s from long-gone workflow runs (localStorage, back button, server restart). 
Gate the reconnect path on `run.exists` and fall through to starting fresh. On `hook.resume`, catch `not found` / `expired` and return 410 so the client clears its state. - -### Keep the hook outside the loop - -Each iteration's `hook.then(...)` attaches a listener to the same hook instance. Creating a new hook per iteration with the same token throws `HookConflictError`. One hook, one token (`workflowRunId`), reused every iteration. - -## Key APIs - -- [`Sandbox.create`](https://vercel.com/docs/sandbox) — provision a VM (runtime, source, timeout) -- [`sandbox.runCommand`](https://vercel.com/docs/sandbox) — execute a command; implicit step -- [`sandbox.snapshot`](https://vercel.com/docs/sandbox) — save state and stop the VM; returns `Snapshot` -- [`defineHook()`](/docs/api-reference/workflow/define-hook) — suspension point for user commands -- [`sleep()`](/docs/api-reference/workflow/sleep) — durable timer that powers both idle hibernation and proactive refresh -- [`getRun()`](/docs/api-reference/workflow-api/get-run) — look up a run and replay its event log for reconnection -- [`getWritable()`](/docs/api-reference/workflow/get-writable) — resumable NDJSON event stream diff --git a/docs/content/docs/cookbook/meta.json b/docs/content/docs/cookbook/meta.json index 2665de6b65..5ca07bbe20 100644 --- a/docs/content/docs/cookbook/meta.json +++ b/docs/content/docs/cookbook/meta.json @@ -1,5 +1,5 @@ { "title": "Cookbook", "defaultOpen": true, - "pages": ["agent-patterns", "common-patterns", "integrations", "advanced"] + "pages": ["advanced"] } diff --git a/docs/geistdocs.tsx b/docs/geistdocs.tsx index 8d054d0e0f..c44d5b3c98 100644 --- a/docs/geistdocs.tsx +++ b/docs/geistdocs.tsx @@ -12,17 +12,13 @@ export const nav = [ label: 'Docs', href: '/docs', }, - { - label: 'Cookbook', - href: '/cookbook', - }, { label: 'Worlds', href: '/worlds', }, { - label: 'Registry', - href: '/registry', + label: 'Patterns', + href: '/patterns', }, { label: 'Examples', diff --git 
a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index 08eaf1aa8a..95c027ccbb 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -1,18 +1,24 @@ import { agentCancellationButtonSource, + agentCancellationConceptHardCancelSource, + agentCancellationConceptStopRouteSource, + agentCancellationConceptStopSignalSource, agentCancellationRouteSource, agentCancellationStartRouteSource, agentCancellationUsageSource, agentCancellationWorkflowSource, + agentCancellationWorkflowInstallSource, } from './snippets/agent-cancellation'; import { aiSdkClientSource, aiSdkRouteSource, aiSdkWorkflowSource, + aiSdkWorkflowInstallSource, } from './snippets/ai-sdk'; import { batchingStartRouteSource, batchingWorkflowSource, + batchingWorkflowInstallSource, } from './snippets/batching'; import { chatSdkBotSource, @@ -20,21 +26,25 @@ import { chatSdkHookTypeSource, chatSdkWebhookSource, chatSdkWorkflowSource, + chatSdkWorkflowInstallSource, } from './snippets/chat-sdk'; import { childWorkflowsStartRouteSource, childWorkflowsWorkflowSource, + childWorkflowsWorkflowInstallSource, } from './snippets/child-workflows'; import { distributedAbortControllerButtonSource, distributedAbortControllerLibSource, distributedAbortControllerRouteSource, distributedAbortControllerUsageSource, + distributedAbortControllerLibInstallSource, } from './snippets/distributed-abort-controller'; import { durableAgentClientSource, durableAgentStartRouteSource, durableAgentWorkflowSource, + durableAgentWorkflowInstallSource, } from './snippets/durable-agent'; import { humanInTheLoopCardSource, @@ -42,49 +52,74 @@ import { humanInTheLoopStartRouteSource, humanInTheLoopUsageSource, humanInTheLoopWorkflowSource, + humanInTheLoopWorkflowInstallSource, } from './snippets/human-in-the-loop'; import { idempotencyStartRouteSource, idempotencyWorkflowSource, + idempotencyWorkflowInstallSource, } from './snippets/idempotency'; import { rateLimitingStartRouteSource, 
rateLimitingWorkflowSource, + rateLimitingWorkflowInstallSource, } from './snippets/rate-limiting'; import { resendCancelRouteSource, resendStartRouteSource, resendUsageSource, resendWorkflowSource, + resendWorkflowInstallSource, } from './snippets/resend'; -import { sagaStartRouteSource, sagaWorkflowSource } from './snippets/saga'; import { + sagaStartRouteSource, + sagaWorkflowSource, + sagaWorkflowInstallSource, +} from './snippets/saga'; +import { + sandboxClientSource, sandboxCommandRouteSource, sandboxStartRouteSource, sandboxUsageSource, sandboxWorkflowSource, + sandboxWorkflowInstallSource, } from './snippets/sandbox'; import { schedulingCancelRouteSource, schedulingStartRouteSource, schedulingWorkflowSource, + schedulingWorkflowInstallSource, } from './snippets/scheduling'; import { sequentialAndParallelStartRouteSource, sequentialAndParallelWorkflowSource, + sequentialAndParallelWorkflowInstallSource, } from './snippets/sequential-and-parallel'; import { timeoutsStartRouteSource, timeoutsWorkflowSource, + timeoutsWorkflowInstallSource, } from './snippets/timeouts'; import { webhooksStartRouteSource, - webhooksWorkflowSource, + webhooksEventListenerSource, + webhooksRequestReplySource, + webhooksEventListenerInstallSource, + webhooksRequestReplyInstallSource, } from './snippets/webhooks'; import { workflowCompositionStartRouteSource, workflowCompositionWorkflowSource, + workflowCompositionWorkflowInstallSource, } from './snippets/workflow-composition'; +import { + upgradingWorkflowsResumeRouteSource, + upgradingWorkflowsStartRouteSource, + upgradingWorkflowsWorkflowSource, + upgradingWorkflowsMethod2Source, + upgradingWorkflowsMethod1InstallSource, + upgradingWorkflowsMethod2InstallSource, +} from './snippets/upgrading-workflows'; import type { RegistryCategory, RegistryItem } from './types'; /** @@ -105,7 +140,7 @@ export const registryItems: RegistryItem[] = [ description: 'Cancel a running AI agent gracefully — Stop button + workflow signal + 
hard-cancel fallback.', longDescription: - 'A drop-in cancellation pattern for any `DurableAgent`, covering both graceful Stop Signal and Hard Cancellation. The workflow races the agent against a `stopHook` keyed by the run ID; clicking Stop posts to a route that resumes the hook, the workflow exits at its next `await` boundary, and a `data-stopped` part is streamed to the client so it renders a clean ending instead of an abrupt connection close. The route automatically falls back to `getRun(runId).cancel()` if the hook is already gone (e.g. the agent finished mid-request), so the Stop button always succeeds. Note: the Stop Signal does not cancel the underlying model stream — tokens generated after the stop signal are still produced and billed; what it does is exit the workflow function and notify the client.', + 'Cancel a running AI agent from the outside — for example, a Stop button in a chat UI, an admin cancellation endpoint, or a timeout fallback. Two patterns are available depending on whether you need the agent to exit cleanly or just need the run to stop: Hard Cancellation via `getRun(runId).cancel()` for immediate forced termination, or Stop Signal via a `stopHook` + `Promise.race` for a graceful exit that runs cleanup and streams a `data-stopped` part to the client so it renders a clean ending instead of an abrupt connection close. 
The stop route falls back to hard cancel automatically if the hook is already gone — so the Stop button always succeeds regardless of timing.', tags: ['agent', 'cancellation', 'stop-button', 'durable'], categories: ['agent'], homepage: 'https://workflow-sdk.dev', @@ -113,7 +148,7 @@ export const registryItems: RegistryItem[] = [ 'https://workflow-sdk.dev/cookbook/agent-patterns/agent-cancellation', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx', - shadcnSlug: '@workflow-sdk/agent-cancellation', + shadcnSlug: 'https://workflow-sdk.dev/r/agent-cancellation', envVars: [ { name: 'AI_GATEWAY_API_KEY', @@ -129,21 +164,6 @@ export const registryItems: RegistryItem[] = [ description: 'Durable agent + `stopHook` + `Promise.race` exit, with a final `data-stopped` part emitted on stop.', }, - { - path: 'app/api/agent/route.ts', - description: - 'POST endpoint that starts the agent and returns the streaming response with `x-workflow-run-id` set.', - }, - { - path: 'app/api/agent/[runId]/stop/route.ts', - description: - 'POST endpoint that resumes `stopHook` for the given `runId` with a `getRun(runId).cancel()` fallback when the hook is already gone.', - }, - { - path: 'components/stop-button.tsx', - description: - 'Reusable client component — takes a `runId`, posts to the stop route, and disables itself while the request is in flight.', - }, ], snippets: [ { @@ -151,6 +171,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/stoppable-agent.ts', code: agentCancellationWorkflowSource, + installCode: agentCancellationWorkflowInstallSource, }, { label: 'Start route', @@ -177,6 +198,150 @@ export const registryItems: RegistryItem[] = [ code: agentCancellationUsageSource, }, ], + conceptSnippets: [ + { + label: 'Hard Cancel', + lang: 'tsx', + caption: + 'app/api/agent/[runId]/cancel/route.ts — one-liner forced termination', + code: 
agentCancellationConceptHardCancelSource, + }, + { + label: 'Stop Signal', + lang: 'tsx', + caption: + 'workflows/stoppable-agent.ts — hook + Promise.race graceful exit', + code: agentCancellationConceptStopSignalSource, + }, + { + label: 'Stop route', + lang: 'tsx', + caption: 'app/api/agent/[runId]/stop/route.ts — resumes the hook', + code: agentCancellationConceptStopRouteSource, + }, + ], + guide: { + whenToUse: [ + '**Chat stop buttons** — let users cancel a long-running agent from the browser', + '**Admin cancellation** — stop an agent from a different process or API endpoint', + '**Timeout fallback** — combine with `sleep()` to auto-stop after a deadline', + '**Hard Cancellation** — when the run is stuck or unresponsive and you just need it gone', + ], + approaches: { + description: + 'Pick the option that matches what your endpoint needs to deliver to the caller:', + bullets: [ + '**Hard Cancellation** — terminates the run immediately with no opportunity for cleanup or client notification. 
A single line of code, but the workflow throws `WorkflowRunCancelledError` and any streaming clients see an abrupt connection close.', + '**Stop Signal** — the workflow exits as soon as the hook fires, runs any pending cleanup, emits a final `data-stopped` part to the stream so the client can render cleanly, and returns a real result.', + ], + columns: ['', 'Hard Cancellation', 'Stop Signal'], + rows: [ + { + aspect: 'Mechanism', + values: ['`getRun(runId).cancel()`', 'Hook + `Promise.race`'], + }, + { + aspect: 'Speed to terminate', + values: ['Immediate', 'At the next `await` boundary'], + }, + { + aspect: 'Runs `finally` / cleanup', + values: ['No', 'Yes'], + }, + { + aspect: 'Final stream notification', + values: ['No (abrupt close)', 'Yes (`data-stopped` part)'], + }, + { + aspect: '`run.returnValue`', + values: [ + 'Throws `WorkflowRunCancelledError`', + "Returns the workflow's result", + ], + }, + { + aspect: 'Code complexity', + values: ['One line', 'Hook + race + signal step'], + }, + { + aspect: 'Best for', + values: [ + 'Stuck or unresponsive runs, forced termination', + 'User-facing stop, admin cancel, timeouts', + ], + }, + ], + }, + approachSections: [ + { + title: 'Hard Cancellation', + description: 'Call `.cancel()` on a run to terminate it immediately:', + snippetLabels: ['Hard Cancel'], + afterBullets: [ + '**No cleanup runs** — `finally` blocks, defer-style step cleanup, and any logic after the current step are all skipped', + '**No final notification to the client** — the writable closes abruptly, so a streaming UI just sees the connection drop with no `data-stopped` part to render a clean ending', + '**`run.returnValue` throws** — anyone awaiting the result receives `WorkflowRunCancelledError` instead of a meaningful payload', + '**Underlying step keeps running** — the model stream or HTTP call inside the current step continues to completion in the background', + ], + afterProse: + 'Hard Cancellation is the appropriate choice when the run is 
stuck or unresponsive, has exceeded its expected runtime, or you don\'t need a clean exit. For everything else — chat stop buttons, admin "stop" actions, timeout fallbacks — you typically want the Stop Signal pattern.', + }, + { + title: 'Stop Signal', + description: + 'The workflow races the agent against a `stopHook` keyed by the run ID. When Stop is triggered, the workflow exits at its next `await` boundary, runs any cleanup, and emits a `data-stopped` stream part so the client renders a clean ending. The route falls back to hard cancel automatically if the hook is already gone.', + installSlug: '@workflow-sdk/agent-cancellation', + snippetLabels: ['Stop Signal', 'Stop route'], + callout: { + type: 'warn', + content: + 'Stop Signal does not cancel the underlying model stream. Tokens generated after the stop signal are still produced and billed by your provider. What it does is exit the workflow function and notify the client. For hard cross-process cancellation that signals the inner step to bail out, see the Distributed Abort Controller pattern.', + }, + }, + ], + howItWorks: [ + 'A stopHook is created with token stop:${workflowRunId} when the workflow starts — the token is deterministic so any process can resume it given just the run ID.', + 'Promise.race runs the DurableAgent stream and the stop hook concurrently. The agent produces tokens normally until one of the two resolves.', + 'When your stop API calls stopHook.resume(runId, { reason }), the race resolves immediately to the stopped branch — the workflow exits at its next await boundary.', + 'Before returning, emitStopSignal writes a data-stopped part to the writable stream so the client knows the agent was stopped intentionally rather than disconnected.', + 'The stop route also falls back to getRun(runId).cancel() if the hook is already gone (e.g. 
the agent finished mid-request), so the Stop button always succeeds.', + ], + callout: { + type: 'warn', + content: + 'This pattern does not cancel the underlying model stream. Tokens generated after the stop signal are still produced and billed by your provider. What it does is exit the workflow function and notify the client. For hard cross-process cancellation that signals the inner step to bail out, see the Distributed Abort Controller pattern.', + }, + adapting: [ + '**Add a timeout** — race a third `sleep()` promise to auto-stop after a deadline (e.g. 30 minutes).', + '**Audit logging** — include a `reason` field in the stop schema to record who stopped the agent and why.', + '**Cross-process** — the hook token is deterministic, so any server process can call `stopHook.resume()` with just the run ID.', + '**Step limits** — combine with `maxSteps` on `DurableAgent` to cap execution even without a manual stop signal.', + '**Multiple agents** — scope each `stopHook` to its own run ID so parallel agent chains never interfere.', + '**Hard Cancellation as a fallback** — wire your stop endpoint to fall back to `getRun(runId).cancel()` if the hook resume errors with `not found` / `expired` (e.g. the hook was already consumed). 
This guarantees the run is terminated even when the Stop Signal path is unavailable.', + ], + keyApis: [ + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { + label: 'getWorkflowMetadata()', + url: '/docs/api-reference/workflow/get-workflow-metadata', + }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + { + label: 'DurableAgent', + url: '/docs/api-reference/workflow-ai/durable-agent', + }, + { + label: 'getRun()', + url: '/docs/api-reference/workflow-api/get-run', + }, + ], + }, }, { id: 'ai-sdk', @@ -184,23 +349,14 @@ export const registryItems: RegistryItem[] = [ logo: 'ai-sdk', description: 'Durable multi-turn chat with streaming and tools.', longDescription: - "A production-ready multi-turn chat agent powered by AI SDK's `streamText`. Each conversation is one workflow run that suspends between turns — zero compute cost while the user is reading — and resumes the moment the next message arrives. The per-turn LLM stream is durable: if your server restarts mid-response, the client reconnects with the same `runId` and picks up exactly where it left off, with the full conversation history intact. Tools are wrapped as workflow steps, so each tool call is recorded once and replayed (not re-executed) on retry. Drop in any AI Gateway model string and it works — switch from Claude to GPT to Gemini without touching the durability layer.", + "[AI SDK](https://ai-sdk.dev/) is Vercel's framework-agnostic TypeScript toolkit for building AI-powered apps and agents — unified provider access, streaming, tool calling, structured output, and UI hooks. Workflow SDK complements it by making those calls durable: the model request, the tool loop, and the multi-turn conversation all survive restarts and timeouts. For most agent use cases, prefer `DurableAgent` which wraps `streamText` and manages the tool loop automatically. 
This pattern covers using `streamText()` directly when you need lower-level control.", tags: ['ai', 'chat', 'streaming', 'agents', 'durable'], categories: ['agent', 'vercel'], homepage: 'https://ai-sdk.dev', docsUrl: 'https://ai-sdk.dev/docs', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/ai-sdk.mdx', - shadcnSlug: '@workflow-sdk/ai-sdk', - envVars: [ - { - name: 'AI_GATEWAY_API_KEY', - description: - 'API key for Vercel AI Gateway. Lets you call any provider (Claude, GPT, Gemini, …) through one credential. Optional when running on Vercel with OIDC.', - getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', - exampleValue: 'vck_********', - }, - ], + shadcnSlug: 'https://workflow-sdk.dev/r/ai-sdk', files: [ { path: 'workflows/support.ts', @@ -222,22 +378,134 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/support.ts', + caption: + 'workflows/support.ts — one workflow run = one full conversation', code: aiSdkWorkflowSource, + installCode: aiSdkWorkflowInstallSource, }, { label: 'API route', lang: 'tsx', - caption: 'app/api/support/route.ts', + caption: + 'app/api/support/route.ts — handles first turn, follow-ups, and /done exit', + description: + 'One endpoint handles first turn, follow-ups, and the `/done` exit. The client sends `runId` in the body to distinguish first vs follow-up.', code: aiSdkRouteSource, }, { label: 'Client', lang: 'tsx', - caption: 'components/support-chat.tsx', + caption: + 'components/support-chat.tsx — stores runId in a ref, forwarded via WorkflowChatTransport', + description: + 'Store the `runId` in a ref and pass it in the body of every follow-up. `WorkflowChatTransport` forwards it for you.', code: aiSdkClientSource, }, ], + guide: { + flatLayout: true, + sourceDescription: + 'One workflow run = one full conversation. The workflow suspends between turns on a hook and resumes when the next user message arrives. 
Conversation state, tool history, and intermediate computation all live inside the run.', + whenToUse: [ + '**Custom stop conditions** — `stopWhen`, `prepareStep`, or `onStepFinish` callbacks', + '**Structured output** — `Output.object()` or `Output.array()` alongside tool calling', + '**Step-level callbacks** — `onStepFinish` for logging, metrics, or branching logic', + '**Provider options** — per-step model switching, reasoning budgets, or custom provider options', + ], + howItWorks: [ + '**One workflow = one conversation.** The workflow loops on a hook, keeping `allMessages`, tool history, and state alive across turns.', + '**Hook is created once.** `turnHook.create({ token: workflowRunId })` outside the loop — calling it twice with the same token throws `HookConflictError`.', + '**`preventClose: true`** on `pipeTo` keeps the durable writable open so the next turn can write to it.', + '**`sliceUntilFinish`** in the API reads chunks until `type === "finish"`, then closes the HTTP response. 
The source reader is released — not cancelled — so the workflow stream keeps flowing.', + '**`startIndex: tailIndex + 1`** gives each follow-up response only the new chunks, avoiding replay of previous turns.', + '**`/done`** resumes the hook so the workflow exits cleanly, then returns a synthetic `start` + `finish` so `useChat` transitions out of "streaming".', + ], + approaches: { + title: 'streamText vs DurableAgent', + columns: ['', '`streamText()`', '`DurableAgent`'], + rows: [ + { + aspect: 'Tool loop', + values: [ + 'AI SDK handles via `stopWhen`', + 'DurableAgent handles internally', + ], + }, + { + aspect: 'LLM call durability', + values: [ + 'Re-executes on replay', + 'Each LLM call is a durable step', + ], + }, + { + aspect: 'Stop conditions', + values: ['`stopWhen`, `prepareStep`', '`prepareStep` only'], + }, + { + aspect: 'Structured output', + values: ['`Output.object()`, `Output.array()`', 'Not available'], + }, + { + aspect: 'Step callbacks', + values: ['`onStepFinish`, `onChunk`', 'Not available'], + }, + { aspect: 'Setup', values: ['Manual stream piping', 'Automatic'] }, + ], + closing: + 'Use `DurableAgent` for most agent use cases. Use `streamText` when you need the additional control.', + }, + adaptingIntro: + 'Non-obvious correctness details worth knowing before adapting this pattern.', + adapting: [ + '**Snapshot `tailIndex` before resuming the hook** — reversing the order races the workflow: by the time you read `tailIndex`, the next turn may have already written its `start` chunk.', + '**Don\'t call `writable.close()` inside a workflow function** — I/O operations must happen inside a `"use step"` function. When the workflow returns, the runtime closes the writable for you.', + "**Don't use `TransformStream.terminate()` to slice the stream** — throws `Invalid state` when late-arriving chunks hit the transform. 
Use a manual `ReadableStream` pump as shown.", + "**Release the source reader, don't cancel it** — use `reader.releaseLock()` in the `finally` block; `source.cancel()` propagates upstream and closes the durable writable, breaking the next turn.", + '**Handle stale `runId` gracefully** — wrap the follow-up path in a try/catch for `not found` / `expired` and fall through to the first-turn path to start a fresh workflow.', + ], + adaptingTitle: 'Pitfalls', + keyApis: [ + { + label: 'streamText()', + url: 'https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-text', + }, + { + label: 'tool() / tool calling', + url: 'https://ai-sdk.dev/docs/ai-sdk-core/tools-and-tool-calling', + }, + { + label: 'stepCountIs() / stopWhen', + url: 'https://ai-sdk.dev/docs/ai-sdk-core/agents#stop-conditions', + }, + { + label: 'convertToModelMessages()', + url: 'https://ai-sdk.dev/docs/reference/ai-sdk-ui/convert-to-model-messages', + }, + { + label: 'createUIMessageStreamResponse()', + url: 'https://ai-sdk.dev/docs/reference/ai-sdk-ui/create-ui-message-stream-response', + }, + { + label: 'useChat()', + url: 'https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat', + }, + { label: '"use step"', url: '/docs/api-reference/workflow/use-step' }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + { label: 'getRun()', url: '/docs/api-reference/workflow-api/get-run' }, + { + label: 'WorkflowChatTransport', + url: '/docs/api-reference/workflow-ai/workflow-chat-transport', + }, + ], + }, }, { id: 'durable-agent', @@ -246,23 +514,14 @@ export const registryItems: RegistryItem[] = [ description: 'Replace a stateless AI agent with a durable one — tools as steps, streamed output, crash-safe by default.', longDescription: - 'The foundational AI agent pattern on Workflow. 
Wrap any AI SDK agent in `DurableAgent`, mark each tool with `"use step"`, and stream output through `getWritable()`. The framework handles retries, replay, and persistence automatically — if the process crashes mid-tool-call, the agent resumes from the last completed step on replay, with no extra bookkeeping in your code. Each tool call gets automatic retries (3× by default), an entry in the workflow event log for observability, and full Node.js access. Drop in any AI Gateway model string and switch providers without touching the durability layer. The included example is a flight booking agent (search → book → weather check) — replace the tools with your own; the surrounding shape stays identical.', + 'Use this pattern to make any AI SDK agent durable. The agent becomes a workflow, tools become steps, and the framework handles retries, streaming, and state persistence automatically.', tags: ['agents', 'ai', 'durable', 'tools', 'streaming'], categories: ['agent'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/agent-patterns/durable-agent', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx', - shadcnSlug: '@workflow-sdk/durable-agent', - envVars: [ - { - name: 'AI_GATEWAY_API_KEY', - description: - 'API key for Vercel AI Gateway. Lets you call any provider (Claude, GPT, Gemini, …) through one credential. 
Optional when running on Vercel with OIDC.', - getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', - exampleValue: 'vck_********', - }, - ], + shadcnSlug: 'https://workflow-sdk.dev/r/durable-agent', files: [ { path: 'workflows/flight-agent.ts', @@ -286,6 +545,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/flight-agent.ts', code: durableAgentWorkflowSource, + installCode: durableAgentWorkflowInstallSource, }, { label: 'Start route', @@ -300,6 +560,52 @@ export const registryItems: RegistryItem[] = [ code: durableAgentClientSource, }, ], + guide: { + flatLayout: true, + callout: { + type: 'info', + content: + '`WorkflowAgent` from `@ai-sdk/workflow` will replace `DurableAgent` in AI SDK v7. It provides the same durability guarantees with a cleaner API, built-in tool approval flows, and resumable streaming. [View WorkflowAgent docs →](https://ai-sdk.dev/v7/docs/agents/workflow-agent#workflowagent)', + }, + sourceDescription: + 'Replace `Agent` with `DurableAgent`, wrap the function in `"use workflow"`, mark each tool with `"use step"`, and stream output through `getWritable()`.', + whenToUse: [ + '**Any AI agent with tool calls** that should survive crashes and restarts', + '**Agents where tool calls hit external APIs** that need automatic retries', + '**Long-running agent sessions** where losing progress is unacceptable', + '**Agents that need per-step observability** in the workflow event log', + ], + howItWorks: [ + "**`DurableAgent` wraps `Agent`** — same API as AI SDK's `Agent`, but backed by a workflow. 
If the process crashes, the agent resumes from the last completed step on replay.", + '**Tools as steps** — each tool\'s `execute` function uses `"use step"`, giving it automatic retries, full Node.js access, and an entry in the workflow event log.', + "**Streaming** — `getWritable()` streams the agent's output (text chunks, tool calls, tool results) to the client in real time via `createUIMessageStreamResponse`.", + '**`maxSteps`** — limits the total number of LLM calls the agent can make, preventing runaway tool loops.', + ], + adapting: [ + '**Change the model** — replace `"anthropic/claude-haiku-4.5"` with any AI Gateway model string (e.g. `"openai/gpt-4o"`, `"anthropic/claude-sonnet-4-5"`).', + '**Add tools** — define a new `"use step"` function with a Zod schema. Each tool automatically gets retries and persistence.', + '**Workflow-level tools** — if a tool needs workflow primitives like `sleep()` or `createHook()`, omit `"use step"` so it runs in the workflow context instead.', + '**Multi-turn** — pass `result.messages` plus new user messages to subsequent `agent.stream()` calls for multi-turn conversations.', + '**Client integration** — use `useChat()` from `@ai-sdk/react` with `WorkflowChatTransport` from `@workflow/ai` for a full chat UI with reconnection support.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/api-reference/workflow/use-workflow', + }, + { label: '"use step"', url: '/docs/api-reference/workflow/use-step' }, + { + label: 'DurableAgent', + url: '/docs/api-reference/workflow-ai/durable-agent', + }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + { label: 'start()', url: '/docs/api-reference/workflow-api/start' }, + ], + }, }, { id: 'human-in-the-loop', @@ -308,7 +614,7 @@ export const registryItems: RegistryItem[] = [ description: 'Pause an AI agent to wait for human approval, then resume with the decision.', longDescription: - 'A drop-in 
human-in-the-loop pattern for any `DurableAgent`. The agent calls an approval tool before any consequential action; the tool emits a custom data part to the stream so the client can render Approve / Reject controls, then suspends on a `defineHook()` keyed by the tool call ID. An approval API route resumes the hook with the decision, the workflow streams the resolution, and the agent continues. A 24-hour `sleep()` races the hook so stale requests expire automatically. Comes with a generic approval card component that renders any payload schema and listens for `data-approval-needed` / `data-approval-resolved` parts.', + 'Use this pattern when an AI agent needs human confirmation before performing a consequential action like booking, purchasing, or publishing. The workflow suspends without consuming resources until the human responds.', tags: ['agent', 'approval', 'human-in-the-loop', 'durable'], categories: ['agent'], homepage: 'https://workflow-sdk.dev', @@ -316,16 +622,7 @@ export const registryItems: RegistryItem[] = [ 'https://workflow-sdk.dev/cookbook/agent-patterns/human-in-the-loop', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx', - shadcnSlug: '@workflow-sdk/human-in-the-loop', - envVars: [ - { - name: 'AI_GATEWAY_API_KEY', - description: - 'API key for Vercel AI Gateway. Lets you call any provider (Claude, GPT, Gemini, …) through one credential. 
Optional when running on Vercel with OIDC.', - getKeyUrl: 'https://vercel.com/dashboard/ai-gateway', - exampleValue: 'vck_********', - }, - ], + shadcnSlug: 'https://workflow-sdk.dev/r/human-in-the-loop', files: [ { path: 'workflows/approval-agent.ts', @@ -354,6 +651,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/approval-agent.ts', code: humanInTheLoopWorkflowSource, + installCode: humanInTheLoopWorkflowInstallSource, }, { label: 'Start route', @@ -365,6 +663,8 @@ export const registryItems: RegistryItem[] = [ label: 'Approval route', lang: 'tsx', caption: 'app/api/approval/route.ts', + description: + 'The approval route imports the hook definition and calls `.resume()` with the tool call ID as the token:', code: humanInTheLoopRouteSource, }, { @@ -377,9 +677,58 @@ export const registryItems: RegistryItem[] = [ label: 'Usage', lang: 'tsx', caption: 'Wire the card into your chat UI', + description: + "Listen for `data-approval-needed` and `data-approval-resolved` custom data parts in the message stream. The approval tool invocation itself won't appear until the tool returns, so the custom data parts are the mechanism for showing and updating the approval UI.", code: humanInTheLoopUsageSource, }, ], + guide: { + flatLayout: true, + sourceDescription: + 'Create a typed hook using `defineHook()`. When the agent calls the approval tool, it emits a custom data part to the stream so the client can render approval controls, then creates a hook and suspends. An API route resumes the hook with the decision.', + whenToUse: [ + '**Booking confirmations** where users must approve before charges are made', + '**Content publishing gates** where an editor must sign off', + '**Any agent action where the cost of getting it wrong** justifies a human check', + '**Actions with side effects** that cannot be easily undone', + ], + howItWorks: [ + '**`defineHook()` with schema** — creates a typed hook with Zod validation. 
The approval payload is validated before the workflow receives it.', + '**`toolCallId` as token** — the approval tool uses the tool call ID as the hook token, naturally linking the hook to the specific tool invocation.', + "**`emitApprovalRequest` step** — writes a `data-approval-needed` custom data part to the stream *before* the hook suspends. Without this, the client would never see the approval controls because tool invocations don't stream until the tool returns.", + '**No `"use step"` on the approval tool** — the tool runs at the workflow level because `defineHook().create()` is a workflow primitive. It calls step functions for I/O.', + '**`Promise.race` with `sleep`** — the approval races against a durable timeout. If nobody responds, the workflow continues with an expiration message.', + '**`emitApprovalResolved` step** — writes the outcome to the stream so the client can update the card immediately, without waiting for the tool-invocation result.', + ], + adapting: [ + '**Change the approval schema** — add fields like `reason`, `amount`, `reviewerEmail` to match your domain.', + '**Multiple approval gates** — the pattern works for any number of tools. Each tool creates its own hook with its own `toolCallId`.', + "**Escalation** — if the first approver doesn't respond, use `sleep()` + another hook to escalate to a backup reviewer.", + '**Adjust timeout** — use `"24h"` for production, shorter durations for demos.', + '**Workflow-level vs step tools** — tools that use `sleep()`, `defineHook()`, or other workflow primitives must NOT use `"use step"`. 
Tools with only I/O (API calls, DB queries) should use `"use step"` for retries.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/api-reference/workflow/use-workflow', + }, + { label: '"use step"', url: '/docs/api-reference/workflow/use-step' }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + { + label: 'DurableAgent', + url: '/docs/api-reference/workflow-ai/durable-agent', + }, + ], + }, }, { id: 'chat-sdk', @@ -387,33 +736,14 @@ export const registryItems: RegistryItem[] = [ logo: 'chat-sdk', description: 'Durable bot sessions across Slack, Teams, Discord, and more.', longDescription: - "A durable bot session pattern for Chat SDK. Write the bot once, deploy to Slack, Microsoft Teams, Google Chat, Discord, Telegram, GitHub, Linear, or WhatsApp — and let each conversation thread run as its own workflow. Multi-turn state lives in the durable event log instead of hand-rolled Redis bookkeeping. The bot can sleep for hours waiting on a user reply, schedule a follow-up days later, or pause on a long-running tool call — and survive every deploy and cold start in between. Inbound messages route to either a `start()` (first mention) or `resumeHook()` (every subsequent message), with the `runId` stored in Chat SDK's thread state. Outbound replies are durable steps, so platform side-effects are recorded once and replayed safely on restart.", + '[Chat SDK](https://chat-sdk.dev/) normalizes Slack, Microsoft Teams, Discord, Telegram, GitHub, Linear, and WhatsApp into one thread/message model. 
Workflow SDK complements it by making bot sessions durable — each conversation thread maps to one long-running workflow run that owns multi-turn state, can sleep for hours, and survives deploys and cold starts.', tags: ['chat', 'bots', 'slack', 'teams', 'discord', 'durable'], categories: ['vercel', 'agent'], homepage: 'https://chat-sdk.dev', docsUrl: 'https://chat-sdk.dev/docs/guides/durable-chat-sessions-nextjs', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/chat-sdk.mdx', - shadcnSlug: '@workflow-sdk/chat-sdk', - envVars: [ - { - name: 'SLACK_BOT_TOKEN', - description: - 'Bot token from your Slack app. Used by the Slack adapter to post replies and subscribe to thread events.', - getKeyUrl: 'https://api.slack.com/apps', - exampleValue: 'xoxb-********', - }, - { - name: 'SLACK_SIGNING_SECRET', - description: 'Signing secret used to verify incoming Slack webhooks.', - getKeyUrl: 'https://api.slack.com/apps', - }, - { - name: 'REDIS_URL', - description: - 'Connection string for the Redis instance that backs Chat SDK thread state (`runId` per thread).', - }, - ], + shadcnSlug: 'https://workflow-sdk.dev/r/chat-sdk', files: [ { path: 'lib/bot.ts', @@ -453,6 +783,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/durable-chat-session.ts', code: chatSdkWorkflowSource, + installCode: chatSdkWorkflowInstallSource, }, { label: 'Hook type', @@ -473,6 +804,94 @@ export const registryItems: RegistryItem[] = [ code: chatSdkWebhookSource, }, ], + guide: { + flatLayout: true, + introBullets: [ + 'Owns multi-turn state in the durable event log instead of Redis-by-hand bookkeeping', + 'Can `sleep()` for hours or days waiting for a user reply, an approval, or a scheduled follow-up', + 'Survives deploys, cold starts, and crashes — the session picks up from the last step on replay', + 'Receives follow-up messages via hooks, so the bot stays responsive while the workflow is still 
running', + ], + diagram: + 'flowchart TD\n A["Platform webhook"] --> B["Chat SDK event handler\\n(onNewMention, onSubscribedMessage, …)"]\n B -->|"no runId in thread state"| C["start(durableChatSession, …)"]\n B -->|"runId in thread state"| D["resumeHook(runId, { message })"]\n C --> E["Workflow run (durable)\\none per thread — suspends between turns"]\n D --> E\n E --> F["use step helpers\\nthread.post(), thread.subscribe(), thread.setState(), …"]', + diagramTitle: 'How it fits together', + diagramContext: { + prose: + 'Chat SDK owns the edge — webhook verification, event routing, `thread.post()` / `thread.stream()`. Workflow owns the session — state, loops, sleeps, retries. They meet at exactly two points:', + bullets: [ + "**Inbound** — Chat SDK handlers decide whether to `start(workflow, [thread, message])` or `resumeHook(runId, { message })`. The `runId` lives in Chat SDK's thread state (Redis, Postgres, or any state adapter).", + '**Outbound** — the workflow calls Chat SDK APIs (`thread.post()`, `thread.subscribe()`, `thread.setState()`) from inside step functions only — never from the top level of a workflow file, as adapter packages use Node-only modules not available in the workflow sandbox.', + ], + }, + whySection: { + title: 'Why Workflow + Chat SDK', + problemProse: + 'Without Workflow, a long-running bot session usually means one of:', + problemBullets: [ + "Holding a webhook request open while the agent runs (doesn't survive restarts, blows past platform timeouts)", + 'Writing session state to Redis manually, plus a scheduler for timeouts and retries, plus custom reconnection logic', + ], + solutionProse: + 'Workflow replaces all of that with a single durable function. 
The bot can:', + solutionBullets: [ + 'Run a tool loop for minutes while the user watches typing indicators', + 'Wait for a human approval in another thread before continuing', + 'Schedule a follow-up message 24 hours later via `sleep("24h")`', + 'Pause on sandbox snapshot, resume when the user sends the next command', + ], + closingProse: + 'Because the session is a workflow run, its history is recoverable from the event log — no separate message store to keep in sync.', + }, + whenToUse: [ + '**Run a tool loop for minutes** while the user watches typing indicators, without holding the webhook open', + '**Wait for human approval** in another thread before continuing — `Promise.race([hook, approvalHook])`', + '**Schedule a follow-up** message hours or days later via `sleep("24h")`', + '**Multi-turn state** without Redis-by-hand bookkeeping, custom schedulers, or reconnection logic', + '**Any bot session** that must survive deploys, cold starts, and crashes mid-turn', + ], + howItWorks: [ + "**Thread state stores the `runId`.** Chat SDK's state adapter (Redis, Postgres, memory) holds `{ runId }` per thread — the only piece of glue between the two SDKs.", + '**First mention → `start()`.** The handler serializes `thread` + `message` with `toJSON()`, passes them to `start(durableChatSession, [payload])`, and stashes the returned `runId` in thread state.', + "**Subsequent messages → `resumeHook()`.** The handler looks up the `runId`, serializes the new message, and resumes the workflow's hook. The workflow picks up on the next `await hook` iteration.", + '**Workflow posts back via steps.** All Chat SDK side-effects (`thread.post`, `thread.subscribe`, `thread.setState`) run inside `"use step"` helpers that dynamically import the bot — keeping adapter packages outside the workflow sandbox.', + '**Session ends two ways.** The workflow returns normally (user said `done`, approval granted) or throws. 
Either way the run completes; the next inbound message with the stale `runId` falls through to `startSession()`.', + ], + howItWorksClosing: + 'The workflow is fully durable between turns: `await hook` suspends with zero compute cost, and platform webhooks can fire from anywhere without concern for which server instance handled the previous turn.', + adapting: [ + '**Stream AI SDK responses** — use the AI SDK integration inside a step, then pass `result.fullStream` to `thread.post()` for platform-native streaming (Slack edit-in-place, Telegram message-per-chunk).', + '**Give the bot a sandbox** — combine with the Sandbox integration: each thread gets its own persistent sandbox session, snapshots on idle, resumes on the next message.', + '**Human-in-the-loop approvals** — `Promise.race([hook, approvalHook])` inside the workflow, post buttons via cards, resume `approvalHook` from `bot.onAction()`.', + '**Scheduled follow-ups** — `sleep("24h")` before a proactive check-in. Surviving restarts is free.', + '**Don\'t import the bot at the top of workflow files** — keep `import { bot }` inside `"use step"` functions with `await import(...)`. Adapter packages use Node-only modules not available in the workflow sandbox.', + '**Always call `registerSingleton()`** — Chat SDK rehydrates `Thread` objects inside step functions via `reviver` and needs the singleton to resolve adapters and state. Without it, thread methods throw from step contexts.', + '**Hook payloads must be JSON-serializable** — `Message` and `Thread` have methods; pass them through `.toJSON()` / `Message.fromJSON()` across hook boundaries. Define `ChatTurnPayload` in its own file so both the webhook handler and the workflow sandbox can import it without dragging in adapter code.', + "**Handle stale `runId`s** — gate on `getRun(runId).exists` before calling `resumeHook`, or catch `not found` / `expired` and fall through to `startSession`. 
Never drop the user's message.", + '**Keep the hook outside the loop** — one `chatTurnHook.create({ token: workflowRunId })` per workflow run, reused every iteration. Creating with the same token throws `HookConflictError`.', + '**Platform timeouts are separate from workflow timeouts** — Slack wants a 200 within 3 seconds. Return immediately after `resumeHook` (which is fast); the workflow runs in the background and posts back via `thread.post`. Never `await` the whole turn inside the webhook handler.', + ], + adaptingTitle: 'Extending the pattern', + keyApis: [ + { + label: 'Chat / Thread / Message', + url: 'https://chat-sdk.dev/docs/api/chat', + }, + { label: 'start()', url: '/docs/api-reference/workflow-api/start' }, + { + label: 'resumeHook()', + url: '/docs/api-reference/workflow-api/resume-hook', + }, + { label: 'getRun()', url: '/docs/api-reference/workflow-api/get-run' }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { + label: 'registerSingleton()', + url: 'https://chat-sdk.dev/docs/api/chat', + }, + ], + }, }, { id: 'sandbox', @@ -480,21 +899,14 @@ export const registryItems: RegistryItem[] = [ logo: 'sandbox', description: 'Persistent code-execution session beyond the 5-hour cap.', longDescription: - 'An always-resumable code-execution session built on Vercel Sandbox. One workflow run owns one sandbox for its entire lifetime — full filesystem, network, and runtime — and the client only has to remember a single `runId`. When the user goes idle, the workflow snapshots the VM and hibernates indefinitely at zero cost; when they return, the same filesystem, installed packages, and git history are right there waiting. The pattern also rolls over the sandbox hard cap automatically: a few minutes before the 5-hour deadline it snapshots, spins up a fresh VM from that snapshot, and keeps going — so the logical session can run effectively forever on top of time-bounded infrastructure. 
Perfect for coding agents, AI dev environments, and any workload where users walk away and come back days later.', + 'The [`@vercel/sandbox`](https://vercel.com/docs/vercel-sandbox) package has first-class support for the Workflow SDK — the `Sandbox` class is serializable, and its methods (`create`, `runCommand`, `stop`, `snapshot`) implicitly run as steps, so you can use `Sandbox` directly inside a workflow function without wrapping each call in `"use step"`. Wrapping the sandbox in a workflow run gives you a durable controller for its entire lifetime: auto-hibernation on idle, proactive rollover before the 5-hour sandbox hard cap, and reconnection by a single `runId` — so one logical session can run effectively forever on top of time-bounded infrastructure.', tags: ['sandbox', 'agents', 'sessions', 'durable', 'snapshots'], categories: ['vercel', 'agent'], - homepage: 'https://vercel.com/docs/sandbox', - docsUrl: 'https://vercel.com/docs/sandbox', + homepage: 'https://vercel.com/docs/vercel-sandbox', + docsUrl: 'https://vercel.com/docs/vercel-sandbox', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/sandbox.mdx', - shadcnSlug: '@workflow-sdk/sandbox', - envVars: [ - { - name: 'VERCEL_OIDC_TOKEN', - description: - 'OIDC token used by `@vercel/sandbox` to authenticate. 
Set automatically when deployed to Vercel; locally, run `vercel env pull` to populate it.', - }, - ], + shadcnSlug: 'https://workflow-sdk.dev/r/sandbox', files: [ { path: 'workflows/sandbox-session.ts', @@ -511,6 +923,11 @@ export const registryItems: RegistryItem[] = [ description: 'POST endpoint that resumes the command hook — every shell command the user runs flows through here.', }, + { + path: 'components/sandbox-runner.tsx', + description: + 'Client component that streams NDJSON events from `/start`, auto-reconnects from `localStorage` on mount, and sends commands to `/command`.', + }, ], snippets: [ { @@ -518,11 +935,14 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/sandbox-session.ts', code: sandboxWorkflowSource, + installCode: sandboxWorkflowInstallSource, }, { label: 'Start route', lang: 'tsx', caption: 'app/api/sandbox/start/route.ts', + description: + 'Two endpoints. `/start` accepts an optional `{ runId }` — if the run still exists, it replays the event log from index 0 so a returning client fully rehydrates. `/command` resumes the hook and returns immediately; command output lands on the `/start` stream.', code: sandboxStartRouteSource, }, { @@ -531,13 +951,92 @@ export const registryItems: RegistryItem[] = [ caption: 'app/api/sandbox/command/route.ts', code: sandboxCommandRouteSource, }, + { + label: 'Client', + lang: 'tsx', + caption: 'components/sandbox-runner.tsx', + description: + 'On mount, if a `runId` is stashed in `localStorage`, reconnect to the existing run. Otherwise start fresh. Commands are POSTed to `/command` — output lands on the `/start` stream.', + code: sandboxClientSource, + }, { label: 'Quickstart', lang: 'tsx', - caption: 'Simpler one-shot pipeline (no session loop)', + caption: + 'workflows/sandbox-pipeline.ts — simpler one-shot pipeline (no session loop)', + description: + 'Before the full session pattern, the simplest shape. 
Each `Sandbox` method is an implicit step, so the event log records every command and the workflow replays from the last completed call on restart.', code: sandboxUsageSource, }, ], + guide: { + flatLayout: true, + whySection: { + title: 'Why Workflow + Sandbox', + solutionProse: + "A sandbox alone gets you an isolated VM. A workflow around it gets you a durable controller for that VM's entire lifetime:", + solutionBullets: [ + "**One workflow run = one sandbox session.** The `runId` is the only state you need to persist on the client. Close the tab, come back a week later, POST the same `runId` and you're back in the same session.", + '**Efficient resource use.** Active sandboxes cost money; hibernated workflows cost nothing. The workflow races a command hook against a `sleep()` timer — when idle, it calls `sandbox.snapshot()` (which also stops the VM) and waits indefinitely.', + '**Beyond the 5-hour hard cap.** The workflow tracks the sandbox deadline and proactively snapshots + recreates before the cap, so the logical session outlives any one VM.', + '**Automatic cleanup.** `try/finally` in the workflow guarantees the VM is stopped on failure or destroy.', + ], + }, + whenToUse: [ + '**Coding agents** — spawn agents that run "infinitely in the cloud": full filesystem, network, and runtime, with auto-hibernation when the user walks away and instant reconnect when they return', + '**AI dev environments** — long-running sessions where users send tasks, go idle, and come back days later expecting the same branch, filesystem, and git history', + '**Any workload that outlives a 5-hour sandbox** — the pattern rolls over the hard cap automatically; the logical session has no deadline of its own', + '**Interactive pipelines** — wherever you need real-time streaming of stdout/stderr to a client while the sandbox runs multi-step jobs', + ], + sourceDescription: + "One workflow run owns a sandbox for its whole lifetime. 
The workflow's loop does two jobs simultaneously — a command pipeline (await a hook, run the user command, stream output, repeat) and a sandbox lifecycle manager (race the hook against a `sleep()` timer armed for the earlier of the idle deadline or the refresh deadline). When the timer wins: if idle, `sandbox.snapshot()` and wait indefinitely; if near the hard cap, snapshot and immediately create a new sandbox from that snapshot. The only way out is an explicit `/destroy` command.", + howItWorks: [ + '**One workflow = one session.** The workflow owns a sandbox for its entire lifetime. The `runId` is the only state the client has to remember.', + '**Hook created once.** `commandHook.create({ token: workflowRunId })` outside the loop — creating it twice with the same token throws `HookConflictError`.', + '**Two timer branches.** The active-state race wakes on the earlier of `idleDeadline` and `refreshDeadline`. The hibernated state awaits the hook alone — no timer, no compute.', + '**Proactive refresh.** `refreshDeadline = sandboxExpiresAt - REFRESH_SAFETY_MS`. Hitting this triggers a snapshot + immediate new sandbox from that snapshot, rolling over the hard cap without user intervention.', + "**`sandbox.snapshot()` stops the VM.** It's part of the snapshot process — don't call `stop()` separately.", + '**Resume = new sandbox.** `Sandbox.create({ source: { type: "snapshot", snapshotId } })` creates a fresh VM. The new sandbox has a different `sandboxId`; filesystem, installed packages, and git history are preserved.', + '**Reconnect by runId.** `getRun(runId).getReadable({ startIndex: 0 })` replays the durable event log to a returning client, who rebuilds UI state from the replay.', + "**Exit only on `/destroy`.** The workflow loop has no hard deadline of its own. Individual sandboxes time out; the session doesn't.", + ], + adapting: [ + '**`sandbox.stop()` is terminal** — a stopped sandbox cannot be restarted. 
Hibernation is only possible via `snapshot()` + new-sandbox-from-snapshot. Don\'t "pause" an active sandbox with `stop()` and resume later.', + '**`snapshot()` already stops the VM** — calling `stop()` after `snapshot()` either errors or is a no-op. The snapshot takes care of it.', + '**New `sandboxId` after resume and refresh** — both `resuming` (idle → command) and `refreshing` (near-hard-cap rotation) create a new sandbox with a new `sandboxId`. Emit it on the subsequent `status: "active"` event; don\'t rely on the initial `created` event.', + '**Keep the refresh margin generous** — `snapshot()` + `Sandbox.create({ source })` takes real time (typically tens of seconds). If `REFRESH_SAFETY_MS` is too small the old sandbox hits its hard cap mid-snapshot. Leave at least 60–90 seconds; 5 minutes is comfortable.', + '**Don\'t call `writable.close()` inside a workflow function** — stream closure must happen inside a `"use step"` function. The runtime closes the underlying writable when the workflow returns.', + '**Handle stale `runId` gracefully** — gate the reconnect path on `run.exists` and fall through to starting fresh. On `hook.resume`, catch `not found` / `expired` and return 410 so the client clears its state.', + '**Keep the hook outside the loop** — creating a new hook per iteration with the same token throws `HookConflictError`. 
One hook, one token (`workflowRunId`), reused every iteration.', + ], + adaptingTitle: 'Pitfalls', + adaptingIntro: + 'Non-obvious correctness details worth knowing before adapting this pattern.', + keyApis: [ + { + label: 'Sandbox.create', + url: 'https://vercel.com/docs/vercel-sandbox/sdk-reference', + }, + { + label: 'sandbox.runCommand', + url: 'https://vercel.com/docs/vercel-sandbox/sdk-reference', + }, + { + label: 'sandbox.snapshot', + url: 'https://vercel.com/docs/vercel-sandbox/sdk-reference', + }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { label: 'getRun()', url: '/docs/api-reference/workflow-api/get-run' }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + ], + }, }, { id: 'batching', @@ -546,14 +1045,14 @@ export const registryItems: RegistryItem[] = [ description: 'Process large collections in parallel batches with failure isolation between groups.', longDescription: - 'Bulk-process arbitrary records by splitting them into fixed-size batches, running each batch concurrently with `Promise.allSettled` (failures inside a batch are isolated per record), and pacing batches with `sleep()` to respect downstream rate limits. Each record runs as its own step → durable, automatically retried up to 3×, and replayable. The workflow returns a tally with per-record failure reasons. Ships a generic `ImportRecord` shape — replace it with your own and customise the step.', + "Use batching when you need to process a large list of items in parallel while controlling concurrency. 
Items are split into fixed-size batches, each batch runs concurrently, and failures in one batch don't affect others.", tags: ['batching', 'fan-out', 'parallel', 'bulk-import'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/batching', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/batching.mdx', - shadcnSlug: '@workflow-sdk/batching', + shadcnSlug: 'https://workflow-sdk.dev/r/batching', files: [ { path: 'workflows/batching.ts', @@ -570,7 +1069,10 @@ export const registryItems: RegistryItem[] = [ label: 'Workflow', lang: 'tsx', caption: 'workflows/batching.ts', + description: + 'The workflow splits records into chunks, processes each chunk concurrently, tracks results per batch, and returns a final tally. Each record runs in its own `"use step"` function with full Node.js access and automatic retries.', code: batchingWorkflowSource, + installCode: batchingWorkflowInstallSource, }, { label: 'Start route', @@ -579,6 +1081,44 @@ export const registryItems: RegistryItem[] = [ code: batchingStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + 'Bulk data imports — contacts, orders, products from a CSV or database', + 'Processing hundreds or thousands of items against external APIs', + 'Calling rate-limited APIs where you need to control concurrency', + 'Any fan-out where you want failure isolation between groups', + ], + howItWorks: [ + 'Records are split into fixed-size batches.', + "Each batch runs in parallel via `Promise.allSettled` — failures in one record don't affect others.", + 'A `sleep()` between batches paces requests to avoid overloading downstream services.', + 'After all batches, a summary is returned with succeeded/failed counts.', + ], + adapting: [ + '**Change the `Record` type** — replace `ImportRecord` with your actual data shape (orders, images, products, etc.).', + '**Replace 
`processRecord()`** — swap in your real import logic: DB upserts, API calls, file processing.', + '**Tune `batchSize` and `sleep()`** — match the values to your downstream rate limits.', + "**Add or remove tracking** — the pattern works with any item type; strip the failure list if you don't need per-record reasons.", + '**`Promise.allSettled` over `Promise.all`** — `Promise.all` rejects on the first failure; `allSettled` waits for everything and tells you what failed. Use it whenever you want to continue even if some items fail.', + "**Tune batch size to your API's concurrency limit** — if the API allows 10 concurrent requests, use `batchSize: 10`.", + '**`sleep()` is durable** — the pacing delay between batches survives cold starts and process restarts.', + '**Each `processRecord` call is an independent step** — if one fails it retries up to 3× without affecting other items in the batch.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: 'Promise.allSettled()', + url: 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/allSettled', + }, + ], + }, }, { id: 'idempotency', @@ -587,23 +1127,14 @@ export const registryItems: RegistryItem[] = [ description: "Pass each step's deterministic stepId as the Idempotency-Key so retries never duplicate side effects.", longDescription: - 'Workflow steps can be retried (on failure) and replayed (on cold start). Without an idempotency key, that means duplicate Stripe charges, duplicate emails, duplicate records. `getStepMetadata().stepId` returns a deterministic ID that is stable across retries and replays of the same step — pass it as the `Idempotency-Key` header to any external API that supports the convention. 
Ships a Stripe-shaped charge + receipt example; the same shape works for any provider.', + "Workflow steps can be retried (on failure) and replayed (on cold start). If a step calls an external API that isn't idempotent, retries could create duplicate charges, send duplicate emails, or double-process records. Use idempotency keys to make these operations safe.", tags: ['idempotency', 'stripe', 'retries', 'exactly-once'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/idempotency', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/idempotency.mdx', - shadcnSlug: '@workflow-sdk/idempotency', - envVars: [ - { - name: 'STRIPE_SECRET_KEY', - description: - 'Server-side Stripe secret key. Used by the example charge step — swap for any provider that supports idempotency keys.', - getKeyUrl: 'https://dashboard.stripe.com/apikeys', - exampleValue: 'sk_live_********', - }, - ], + shadcnSlug: 'https://workflow-sdk.dev/r/idempotency', files: [ { path: 'workflows/idempotency.ts', @@ -621,6 +1152,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/idempotency.ts', code: idempotencyWorkflowSource, + installCode: idempotencyWorkflowInstallSource, }, { label: 'Start route', @@ -629,6 +1161,38 @@ export const registryItems: RegistryItem[] = [ code: idempotencyStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + 'Charging a payment (Stripe, PayPal)', + 'Sending transactional emails or SMS', + 'Creating records in external systems where duplicates are harmful', + "Any step that has side effects in systems you don't control", + ], + sourceDescription: + 'Every step has a unique, deterministic `stepId` available via `getStepMetadata()`. 
Pass this as the `Idempotency-Key` header to external APIs — Stripe and most external systems that support the convention will deduplicate requests keyed by this ID.', + adapting: [ + "**`stepId` is deterministic** — it's the same value across retries and replays of the same step, making it a reliable idempotency key.", + "**Always provide idempotency keys for non-idempotent external calls** — even if you think a step won't be retried, cold-start replay will re-execute it.", + '**Handle 409/conflict as success** — if an external API returns "already processed," treat that as a successful result, not an error.', + '**Make your own APIs idempotent** — accept an idempotency key and return the cached result on duplicate requests.', + '**Rely on the external API\'s idempotency, not local flags** — Workflow doesn\'t provide distributed locking. Check-then-act patterns ("read a flag, then write if not set") race between concurrent runs.', + "**Don't use check-then-act patterns** — another run could read the same flag between your read and write. Use a unique constraint or the external API's deduplication layer instead.", + ], + adaptingTitle: 'Tips & caveats', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/api-reference/workflow/use-workflow', + }, + { label: '"use step"', url: '/docs/api-reference/workflow/use-step' }, + { + label: 'getStepMetadata()', + url: '/docs/api-reference/step/get-step-metadata', + }, + { label: 'start()', url: '/docs/api-reference/workflow-api/start' }, + ], + }, }, { id: 'rate-limiting', @@ -637,14 +1201,14 @@ export const registryItems: RegistryItem[] = [ description: 'Handle 429 responses and transient failures with RetryableError + automatic backoff.', longDescription: - "Stop writing manual sleep-retry loops. Throw `RetryableError` with a `retryAfter` value (millis, duration string, or `Date`) and the workflow runtime reschedules the step natively — more efficient than wall-clock sleeps and survives cold starts. 
Ships two flavors: Retry-After (read the header, pass it through) and exponential backoff (use `getStepMetadata().attempt` for `1s, 4s, 9s…`). `FatalError` short-circuits retries when retrying won't help.", + 'Use this pattern when calling external APIs that enforce rate limits. Instead of writing manual retry loops, throw `RetryableError` with a `retryAfter` value and let the workflow runtime handle rescheduling — more efficient than wall-clock sleeps and survives cold starts.', tags: ['rate-limit', 'retry', 'backoff', '429'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/rate-limiting', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/rate-limiting.mdx', - shadcnSlug: '@workflow-sdk/rate-limiting', + shadcnSlug: 'https://workflow-sdk.dev/r/rate-limiting', files: [ { path: 'workflows/rate-limiting.ts', @@ -662,6 +1226,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/rate-limiting.ts', code: rateLimitingWorkflowSource, + installCode: rateLimitingWorkflowInstallSource, }, { label: 'Start route', @@ -670,6 +1235,46 @@ export const registryItems: RegistryItem[] = [ code: rateLimitingStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + 'Calling APIs that return 429 (Too Many Requests) with `Retry-After` headers', + 'Any step that hits transient failures and needs backoff', + 'Syncing data with third-party services (Stripe, CRMs, scrapers)', + ], + sourceDescription: + 'A step function calls an external API. On 429, it reads the `Retry-After` header and throws `RetryableError` — the runtime reschedules the step after the specified delay. For transient 5xx failures, use `getStepMetadata().attempt` to calculate exponential backoff (`1s, 4s, 9s…`). 
Set `fn.maxRetries` on the step function to override the default retry count of 3.', + adapting: [ + '**`RetryableError` is for transient failures** — use it when the request might succeed on a later attempt (429, 503, network timeout).', + "**`FatalError` is for permanent failures** — use it when retrying won't help (404, 401, invalid input). This skips all remaining retries immediately.", + '**`retryAfter` accepts millis, duration strings, or a `Date`** — pass `parseInt(retryAfter) * 1000`, `"1m"`, `"30s"`, or `new Date(...)`.', + '**Steps retry up to 3 times by default** — set `fn.maxRetries = N` on any step function to override the retry count per endpoint.', + "**Don't write manual sleep-retry loops** — `RetryableError` is more efficient and survives cold starts; the runtime handles scheduling natively.", + '**Circuit breaker** — when a dependency is completely down, use `sleep()` for a durable cooldown period, then probe with a single test request.', + '**Application-level retry** — for custom retry conditions or when building libraries, wrap step calls with your own backoff utility rather than `RetryableError`.', + ], + adaptingTitle: 'Tips', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + { + label: 'RetryableError', + url: '/docs/api-reference/workflow/retryable-error', + }, + { + label: 'FatalError', + url: '/docs/api-reference/workflow/fatal-error', + }, + { + label: 'getStepMetadata()', + url: '/docs/api-reference/step/get-step-metadata', + }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + ], + }, }, { id: 'saga', @@ -678,14 +1283,14 @@ export const registryItems: RegistryItem[] = [ description: 'Multi-step business transactions with automatic rollback on failure.', longDescription: - 'Coordinate transactions that span multiple services with automatic compensation. 
Each forward step does its work and pushes an undo onto a stack; if a later step throws `FatalError`, the catch block unwinds compensations in LIFO order to restore consistency. Compensations are themselves steps — durable, retried, and idempotent. Ships a complete "reserve seats → capture invoice → provision → notify" example shaped for replacement with your real APIs.', + 'Use the saga pattern when a business transaction spans multiple services and you need automatic rollback if any step fails. Each forward step registers a compensation, and on failure the workflow unwinds them in reverse order.', tags: ['saga', 'transactions', 'rollback', 'compensation'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/saga', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/saga.mdx', - shadcnSlug: '@workflow-sdk/saga', + shadcnSlug: 'https://workflow-sdk.dev/r/saga', files: [ { path: 'workflows/saga.ts', @@ -703,6 +1308,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/saga.ts', code: sagaWorkflowSource, + installCode: sagaWorkflowInstallSource, }, { label: 'Start route', @@ -711,6 +1317,47 @@ export const registryItems: RegistryItem[] = [ code: sagaStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + 'Multi-service transactions — reserve inventory, charge payment, provision access', + 'Any sequence where partial completion leaves the system in an inconsistent state', + 'Operations that need "all or nothing" semantics across external APIs', + ], + howItWorks: [ + 'Each forward step does work and registers a compensation function.', + 'If any step throws `FatalError`, the catch block runs compensations in reverse (LIFO) order to restore consistency.', + "Regular errors are retried automatically (up to 3× by default). 
Use `FatalError` only for permanent failures where retrying won't help.", + ], + sourceDescription: + 'Each step returns a result and pushes a compensation handler onto a stack. If a later step throws a `FatalError`, the workflow catches it and executes compensations in LIFO order.', + adapting: [ + '**Replace step functions with real API calls** — each `"use step"` function has full Node.js access.', + '**Add or remove steps freely** — the pattern scales to any number of forward + compensation pairs.', + '**Make compensations idempotent** — they may be retried if the workflow restarts mid-rollback. Check whether the resource was already released before releasing it again.', + '**Compensation steps are also `"use step"` functions** — this makes them durable; if the workflow restarts mid-rollback, it resumes where it left off.', + "**Use `FatalError` for permanent failures** — regular errors trigger automatic retries (up to 3×). Throw `FatalError` when retrying won't help (insufficient funds, invalid input, etc.).", + '**Capture values in closures carefully** — use block-scoped variables or copy values before pushing compensations to avoid referencing stale state.', + "**Notifications don't need compensations** — fire-and-forget steps like sending emails or Slack messages typically don't register a compensation.", + "**The `emit()` streaming is optional** — remove the `SagaEvent` type and `emit()` calls if you don't need real-time UI progress.", + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/api-reference/workflow/use-workflow', + }, + { label: '"use step"', url: '/docs/api-reference/workflow/use-step' }, + { + label: 'FatalError', + url: '/docs/api-reference/workflow/fatal-error', + }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + ], + }, }, { id: 'scheduling', @@ -719,14 +1366,14 @@ export const registryItems: RegistryItem[] = [ description: 'Schedule any 
future action with durable sleep and a cancel hook — no DB flags required.', longDescription: - 'Drop-in pattern for scheduled actions that need to be cancellable. The workflow races a durable `sleep()` against a `defineHook()` keyed by a stable token (you choose — e.g. `schedule:`). Whichever resolves first wins: timer fires → run the action; hook resolves → cancel cleanly. Costs nothing while sleeping, and survives restarts/deployments. Generic action shape — swap the `runAction` step for emails, push notifications, Slack messages, webhooks, anything.', + "Workflow's `sleep()` is durable — it survives cold starts, restarts, and deployments. Combined with `defineHook()` and `Promise.race()`, it becomes the foundation for interruptible scheduled workflows like drip campaigns, reminders, and timed sequences.", tags: ['scheduling', 'reminders', 'cancellable', 'sleep'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/scheduling', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/scheduling.mdx', - shadcnSlug: '@workflow-sdk/scheduling', + shadcnSlug: 'https://workflow-sdk.dev/r/scheduling', files: [ { path: 'workflows/scheduling.ts', @@ -749,6 +1396,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/scheduling.ts', code: schedulingWorkflowSource, + installCode: schedulingWorkflowInstallSource, }, { label: 'Start route', @@ -760,9 +1408,54 @@ export const registryItems: RegistryItem[] = [ label: 'Cancel route', lang: 'tsx', caption: 'app/api/scheduling/cancel/route.ts', + description: + 'Any server-side code can fire the hook by calling `.resume()` with the same token — if no active schedule is found, the error is caught and treated as success.', code: schedulingCancelRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + 'Sending emails on a schedule (drip campaigns, onboarding 
sequences, reminders)', + 'Waiting for a deadline but allowing early cancellation', + 'Any pattern where "do X, wait N hours, then do Y" needs to be both reliable and interruptible', + ], + sourceDescription: + 'A drip campaign sends emails at intervals, sleeping between each. Each sleep races against a cancellation hook — if an external event fires the hook (e.g. user converts, unsubscribes), the campaign stops immediately.', + howItWorks: [ + '**Durable sleep** — `sleep("2d")` persists through restarts at zero compute cost. The workflow resumes precisely when the timer fires.', + '**Hook creation** — `cancelDrip.create({ token })` registers a hook that resolves when any external system calls `.resume()` with the same token.', + '**Race** — `Promise.race([sleep(...), hook])` blocks until either the timer fires or the hook is resumed, whichever comes first.', + '**Fresh hooks per window** — after a sleep completes normally, the previous hook instance is consumed. A new `.create()` call registers a fresh hook for the next sleep window, reusing the same token.', + ], + adapting: [ + '**Change durations** — replace `"2d"` with any duration string (`"1h"`, `"7d"`, `"30m"`) or a `Date` object for absolute times.', + '**Add more steps** — the pattern scales to any number of email-then-sleep pairs.', + '**Snooze instead of cancel** — resolve the hook with a `snooze` payload and sleep again: `sleep(new Date(Date.now() + payload.snoozeMs))`.', + '**Timeout any operation** — the same `Promise.race(sleep, work)` pattern works for adding deadlines to slow steps.', + '**Real providers** — swap the `sendEmail` step body for Resend, Postmark, or any HTTP API. 
The `"use step"` function has full Node.js access.', + '**`sleep()` accepts duration strings, millis, or `Date` objects** — `"1d"`, `"2h"`, `"30s"`, a millisecond number, or `new Date(...)` for an absolute time.', + '**Durable means durable** — a `sleep("7d")` workflow costs nothing while sleeping — no compute, no memory.', + '**Use `sleep()` in workflow context only** — step functions cannot call `sleep()` directly. If a step needs a delay, use `setTimeout` inside the step.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { + label: 'Promise.race()', + url: 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race', + }, + ], + }, }, { id: 'sequential-and-parallel', @@ -771,7 +1464,7 @@ export const registryItems: RegistryItem[] = [ description: 'Compose steps with await, Promise.all, and Promise.race against durable sleeps and webhooks.', longDescription: - 'Workflows are plain async functions, so the standard composition primitives apply unchanged — sequential `await` for pipelines, `Promise.all` for fan-out, `Promise.race` for first-finisher logic. Because `sleep()` and `createWebhook()` are also promises, racing real work against a durable deadline is a one-liner. Ships a single workflow file with three illustrative entry points (pipeline / fan-out / race-with-sleep) and a start route — replace the placeholder steps with your real logic.', + "Workflows are written in plain async/await — there's no new control-flow API to learn. Sequential awaits chain steps that depend on each other, `Promise.all` runs independent steps in parallel, and `Promise.race` returns whichever finishes first. 
These compose with workflow primitives like `sleep()` and `createWebhook()` since those are also just promises.", tags: ['composition', 'parallel', 'race', 'pipeline'], categories: ['common'], homepage: 'https://workflow-sdk.dev', @@ -779,7 +1472,7 @@ export const registryItems: RegistryItem[] = [ 'https://workflow-sdk.dev/cookbook/common-patterns/sequential-and-parallel', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx', - shadcnSlug: '@workflow-sdk/sequential-and-parallel', + shadcnSlug: 'https://workflow-sdk.dev/r/sequential-and-parallel', files: [ { path: 'workflows/sequential-and-parallel.ts', @@ -797,6 +1490,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/sequential-and-parallel.ts', code: sequentialAndParallelWorkflowSource, + installCode: sequentialAndParallelWorkflowInstallSource, }, { label: 'Start route', @@ -805,6 +1499,54 @@ export const registryItems: RegistryItem[] = [ code: sequentialAndParallelStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + "**Pipelines** — each step depends on the previous step's output (validate → process → store)", + "**Independent fan-out** — fetch multiple resources or perform multiple actions that don't depend on each other", + '**Race conditions** — return as soon as one of N operations completes (timeout, first-responder, deadline)', + '**Mixing primitives** — running steps, sleeps, and webhooks side-by-side in the same control-flow expression', + ], + sourceDescription: + 'The workflow file ships three entry points — a sequential pipeline, a parallel fan-out with `Promise.all`, and a race against a deadline with `Promise.race`. 
Most real workflows combine all three.', + howItWorks: [ + "**`await` is durable** — when the workflow awaits a step, the runtime persists the step's input, suspends the workflow, runs the step, and replays the workflow with the result on resume. The same applies to `sleep()` and `createWebhook()`.", + '**`Promise.all` runs steps concurrently** — each promise in the array is suspended on its own and the workflow resumes only when all have resolved. Failures propagate — if any promise rejects, the whole `Promise.all` rejects immediately, without waiting for the other promises to settle.', + '**`Promise.race` resolves on the first settle** — the losing promises keep running in the background but their results are discarded by the workflow.', + '**All primitives are promises** — `sleep("1 day")` and `createWebhook()` return promises, so they compose with `Promise.all` / `Promise.race` exactly like steps do — this is what makes "race a webhook against a 24-hour deadline" a one-liner.', + ], + adapting: [ + "**Replace `Promise.all` with `Promise.allSettled`** when partial failures should not abort the rest. You'll get an array of `{ status, value | reason }` instead of throwing on the first rejection.", + "**Bound the parallelism** — `Promise.all` over 1000 items will fan out 1000 concurrent steps. If downstream APIs can't handle that, split the array into fixed-size chunks (see the Batching pattern).", + '**Add a deadline to any race** — pair the operation with `sleep("30s").then(() => "timeout" as const)` and check the discriminated result. See the Timeouts pattern for full examples.', + '**Mix steps and hooks in a race** — wait for an external signal, a deadline, or a step result all in the same `Promise.race`. 
The first one to resolve wins.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: 'createWebhook()', + url: '/docs/api-reference/workflow/create-webhook', + }, + { + label: 'Promise.all()', + url: 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/all', + }, + { + label: 'Promise.race()', + url: 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race', + }, + { + label: 'Promise.allSettled()', + url: 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/allSettled', + }, + ], + }, }, { id: 'timeouts', @@ -813,14 +1555,14 @@ export const registryItems: RegistryItem[] = [ description: 'Add deadlines to slow steps, hooks, and webhooks by racing them against durable sleep.', longDescription: - 'Bound how long any work can take. `Promise.race([work, sleep("30s")])` returns whichever resolves first; tag the sleep branch with a sentinel value so TypeScript narrows the result. Ships hard-timeout (throw on deadline), soft-timeout (fall back to a cached value), and the webhook + 7-day deadline shape for human approvals. Note: the loser keeps running with side effects intact — see Distributed Abort Controller for hard cross-process cancellation.', + 'A common requirement is bounding how long a workflow waits for something to finish — a slow step, an external webhook, a human approval. 
Race the operation against a durable `sleep()` with `Promise.race()` — whichever finishes first wins, and the loser keeps running but its result is ignored.', tags: ['timeout', 'deadline', 'race', 'sleep'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/timeouts', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/timeouts.mdx', - shadcnSlug: '@workflow-sdk/timeouts', + shadcnSlug: 'https://workflow-sdk.dev/r/timeouts', files: [ { path: 'workflows/timeouts.ts', @@ -838,6 +1580,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/timeouts.ts', code: timeoutsWorkflowSource, + installCode: timeoutsWorkflowInstallSource, }, { label: 'Start route', @@ -846,6 +1589,51 @@ export const registryItems: RegistryItem[] = [ code: timeoutsStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + '**Slow steps** — bound the time spent waiting on third-party APIs, model calls, or expensive computation', + "**External callbacks** — give webhooks a deadline so the workflow doesn't hang forever waiting for an event that may never arrive", + "**Human approvals** — auto-decline or escalate when a hook isn't resumed within a window", + '**Polling loops** — give an outer poll-until-ready loop an overall budget', + ], + sourceDescription: + 'Two entry points are included — a hard timeout on a slow step (throws when the deadline fires) and a timeout on an external webhook callback with a 7-day deadline.', + howItWorks: [ + '**Durable sleep** — `sleep("30s")` persists through restarts at zero compute cost. The workflow resumes precisely when the timer fires.', + '**Race** — `Promise.race([work, sleep(...)])` returns the value of whichever promise resolves first. 
The loser keeps running in the background but its result is ignored by the workflow.', + '**Discriminated result** — tagging the sleep branch with a sentinel value (`"timeout" as const`, `{ timedOut: true }`) lets TypeScript narrow the result and pick the right branch.', + '**Throw to fail the workflow** — inside a workflow function, throwing an `Error` exits the run with that error. Use `FatalError` inside steps; throw plain errors inside workflows.', + ], + callout: { + type: 'warn', + content: + "**The losing operation keeps running.** `Promise.race` doesn't cancel — when the sleep wins, the underlying step (or model call, or HTTP request) continues to completion in the background. This is fine for idempotent reads but matters when the operation has side effects or costs money.", + }, + adapting: [ + '**Different durations** — `sleep()` accepts duration strings (`"30s"`, `"5m"`, `"7 days"`), milliseconds, or `Date` objects for absolute deadlines.', + '**Soft timeout (retry)** — instead of throwing, loop and retry with a fresh `Promise.race` and a backoff.', + '**Soft timeout (fallback)** — return a default value when the timer wins instead of throwing: `if (result === "timeout") return cachedFallback`.', + '**Combine with cancellation** — race three promises: the operation, a deadline `sleep()`, and a cancellation hook. 
See the Scheduling pattern for the cancellation half of this.', + '**Per-step deadlines** — wrap each step in its own `Promise.race` for independent budgets, or use a single outer race for an overall workflow deadline.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: 'createWebhook()', + url: '/docs/api-reference/workflow/create-webhook', + }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { + label: 'Promise.race()', + url: 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/race', + }, + ], + }, }, { id: 'webhooks', @@ -854,14 +1642,14 @@ export const registryItems: RegistryItem[] = [ description: 'Receive HTTP callbacks from external services, process them durably, and respond inline.', longDescription: - 'Drop-in webhook receiver pattern. `createWebhook()` returns a URL the workflow can `for await` over; each incoming request is processed in its own step with full Node.js access, and `request.respondWith()` lets the step shape the HTTP response inline. Ships two flavors: a long-running listener (Stripe-style multi-event ledger that exits on a terminal event), and async-request-reply (submit to a vendor with our webhook URL, race the callback against a 30-second deadline).', + 'Use webhooks when external services push events to your application via HTTP callbacks. 
The workflow creates a webhook URL, suspends with zero compute cost, and resumes when a request arrives.', tags: ['webhook', 'callback', 'integration', 'external-api'], categories: ['common'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/webhooks', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/webhooks.mdx', - shadcnSlug: '@workflow-sdk/webhooks', + shadcnSlug: 'https://workflow-sdk.dev/r/webhooks', files: [ { path: 'workflows/webhooks.ts', @@ -876,10 +1664,22 @@ export const registryItems: RegistryItem[] = [ ], snippets: [ { - label: 'Workflow', + label: 'Event listener', lang: 'tsx', caption: 'workflows/webhooks.ts', - code: webhooksWorkflowSource, + description: + 'Long-running listener that processes multiple requests from one URL and exits on a terminal event — Stripe-style payment ledger.', + code: webhooksEventListenerSource, + installCode: webhooksEventListenerInstallSource, + }, + { + label: 'Request-reply', + lang: 'tsx', + caption: 'workflows/webhooks.ts', + description: + 'Submit a request to an external vendor with your webhook URL as the callback, then race the response against a 30-second deadline.', + code: webhooksRequestReplySource, + installCode: webhooksRequestReplyInstallSource, }, { label: 'Start route', @@ -888,6 +1688,44 @@ export const registryItems: RegistryItem[] = [ code: webhooksStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + 'Accepting callbacks from payment processors (Stripe, PayPal)', + 'Waiting for third-party verification or processing results', + 'Any integration where an external system calls you back asynchronously', + ], + sourceDescription: + 'Two patterns are included — choose the one that fits your integration. 
Both use `createWebhook({ respondWith: "manual" })` to get a URL you pass to the external service.', + adapting: [ + '**`respondWith: "manual"`** gives you control over the HTTP response from inside a step. Use this when you need to validate the request before responding.', + '**`for await` on a webhook** lets you process multiple events from the same URL. Use `break` to stop listening after a terminal event.', + '**Webhooks auto-generate URLs** at `/.well-known/workflow/v1/webhook/:token`. Pass this URL to external services.', + "**Race webhooks against `sleep()`** for deadlines. If the callback doesn't arrive in time, the workflow can take a fallback action.", + '**For large payloads**, use a hook + reference token instead of passing the data through the workflow. The event log serializes all step inputs/outputs, so large payloads hurt performance.', + ], + adaptingTitle: 'Tips', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + { + label: 'createWebhook()', + url: '/docs/api-reference/workflow/create-webhook', + }, + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: 'FatalError', + url: '/docs/api-reference/workflow/fatal-error', + }, + ], + }, }, { id: 'workflow-composition', @@ -896,7 +1734,7 @@ export const registryItems: RegistryItem[] = [ description: 'Call workflows from workflows — direct await for inline composition, start() for independent runs.', longDescription: - 'Two ways to compose workflows. Direct `await` of a child workflow flattens its steps into the parent\'s event log — one runId, one retry boundary, one timeline. 
`start()` from inside a step spawns the child as an independent run with its own runId, separate event log, and its own retry boundary — ideal for fire-and-forget, fan-out, and self-upgrading workflows (`deploymentId: "latest"`). Ships parent + child workflows + a spawn step + a start route.', + "Workflows can call other workflows. Choose between two composition modes depending on whether the parent needs the child's result inline (direct await) or wants to fire the child off as an independent run (background spawn). For massive fan-out with polling and partial-failure handling, see the Child Workflows pattern.", tags: ['composition', 'child-workflow', 'spawn', 'start'], categories: ['common'], homepage: 'https://workflow-sdk.dev', @@ -904,7 +1742,7 @@ export const registryItems: RegistryItem[] = [ 'https://workflow-sdk.dev/cookbook/common-patterns/workflow-composition', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/workflow-composition.mdx', - shadcnSlug: '@workflow-sdk/workflow-composition', + shadcnSlug: 'https://workflow-sdk.dev/r/workflow-composition', files: [ { path: 'workflows/workflow-composition.ts', @@ -922,6 +1760,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/workflow-composition.ts', code: workflowCompositionWorkflowSource, + installCode: workflowCompositionWorkflowInstallSource, }, { label: 'Start route', @@ -930,6 +1769,60 @@ export const registryItems: RegistryItem[] = [ code: workflowCompositionStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + "**Direct await** — the parent needs the child's result before continuing, and you want a single unified event log", + "**Background spawn** — the parent doesn't need to wait, and you want the child to be observable as a separate run with its own `runId`", + ], + sourceDescription: + 'Both composition modes are in a single workflow file — the direct-await child is called inline 
from the parent, while the background-spawn pattern wraps `start()` inside a `"use step"` function to keep it deterministic across replays.', + howItWorks: [ + "**Direct await flattens** — when a workflow function awaits another workflow function, the child's steps emit into the parent's event log and share the parent's run ID.", + '**`start()` mints a new run** — the child gets its own `runId`, its own event log, and its own retry boundary. The parent only sees the `runId` returned by `start()`.', + '**`start()` must be called from a step** — wrap it in a `"use step"` function. This keeps the spawn deterministic across replays.', + ], + callout: { + type: 'info', + content: + 'To run the child workflow on the latest deployment rather than the current one, pass `deploymentId: "latest"` in the `start()` options. This is a Vercel-specific feature. The child\'s function name, file path, argument types, and return type must remain compatible across deployments — renaming the function or changing its location will change the workflow ID.', + }, + approaches: { + title: 'Choosing between the two modes', + columns: ['', 'Direct await', 'Background spawn (`start()`)'], + rows: [ + { aspect: 'Parent waits for child', values: ['Yes', 'No'] }, + { + aspect: 'Has its own `runId`', + values: ["No (shares parent's)", 'Yes'], + }, + { aspect: 'Has its own event log', values: ['No', 'Yes'] }, + { aspect: 'Has its own retry boundary', values: ['No', 'Yes'] }, + { + aspect: 'Best for', + values: [ + 'Sequential composition, helper workflows', + 'Independent work, fire-and-forget, fan-out', + ], + }, + ], + }, + adapting: [ + '**Spawn many children at once** — call `start()` in a loop inside a step. For more advanced fan-out (chunking, polling, partial-failure handling), see the Child Workflows pattern.', + '**Wait for a background child to finish** — combine `start()` with `getRun()` polling. 
The Child Workflows pattern covers the full polling loop.', + '**Pass results back from background children** — the spawn step returns the `runId`; later, a poll step uses `getRun(runId).returnValue` to fetch the final result.', + ], + adaptingTitle: 'Adapting to your use case', + keyApis: [ + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + { label: 'start()', url: '/docs/api-reference/workflow-api/start' }, + { label: 'getRun()', url: '/docs/api-reference/workflow-api/get-run' }, + ], + }, }, { id: 'child-workflows', @@ -938,14 +1831,14 @@ export const registryItems: RegistryItem[] = [ description: 'Spawn many independent child workflows from a parent and orchestrate them with spawn-and-poll.', longDescription: - 'Use child workflows when one workflow needs to orchestrate many independent units of work. Each child runs as its own workflow with a separate event log, retry boundary, and failure scope — a failing child never aborts unrelated work, and you get per-item observability via each child\'s runId. Ships the full parent + child + chunked spawn step + durable polling loop + result-collection step. Pre-wired with `deploymentId: "latest"` so children pick up future deployments.', + "Use child workflows when a single workflow needs to orchestrate many independent units of work. 
Each child runs as its own workflow with a separate event log, retry boundary, and failure scope — if one child fails, it doesn't take down the parent or siblings.", tags: ['fan-out', 'spawn', 'poll', 'orchestration'], categories: ['advanced'], homepage: 'https://workflow-sdk.dev', docsUrl: 'https://workflow-sdk.dev/cookbook/advanced/child-workflows', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/child-workflows.mdx', - shadcnSlug: '@workflow-sdk/child-workflows', + shadcnSlug: 'https://workflow-sdk.dev/r/child-workflows', files: [ { path: 'workflows/child-workflows.ts', @@ -964,6 +1857,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/child-workflows.ts', code: childWorkflowsWorkflowSource, + installCode: childWorkflowsWorkflowInstallSource, }, { label: 'Start route', @@ -972,6 +1866,43 @@ export const registryItems: RegistryItem[] = [ code: childWorkflowsStartRouteSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + '**Work units are independent** — each child can run without knowing about the others (e.g., processing individual documents, generating separate reports)', + '**You need isolated failure boundaries** — a failing child should not abort unrelated work; the parent decides how to handle failures', + '**You want massive fan-out** — spawning 50 or 500 children is practical because each runs on its own infrastructure', + '**You need per-item observability** — each child workflow has its own run ID, status, and event log for monitoring', + ], + sourceDescription: + 'The workflow file ships the full spawn-and-poll pattern — a child workflow (`processDocument`), a parent (`processDocumentBatch`), a chunked spawn step, a durable polling loop with `sleep()`, and a result-collection step.', + howItWorks: [ + '**Spawn step** — `start()` is called from inside a `"use step"` function. 
The step returns an array of `runId`s for all spawned children.', + '**Polling loop** — the parent workflow loops, calling a status-check step then sleeping with `sleep(POLL_INTERVAL)`. The loop is durable — replays resume from the event log.', + '**Status-check step** — `getRun(runId).status` is awaited inside a `"use step"` function. Steps inside child workflows retry independently; the parent only sees the child\'s final status.', + '**Result collection** — once all children complete, a final step calls `getRun(runId).returnValue` for each run ID to gather results.', + ], + adapting: [ + '**`start()` must be called from a step**, not directly from a workflow function. Wrap it in a `"use step"` function to keep spawning deterministic across replays.', + '**`getRun()` must also be called from a step.** The polling loop lives in the workflow, but the actual status check must be a step.', + '**Set a max iteration count on polling loops** to prevent runaway workflows. Calculate the count from your expected max duration and poll interval.', + '**Use chunked spawning for large batches** — spawning 500 children in a single step can time out. Break it into chunks of 10–50.', + '**Tolerate partial failures** — instead of throwing on the first failed child, track `completedIds` and `failedIds` separately and apply a `maxFailureRate` threshold before aborting.', + '**Retry failed children** — on a failed child, spawn a replacement and continue polling. Track restart counts per child index to prevent infinite loops.', + '**Use `deploymentId: "latest"`** if children should run on the most recent deployment. 
Function name, file path, and argument types must remain compatible across deployments.', + ], + adaptingTitle: 'Tips', + keyApis: [ + { label: 'start()', url: '/docs/api-reference/workflow-api/start' }, + { label: 'getRun()', url: '/docs/api-reference/workflow-api/get-run' }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { + label: '"use workflow"', + url: '/docs/foundations/workflows-and-steps', + }, + { label: '"use step"', url: '/docs/foundations/workflows-and-steps' }, + ], + }, }, { id: 'distributed-abort-controller', @@ -980,7 +1911,7 @@ export const registryItems: RegistryItem[] = [ description: 'AbortController-shaped API for cross-process cancellation, backed by a durable workflow.', longDescription: - "A drop-in replacement for `AbortController` that works across process boundaries. Calling `.abort()` on one machine fires the `.signal` `AbortSignal` on any other machine that created a controller with the same semantic ID — no run ID sharing required. Backed by a coordination workflow that races a manual abort hook against a TTL sleep; when triggered, it writes to the run's stream and any subscriber's `AbortSignal` flips. Includes `Create`-is-idempotent reconnection (find an existing run by hook token), TTL auto-cleanup, and an optional grace period for late subscribers. Ships the lib module, a remote-abort route, and a client cancel button.", + 'Use this pattern when you need an `AbortController`-like interface that works across distributed systems. 
The controller uses a durable workflow to coordinate cancellation — calling `.abort()` on one machine triggers the `.signal` on any other machine.', tags: ['abort', 'cancellation', 'distributed', 'cross-process'], categories: ['advanced'], homepage: 'https://workflow-sdk.dev', @@ -988,7 +1919,7 @@ export const registryItems: RegistryItem[] = [ 'https://workflow-sdk.dev/cookbook/advanced/distributed-abort-controller', sourceUrl: 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx', - shadcnSlug: '@workflow-sdk/distributed-abort-controller', + shadcnSlug: 'https://workflow-sdk.dev/r/distributed-abort-controller', files: [ { path: 'lib/distributed-abort-controller.ts', @@ -1012,6 +1943,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'lib/distributed-abort-controller.ts', code: distributedAbortControllerLibSource, + installCode: distributedAbortControllerLibInstallSource, }, { label: 'Abort route', @@ -1032,6 +1964,113 @@ export const registryItems: RegistryItem[] = [ code: distributedAbortControllerUsageSource, }, ], + guide: { + flatLayout: true, + whenToUse: [ + '**Cross-process cancellation** — cancel a long-running operation from a different server, worker, or edge function', + '**Durable cancellation** — the abort signal persists even if the process that created it crashes', + '**UI stop buttons** — let users cancel operations running on the server from the browser', + '**Timeout coordination** — the built-in TTL auto-expires stale controllers', + ], + sourceDescription: + 'The lib module ships the `DistributedAbortController` class plus the backing workflow. The abort route handles remote cancellation via a POST endpoint. The cancel button is a ready-to-use client component.', + howItWorks: [ + '**Semantic ID** — `create()` accepts a meaningful ID (e.g. 
`"chat:123"`) and either starts a new coordination workflow or reconnects to an existing one via `getHookByToken()`.', + "**Race** — the workflow races a `defineHook` abort signal against a `sleep()` TTL expiration. Whichever fires first writes a cancellation message to the run's stream.", + '**`.signal` streams** — `getRun(runId).getReadable()` reads the stream and flips a local `AbortController` when the abort message arrives, returning a standard `AbortSignal`.', + '**Grace period** — on TTL expiration (not manual abort), the workflow sleeps through an additional grace period to allow late subscribers to receive the signal before the run closes.', + ], + adapting: [ + '**Use semantic IDs** — use meaningful IDs like `chat:123` or `task:abc` instead of random UUIDs so any process can reconnect without sharing a run ID.', + '**`create()` is idempotent** — calling `create()` with the same ID reconnects to the existing controller; no duplicate workflows are created.', + '**TTL auto-cleanup** — workflows self-terminate after TTL expires; no manual cleanup needed. Adjust `ttlMs` per use case (default: 24 hours).', + '**`.signal` is a getter** — each access to `.signal` creates a new stream reader and `AbortController`; cache the result if you need to reuse it.', + '**One-shot** — once aborted or expired, the workflow completes. 
Create a new controller for new operations.', + ], + adaptingTitle: 'Tips', + keyApis: [ + { + label: 'defineHook()', + url: '/docs/api-reference/workflow/define-hook', + }, + { + label: 'getWritable()', + url: '/docs/api-reference/workflow/get-writable', + }, + { label: 'sleep()', url: '/docs/api-reference/workflow/sleep' }, + { label: 'start()', url: '/docs/api-reference/workflow-api/start' }, + { + label: 'getHookByToken()', + url: '/docs/api-reference/workflow-api/get-hook-by-token', + }, + { label: 'getRun()', url: '/docs/api-reference/workflow-api/get-run' }, + ], + }, + }, + { + id: 'upgrading-workflows', + name: 'Upgrading Workflows', + logo: 'upgrading-workflows', + description: + 'Respawn a long-running workflow on the latest deployment — shipped fixes take effect on the very next event, no migration needed.', + longDescription: + 'Ship fixes to in-flight runs without migrating state. Each iteration handles one event, then calls `start(self, [newState], { deploymentId: "latest" })` from inside a step to spawn its successor on whichever deployment is currently live. Because state travels as a plain function argument, the logical "session" survives indefinite redeploys — the next run starts fresh on new code and picks up exactly where the last one left off. Useful for workflows that wait on a long timescale (days/weeks) and need shipped fixes to apply immediately, or for any pattern where you want to iterate freely without versioning workflow logic. 
Ships Method 1 (spawn on every iteration) out of the box; the same start and resume routes also support Method 2 (dedicated upgrade hook racing the main work hook) described in the docs.', + tags: ['upgrade', 'respawn', 'deployment', 'long-running', 'versioning'], + categories: ['common', 'advanced'], + homepage: 'https://workflow-sdk.dev', + docsUrl: 'https://workflow-sdk.dev/cookbook/advanced/upgrading-workflows', + sourceUrl: + 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/upgrading-workflows.mdx', + shadcnSlug: 'https://workflow-sdk.dev/r/upgrading-workflows', + files: [ + { + path: 'workflows/upgrading-workflow.ts', + description: + 'The self-upgrading workflow — one iteration per run, blocks on `resumeHook`, computes new state, then spawns the next iteration with `deploymentId: "latest"`.', + }, + { + path: 'app/api/upgrade/route.ts', + description: + 'POST endpoint that starts the first iteration of the chain with optional initial state.', + }, + { + path: 'app/api/upgrade/resume/route.ts', + description: + 'POST endpoint that resumes the active iteration by `runId`, triggering a state update and a successor spawn.', + }, + ], + snippets: [ + { + label: 'Method 1 — per-event spawn', + lang: 'tsx', + caption: 'workflows/upgrading-workflow.ts', + description: + 'One run per event. After each resume, state is computed and the next iteration is spawned with `deploymentId: "latest"`. Every event automatically picks up the latest code.', + code: upgradingWorkflowsWorkflowSource, + installCode: upgradingWorkflowsMethod1InstallSource, + }, + { + label: 'Method 2 — explicit upgrade hook', + lang: 'tsx', + caption: 'workflows/upgrading-workflow.ts', + description: + 'Long-running loop that handles many events per run. 
A separate `upgradeHook` races the work hook — fire it when you want to force a respawn on the latest deployment.', + code: upgradingWorkflowsMethod2Source, + installCode: upgradingWorkflowsMethod2InstallSource, + }, + { + label: 'Start route', + lang: 'tsx', + caption: 'app/api/upgrade/route.ts', + code: upgradingWorkflowsStartRouteSource, + }, + { + label: 'Resume route', + lang: 'tsx', + caption: 'app/api/upgrade/resume/route.ts', + code: upgradingWorkflowsResumeRouteSource, + }, + ], }, { id: 'resend', @@ -1046,7 +2085,7 @@ export const registryItems: RegistryItem[] = [ docsUrl: 'https://resend.com/docs/send-with-nodejs', sourceUrl: 'https://github.com/vercel-labs/workflow_onboarding/tree/main/nextjs_workflow/app/workflows/providers', - shadcnSlug: '@workflow-sdk/resend', + shadcnSlug: 'https://workflow-sdk.dev/r/resend', envVars: [ { name: 'RESEND_API_KEY', @@ -1078,6 +2117,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'app/workflows/providers/resendWorkflow.ts', code: resendWorkflowSource, + installCode: resendWorkflowInstallSource, }, { label: 'Start route', diff --git a/docs/lib/registry/snippets/agent-cancellation.ts b/docs/lib/registry/snippets/agent-cancellation.ts index 3df349b209..30ca01fa0c 100644 --- a/docs/lib/registry/snippets/agent-cancellation.ts +++ b/docs/lib/registry/snippets/agent-cancellation.ts @@ -95,6 +95,117 @@ export async function stoppableAgent(messages: ModelMessage[]) { } `; +export const agentCancellationWorkflowInstallSource = `/** + * Agent Cancellation — graceful Stop Signal for any DurableAgent. + * + * THE PATTERN: + * 1. Create a stopHook token = runId so the stop API can resume it with + * just the runId — no extra bookkeeping required. + * 2. Race the agent's .stream() against the hook. Whichever resolves first + * wins; the workflow exits at its next await boundary. + * 3. 
On stop, emit a "data-stopped" stream part so the client renders a + * clean ending instead of an abrupt connection close. + * 4. The stop API route tries the hook first; falls back to getRun().cancel() + * if the hook is already consumed (agent finished mid-request). + * + * USEFUL WHEN: + * - You need a "Stop" button in a chat UI backed by a streaming agent. + * - The agent should finish its current sentence before exiting (graceful). + * - You want the client to display a tidy "stopped" indicator instead of + * a raw connection drop. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the searchWeb tool with your real tools. The surrounding + * race/hook shape is domain-agnostic. + * - Change maxSteps to match your budget — 15 is a safe default. + * - Adjust the "data-stopped" part payload if your client needs richer info. + * - For hard cross-process cancellation (kills the model stream too), see + * the Distributed Abort Controller pattern instead. + * + * NOTE: Stop Signal does NOT cancel the underlying model stream. Tokens + * generated after the stop hook fires are still produced (and billed). The + * workflow exits and notifies the client, but the inference keeps running. + * + * DOCS: https://workflow-sdk.dev/patterns/agent-cancellation + */ +import { DurableAgent } from "@workflow/ai/agent"; +import { + defineHook, + getWorkflowMetadata, + getWritable, +} from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Hook resumed by the stop API route. +export const stopHook = defineHook({ + schema: z.object({ reason: z.string().optional() }), +}); + +// Replace these with your real tools. 
+async function searchWeb({ query }: { query: string }) { + "use step"; + await new Promise((r) => setTimeout(r, 1500)); + return { + results: [{ title: \`\${query} — overview\`, snippet: \`Result for \${query}.\` }], + }; +} + +async function emitStopSignal(details: { reason?: string }) { + "use step"; + const writer = getWritable().getWriter(); + try { + await writer.write({ + type: "data-stopped", + id: "stop-signal", + data: details, + } as UIMessageChunk); + } finally { + writer.releaseLock(); + } +} + +export async function stoppableAgent(messages: ModelMessage[]) { + "use workflow"; + + // Token = runId so any process can resume the stop hook with just the runId. + const { workflowRunId } = getWorkflowMetadata(); + const hook = stopHook.create({ token: \`stop:\${workflowRunId}\` }); + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: "You are a research assistant. Search and summarize as needed.", + tools: { + searchWeb: { + description: "Search the web for information", + inputSchema: z.object({ query: z.string() }), + execute: searchWeb, + }, + }, + }); + + // Race: whichever resolves first wins. The model stream may keep running + // after the hook fires — see NOTE above. + const result = await Promise.race([ + agent + .stream({ + messages, + writable: getWritable(), + maxSteps: 15, + }) + .then((r) => ({ type: "complete" as const, messages: r.messages })), + hook.then(({ reason }) => ({ type: "stopped" as const, reason })), + ]); + + // Notify the client with a clean "stopped" part instead of a silent drop. 
+ if (result.type === "stopped") { + await emitStopSignal({ reason: result.reason }); + } + + return result; +} +`; + export const agentCancellationStartRouteSource = `import type { UIMessage } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start } from "workflow/api"; @@ -200,6 +311,84 @@ export function StopButton({ } `; +// ─── Concept snippets ───────────────────────────────────────────────────────── +// Simplified educational code for patterns that differ meaningfully from +// the plug-and-play install. These appear in the "Concept" section and are +// NOT installed by the shadcn CLI. + +export const agentCancellationConceptHardCancelSource = `import { getRun } from "workflow/api"; + +// Hard Cancellation — terminates the run immediately. +// No cleanup runs, no final stream notification, no return value. +// Use when the run is stuck or you just need it gone. +export async function POST( + _request: Request, + { params }: { params: Promise<{ runId: string }> }, +) { + const { runId } = await params; + await getRun(runId).cancel(); + return Response.json({ success: true }); +} +`; + +export const agentCancellationConceptStopSignalSource = `import { DurableAgent } from "@workflow/ai/agent"; +import { defineHook, getWorkflowMetadata, getWritable } from "workflow"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Stop Signal — exits cleanly at the next await boundary, runs cleanup, +// and emits a final stream part so the client renders a tidy ending. +export const stopHook = defineHook<{ reason?: string }>(); + +export async function stoppableAgent(messages: ModelMessage[]) { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + // Hook token is scoped to this run — any process can resume it with just runId. 
+ const hook = stopHook.create({ token: \`stop:\${workflowRunId}\` }); + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: "You are a research assistant.", + tools: { /* your tools here */ }, + }); + + // Race: whichever resolves first wins — the agent finishing or the stop hook. + const result = await Promise.race([ + agent + .stream({ messages, writable: getWritable(), maxSteps: 15 }) + .then((r) => ({ type: "complete" as const, messages: r.messages })), + hook.then(({ reason }) => ({ type: "stopped" as const, reason })), + ]); + + // Emit a final stream part so the client knows it was stopped, not dropped. + if (result.type === "stopped") { + const writer = getWritable().getWriter(); + try { + await writer.write({ type: "data-stopped", id: "stop", data: { reason: result.reason } } as UIMessageChunk); + } finally { + writer.releaseLock(); + } + } + + return result; +} +`; + +export const agentCancellationConceptStopRouteSource = `import { stopHook } from "@/workflows/stoppable-agent"; + +// POST /api/agent/[runId]/stop — resumes the hook to trigger a graceful exit. +export async function POST( + request: Request, + { params }: { params: Promise<{ runId: string }> }, +) { + const { runId } = await params; + const { reason } = await request.json().catch(() => ({})); + await stopHook.resume(\`stop:\${runId}\`, { reason: reason ?? 
"User requested stop" }); + return Response.json({ success: true }); +} +`; + export const agentCancellationUsageSource = `// In your chat client, capture the runId from the response header on the // FIRST message and render the Stop button while the agent is streaming: "use client"; diff --git a/docs/lib/registry/snippets/ai-sdk.ts b/docs/lib/registry/snippets/ai-sdk.ts index b2ff8a3cdc..67530c0c42 100644 --- a/docs/lib/registry/snippets/ai-sdk.ts +++ b/docs/lib/registry/snippets/ai-sdk.ts @@ -106,6 +106,126 @@ export async function supportWorkflow(initialMessages: ModelMessage[]) { } `; +export const aiSdkWorkflowInstallSource = `/** + * AI SDK Integration — durable multi-turn conversation with streamText. + * + * THE PATTERN: + * 1. One workflow run = one full conversation. The run stays alive across + * all turns; each new message is delivered via a hook resume(). + * 2. A per-turn "use step" function calls streamText() and pipes the + * result into the durable writable (preventClose: true keeps it open + * for the next turn). + * 3. The API route slices the run's stream from the current turn's start + * index so each HTTP response only contains that turn's chunks. + * 4. MAX_TURNS caps the conversation; send "/done" to exit cleanly. + * + * USEFUL WHEN: + * - You want durable multi-turn conversations that survive restarts. + * - You need tool calls that are retried without re-running on replay. + * - Users can reconnect mid-stream and receive the full response. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace lookupOrder / processRefund with your domain tools. + * - Change "anthropic/claude-haiku-4.5" to any AI Gateway model string. + * - Adjust MAX_TURNS for your expected conversation length. + * - Change the system prompt in runTurn() to match your use case. + * - Tune stopWhen: stepCountIs(8) to cap the tool-calling loop per turn. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/ai-sdk + */ +import { streamText, stepCountIs } from "ai"; +import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; +import type { ModelMessage, UIMessageChunk } from "ai"; +import { z } from "zod"; + +const MAX_TURNS = 20; + +// One hook per run drives the multi-turn loop. Each .resume() from the API +// route delivers the next user message to the suspended workflow. +export const turnHook = defineHook({ + schema: z.object({ message: z.string() }), +}); + +// Tool implementations are durable steps — recorded before execution, +// replayed (not re-run) on restart, retried automatically on failure. +async function lookupOrder({ orderId }: { orderId: string }) { + "use step"; + const res = await fetch(\`https://api.store.com/orders/\${orderId}\`); + return res.json(); +} + +async function processRefund({ + orderId, + reason, +}: { orderId: string; reason: string }) { + "use step"; + const res = await fetch("https://api.store.com/refunds", { + method: "POST", + body: JSON.stringify({ orderId, reason }), + }); + return res.json(); +} + +const TOOLS = { + lookupOrder: { + description: "Look up an order by ID", + inputSchema: z.object({ orderId: z.string() }), + execute: lookupOrder, + }, + processRefund: { + description: "Process a refund", + inputSchema: z.object({ orderId: z.string(), reason: z.string() }), + execute: processRefund, + }, +}; + +// Per-turn step — streams one LLM response into the durable writable. +// "use step" makes the entire turn replay-safe: if the process restarts +// mid-stream, the next invocation replays from the last completed step. +async function runTurn(messages: ModelMessage[]) { + "use step"; + + const result = streamText({ + model: "anthropic/claude-haiku-4.5", + system: "You are a customer support agent.", + messages, + tools: TOOLS, + stopWhen: stepCountIs(8), + }); + + // preventClose: true keeps the durable writable open across turns. 
+ // Each turn still emits its own start + finish chunks for slice detection. + const writable = getWritable(); + await result.toUIMessageStream().pipeTo(writable, { preventClose: true }); + + const response = await result.response; + return { responseMessages: response.messages }; +} + +export async function supportWorkflow(initialMessages: ModelMessage[]) { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + // Create the hook ONCE outside the loop. Re-creating inside with the same + // token would throw HookConflictError. One hook, one token, reused every turn. + const hook = turnHook.create({ token: workflowRunId }); + let allMessages = initialMessages; + + for (let turn = 0; turn < MAX_TURNS; turn++) { + const { responseMessages } = await runTurn(allMessages); + allMessages = [...allMessages, ...responseMessages]; + + // Suspend here — the workflow parks until the next user message arrives. + const { message } = await hook; + if (message === "/done") break; + + allMessages = [...allMessages, { role: "user", content: message }]; + } + + return { turns: MAX_TURNS }; +} +`; + export const aiSdkRouteSource = `import type { UIMessage, UIMessageChunk } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start, getRun } from "workflow/api"; diff --git a/docs/lib/registry/snippets/batching.ts b/docs/lib/registry/snippets/batching.ts index 696ef1a306..875329e608 100644 --- a/docs/lib/registry/snippets/batching.ts +++ b/docs/lib/registry/snippets/batching.ts @@ -76,6 +76,97 @@ async function processRecord(record: ImportRecord): Promise { } `; +export const batchingWorkflowInstallSource = `/** + * Batching — process large lists in parallel chunks with failure isolation. + * + * THE PATTERN: + * 1. Slice the input array into fixed-size batches (default: 10). + * 2. Process each batch with Promise.allSettled() — failures are isolated + * per record; one bad record never aborts the whole batch. + * 3. 
sleep() between batches paces requests against downstream rate limits. + * 4. Each individual record runs in a "use step" — durable, retried 3x, + * and never re-executed on replay if it already completed. + * + * USEFUL WHEN: + * - Importing thousands of contacts, products, or orders from a CSV. + * - Sending bulk emails or notifications in controlled bursts. + * - Syncing data to a downstream API that has per-minute rate limits. + * - Any "fan-out over a list" task where partial failures are acceptable. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the ImportRecord interface with your record shape. + * - Replace the processRecord step body with your real API call. + * - Tune batchSize: smaller = more durable checkpoints, larger = faster. + * - Tune the sleep("1s") between batches to match your API's rate limit. + * - Change processRecord.maxRetries (default 3) for flaky endpoints. + * - Collect failure details from the returned failures array for reporting. + * + * DOCS: https://workflow-sdk.dev/patterns/batching + */ +import { sleep } from "workflow"; + +export interface ImportRecord { + name: string; + email: string; + role: string; +} + +export async function batchImport(records: ImportRecord[], batchSize = 10) { + "use workflow"; + + let totalSucceeded = 0; + let totalFailed = 0; + const failures: Array<{ email: string; reason: string }> = []; + + for (let i = 0; i < records.length; i += batchSize) { + const batch = records.slice(i, i + batchSize); + + // allSettled: failures inside a batch are isolated — never throws. + const outcomes = await Promise.allSettled( + batch.map((record) => processRecord(record)), + ); + + for (let j = 0; j < outcomes.length; j++) { + const outcome = outcomes[j]; + if (outcome.status === "fulfilled") { + totalSucceeded++; + } else { + totalFailed++; + failures.push({ + email: batch[j].email, + reason: + outcome.reason instanceof Error + ? 
outcome.reason.message
+              : String(outcome.reason),
+        });
+      }
+    }
+
+    // Pace between batches — tune or remove to match your provider's limits.
+    if (i + batchSize < records.length) {
+      await sleep("1s");
+    }
+  }
+
+  return { total: records.length, succeeded: totalSucceeded, failed: totalFailed, failures };
+}
+
+// Each record runs in its own step → durable, retried up to 3x by default.
+// Throw an Error (or RetryableError) to trigger a retry; FatalError to skip.
+async function processRecord(record: ImportRecord): Promise<string> {
+  "use step";
+  const res = await fetch("https://api.example.com/contacts", {
+    method: "POST",
+    body: JSON.stringify(record),
+  });
+  if (!res.ok) {
+    throw new Error(\`Failed to import \${record.email} (\${res.status})\`);
+  }
+  const { id } = await res.json();
+  return id;
+}
+`;
+
 export const batchingStartRouteSource = `import { start } from "workflow/api";
 import { NextResponse } from "next/server";
 import { batchImport, type ImportRecord } from "@/workflows/batching";
diff --git a/docs/lib/registry/snippets/chat-sdk.ts b/docs/lib/registry/snippets/chat-sdk.ts
index b2ee114549..74cc22199e 100644
--- a/docs/lib/registry/snippets/chat-sdk.ts
+++ b/docs/lib/registry/snippets/chat-sdk.ts
@@ -113,6 +113,109 @@ export async function durableChatSession(payload: string) {
 }
 `;
 
+export const chatSdkWorkflowInstallSource = `/**
+ * Chat SDK Integration — durable multi-turn chat bot backed by a Workflow run.
+ *
+ * THE PATTERN:
+ * 1. One conversation thread = one workflow run. The run's ID is stored in
+ *    Chat SDK's thread state so follow-up messages route to the same run.
+ * 2. The bot's message handler calls start() on the first message and
+ *    resumeHook() on every subsequent message, passing a serialized message.
+ * 3. Inside the workflow, a chatTurnHook (token = runId) suspends between
+ *    turns — zero compute cost while waiting for the next message.
+ * 4. 
Platform side effects (thread.post, thread.subscribe) run inside "use + * step" functions that dynamically import the bot — keeps adapter + * packages out of the workflow sandbox. + * + * USEFUL WHEN: + * - You're building a Slack / Telegram / Teams bot that needs durable state. + * - Long AI responses should survive process restarts mid-generation. + * - Tool calls in the bot should be retried without re-running on replay. + * - You want the bot to maintain conversation state across reconnections. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the runTurn step body with your AI SDK call, tool loop, or + * database lookup — any async logic that should be durable. + * - Replace createSlackAdapter() with your platform adapter (Telegram, + * Teams, Discord, etc.) from the @chat-adapter/* packages. + * - Replace createRedisState() with your preferred state backend. + * - Change "done" to your own session-termination signal. + * - Add more bot.onXxx() event handlers in the handlers file for + * reactions, emoji, DMs, slash commands, etc. + * + * DOCS: https://workflow-sdk.dev/patterns/chat-sdk + */ +import { Message, reviver, type Thread } from "chat"; +import { defineHook, getWorkflowMetadata } from "workflow"; +import type { ThreadState } from "@/lib/bot"; +import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; + +// One hook per run, token = runId. Reused every turn (created once outside +// the loop to avoid HookConflictError on subsequent turns). +const chatTurnHook = defineHook(); + +// Posting back to the platform is a "use step" — adapter packages use +// Node-only modules unavailable in the workflow sandbox, so we import the +// bot dynamically from inside the step body. 
+async function postAssistantMessage( + thread: Thread, + text: string +) { + "use step"; + const { bot } = await import("@/lib/bot"); + await bot.initialize(); + await thread.post(text); +} + +async function runTurn(text: string) { + "use step"; + // Replace with your AI SDK call, tool loop, or database lookup. + return \`You said: \${text}\`; +} + +async function handleMessage( + thread: Thread, + message: Message +) { + const text = message.text.trim(); + if (text.toLowerCase() === "done") return false; // session exit signal + + const reply = await runTurn(text); + await postAssistantMessage(thread, reply); + return true; +} + +export async function durableChatSession(payload: string) { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + // The handler serializes thread + message as JSON; reviver rehydrates them. + const { thread, message } = JSON.parse(payload, reviver) as { + thread: Thread; + message: Message; + }; + + // One hook per run, reused every turn. + const hook = chatTurnHook.create({ token: workflowRunId }); + + await postAssistantMessage( + thread, + "Session started. Reply here; send \`done\` to stop." + ); + + if (!(await handleMessage(thread, message))) return; + + // One hook resumption = one turn. The workflow suspends between messages + // — zero compute cost while idle. 
+ while (true) { + const { message: nextRaw } = await hook; + const next = Message.fromJSON(nextRaw); + if (!(await handleMessage(thread, next))) return; + } +} +`; + export const chatSdkHookTypeSource = `import type { SerializedMessage } from "chat"; // Importing this from the handler module keeps adapter dependencies out diff --git a/docs/lib/registry/snippets/child-workflows.ts b/docs/lib/registry/snippets/child-workflows.ts index c1c5d30096..9d42eb80ca 100644 --- a/docs/lib/registry/snippets/child-workflows.ts +++ b/docs/lib/registry/snippets/child-workflows.ts @@ -135,6 +135,160 @@ async function generateSummary(analysis: string): Promise { } `; +export const childWorkflowsWorkflowInstallSource = `/** + * Child Workflows — spawn-and-poll for independent parallel runs. + * + * THE PATTERN: + * 1. The parent spawns child workflows in chunks (from a "use step") so + * each chunk is its own durable checkpoint. + * 2. Each child is an independent workflow run with its own runId, event + * log, and retry boundary — a failing child never affects siblings. + * 3. The parent polls children via a sleep + checkStatuses loop. The + * polling lives inside the workflow so each sleep() is durable. + * 4. After all children complete, the parent collects their return values. + * + * USEFUL WHEN: + * - Processing hundreds of independent items (documents, users, records) + * in parallel with isolated failure handling per item. + * - You need per-item run IDs for individual observability and cancellation. + * - Children have long runtimes and you want the parent to survive restarts + * while waiting for them. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace processDocument with your child workflow function. + * - Replace the fetchDocument / analyzeContent / generateSummary steps + * with your real per-item work. + * - Tune SPAWN_CHUNK_SIZE: smaller = more durable checkpoints on spawn. + * - Tune POLL_INTERVAL and MAX_POLL_ITERATIONS to match expected duration. 
+ * - Adjust the collectResults return type to match your child's return value. + * - { deploymentId: "latest" } on start() lets children pick up future + * code deployments automatically during long-running parent runs. + * + * DOCS: https://workflow-sdk.dev/patterns/child-workflows + */ +import { sleep } from "workflow"; +import { getRun, start } from "workflow/api"; + +const POLL_INTERVAL = "30s"; +// 60 minutes worth of poll iterations at the configured interval. +const MAX_POLL_ITERATIONS = 120; +// Spawn in chunks so a single step doesn't time out on huge batches. +const SPAWN_CHUNK_SIZE = 25; + +// CHILD — one independent unit of work. Replace the steps with real logic. +export async function processDocument(documentId: string) { + "use workflow"; + + const content = await fetchDocument(documentId); + const analysis = await analyzeContent(content); + const summary = await generateSummary(analysis); + + return { documentId, summary }; +} + +// PARENT — orchestrates many children, polls them, collects their output. +export async function processDocumentBatch(documentIds: string[]) { + "use workflow"; + + // Spawn in chunks. Each chunk is its own step → durable + retried. + const allRunIds: string[] = []; + for (let i = 0; i < documentIds.length; i += SPAWN_CHUNK_SIZE) { + const chunk = documentIds.slice(i, i + SPAWN_CHUNK_SIZE); + const runIds = await spawnChunk(chunk); + allRunIds.push(...runIds); + } + + await pollUntilComplete(allRunIds); + const results = await collectResults(allRunIds); + + return { processed: results.length, results }; +} + +// Polling loop — lives in the workflow so sleep() replays durably. 
+async function pollUntilComplete(runIds: string[]): Promise<void> {
+  for (let iteration = 0; iteration < MAX_POLL_ITERATIONS; iteration++) {
+    const status = await checkStatuses(runIds);
+
+    if (status.running === 0) {
+      if (status.failed > 0) {
+        throw new Error(\`\${status.failed} of \${runIds.length} children failed\`);
+      }
+      return;
+    }
+
+    await sleep(POLL_INTERVAL);
+  }
+
+  throw new Error("Timed out waiting for children to complete");
+}
+
+// start() must be called from a step, not directly from a workflow function.
+// deploymentId: "latest" makes children pick up future deployments automatically.
+async function spawnChunk(documentIds: string[]): Promise<string[]> {
+  "use step";
+
+  const runIds: string[] = [];
+  for (const docId of documentIds) {
+    const run = await start(processDocument, [docId], { deploymentId: "latest" });
+    runIds.push(run.runId);
+  }
+  return runIds;
+}
+
+// getRun() also must be called from a step.
+async function checkStatuses(
+  runIds: string[],
+): Promise<{ running: number; completed: number; failed: number }> {
+  "use step";
+
+  let running = 0;
+  let completed = 0;
+  let failed = 0;
+
+  for (const runId of runIds) {
+    const status = await getRun(runId).status;
+    if (status === "completed") completed++;
+    else if (status === "failed" || status === "cancelled") failed++;
+    else running++;
+  }
+
+  return { running, completed, failed };
+}
+
+async function collectResults(
+  runIds: string[],
+): Promise<Array<{ documentId: string; summary: string }>> {
+  "use step";
+
+  const results: Array<{ documentId: string; summary: string }> = [];
+  for (const runId of runIds) {
+    const value = (await getRun(runId).returnValue) as {
+      documentId: string;
+      summary: string;
+    };
+    results.push(value);
+  }
+  return results;
+}
+
+// Replace the step bodies below with your real per-document work. 
+async function fetchDocument(documentId: string): Promise<string> {
+  "use step";
+  const res = await fetch(\`https://docs.example.com/api/\${documentId}\`);
+  return res.text();
+}
+
+async function analyzeContent(content: string): Promise<string> {
+  "use step";
+  return \`analysis of \${content.length} chars\`;
+}
+
+async function generateSummary(analysis: string): Promise<string> {
+  "use step";
+  return \`Summary: \${analysis}\`;
+}
+`;
+
 export const childWorkflowsStartRouteSource = `import { start } from "workflow/api";
 import { NextResponse } from "next/server";
 import { processDocumentBatch } from "@/workflows/child-workflows";
diff --git a/docs/lib/registry/snippets/distributed-abort-controller.ts b/docs/lib/registry/snippets/distributed-abort-controller.ts
index 30a3d75e36..d355a1c713 100644
--- a/docs/lib/registry/snippets/distributed-abort-controller.ts
+++ b/docs/lib/registry/snippets/distributed-abort-controller.ts
@@ -172,6 +172,191 @@ export class DistributedAbortController {
 }
 `;
 
+export const distributedAbortControllerLibInstallSource = `/**
+ * Distributed Abort Controller — cross-process AbortController backed by a
+ * durable workflow.
+ *
+ * THE PATTERN:
+ * 1. DistributedAbortController.create(id) spawns a coordination workflow
+ *    that races a manual abort hook against a TTL sleep.
+ * 2. Any process that creates a controller with the same semantic ID gets
+ *    a handle to the same underlying run — no runId sharing required.
+ * 3. .abort() resumes the hook; .signal returns an AbortSignal that fires
+ *    when the hook fires or the TTL expires.
+ * 4. The coordination workflow writes an "abort" message to its stream;
+ *    .signal subscribes to that stream and calls controller.abort() on it.
+ *
+ * USEFUL WHEN:
+ * - Cancelling a long-running fetch when a user clicks "Cancel" on a
+ *   different machine or tab.
+ * - Stopping parallel work across multiple serverless function invocations.
+ * - Propagating a stop signal to any number of subscribers across processes.
+ * + * TO ADAPT THIS TO YOUR USE CASE: + * - Pass a meaningful semantic ID (job ID, request ID, session ID) that + * all participants can derive without coordination. + * - Tune ttlMs (default 24h) for short-lived operations. + * - Tune graceMs (default 1h) — how long after TTL the stream stays open + * for late subscribers to observe the abort. + * - Pass controller.signal to any fetch() or async operation that respects + * AbortSignal — including OpenAI SDK, axios, and Node.js streams. + * + * DOCS: https://workflow-sdk.dev/patterns/distributed-abort-controller + */ +import { defineHook, getWritable, sleep } from "workflow"; +import { start, getRun, getHookByToken } from "workflow/api"; + +const DEFAULT_TTL_MS = 24 * 60 * 60 * 1000; // 24h +const DEFAULT_GRACE_MS = 60 * 60 * 1000; // 1h grace for late subscribers + +export const abortHook = defineHook<{ reason?: string }>(); + +export type AbortMessage = { + type: "abort"; + reason?: string; + expired?: boolean; +}; + +function getAbortToken(id: string): string { + return \`abort:\${id}\`; +} + +async function writeAbortSignal(reason?: string, expired?: boolean) { + "use step"; + const writable = getWritable(); + const writer = writable.getWriter(); + try { + await writer.write({ type: "abort", reason, expired }); + } finally { + writer.releaseLock(); + } + await writable.close(); +} + +// Coordination workflow — races a manual abort against TTL expiration. +// The result is written to the run's stream so any .signal subscriber +// receives it regardless of when they connect. 
+export async function abortControllerWorkflow( + id: string, + ttlMs: number, + graceMs: number, +) { + "use workflow"; + + const startTime = Date.now(); + const hook = abortHook.create({ token: getAbortToken(id) }); + + const result = await Promise.race([ + hook.then((payload) => ({ reason: payload.reason, expired: false })), + sleep(\`\${ttlMs}ms\`).then(() => ({ reason: "Controller expired", expired: true })), + ]); + + await writeAbortSignal(result.reason, result.expired); + + // On TTL expiry, sleep through the grace period so late subscribers + // can still observe the abort event from the stream. + if (result.expired) { + const elapsed = Date.now() - startTime; + const remainingTime = graceMs - (elapsed - ttlMs); + if (remainingTime > 0) { + await sleep(\`\${remainingTime}ms\`); + } + } + + return { aborted: true, reason: result.reason, expired: result.expired }; +} + +/** + * AbortController-shaped API on top of a durable workflow. + * Calling .abort() on any process triggers .signal on any other process + * that created a controller with the same ID. + */ +export class DistributedAbortController { + private id: string; + readonly runId: string; + + private constructor(id: string, runId: string) { + this.id = id; + this.runId = runId; + } + + /** + * Create or reconnect by semantic ID. If a controller with this ID already + * exists, returns a handle to it; otherwise spawns a new workflow. + */ + static async create( + id: string, + options: { ttlMs?: number; graceMs?: number } = {}, + ): Promise { + const { ttlMs = DEFAULT_TTL_MS, graceMs = DEFAULT_GRACE_MS } = options; + const token = getAbortToken(id); + + // Reconnect to an existing controller if one is already running. 
+ const existingHook = await getHookByToken(token).catch(() => null); + if (existingHook) { + return new DistributedAbortController(id, existingHook.runId); + } + + const run = await start(abortControllerWorkflow, [id, ttlMs, graceMs]); + return new DistributedAbortController(id, run.runId); + } + + /** + * Trigger the abort signal. Idempotent — safe to call after expiry. + */ + async abort(reason?: string): Promise { + try { + await abortHook.resume(getAbortToken(this.id), { reason }); + } catch (error) { + const msg = error instanceof Error ? error.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + return; // Already aborted or expired — no-op. + } + throw error; + } + } + + /** + * AbortSignal that fires when .abort() is called or TTL expires. + * Cache the returned signal if you subscribe more than once. + */ + get signal(): AbortSignal { + const run = getRun<{ aborted: boolean; reason?: string; expired?: boolean }>( + this.runId, + ); + const controller = new AbortController(); + const readable = run.getReadable(); + + (async () => { + const reader = readable.getReader(); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + if (value.type === "abort") { + const reason = value.expired + ? \`\${value.reason} (expired)\` + : value.reason; + controller.abort(reason); + break; + } + } + } catch (error) { + if (!controller.signal.aborted) { + controller.abort( + error instanceof Error ? 
error.message : "Stream read failed", + ); + } + } finally { + reader.releaseLock(); + } + })(); + + return controller.signal; + } +} +`; + export const distributedAbortControllerRouteSource = `import { NextResponse } from "next/server"; import { DistributedAbortController } from "@/lib/distributed-abort-controller"; diff --git a/docs/lib/registry/snippets/durable-agent.ts b/docs/lib/registry/snippets/durable-agent.ts index 5dc276b951..dd77d022b1 100644 --- a/docs/lib/registry/snippets/durable-agent.ts +++ b/docs/lib/registry/snippets/durable-agent.ts @@ -116,6 +116,130 @@ export async function flightAgent(messages: ModelMessage[]) { } `; +export const durableAgentWorkflowInstallSource = `/** + * Durable Agent — crash-safe AI agent with durable tool execution. + * + * THE PATTERN: + * 1. Each tool is a "use step" function — the runtime persists its input + * before calling it and replays the recorded result on restart. + * 2. The agent workflow ("use workflow") drives the LLM loop via + * DurableAgent.stream(). If the process crashes mid-tool-call, it + * resumes from the last completed step on the next invocation. + * 3. getWritable() streams text, tool calls, and results + * to the client in real time. The stream is durable — clients can + * reconnect and resume from any offset. + * + * USEFUL WHEN: + * - Your agent calls external APIs with side effects (bookings, payments) + * and you can't afford to re-run them on failure. + * - Tool calls are slow (seconds) and you want retry without duplicates. + * - You need multi-turn conversation state to survive server restarts. + * - You're building a chat UI where users can reconnect mid-generation. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace searchFlights / bookFlight / checkWeather with your tools. + * Every "use step" function gets the same durability guarantees. + * - Swap "anthropic/claude-haiku-4.5" for any AI Gateway model string + * (openai/gpt-4o, google/gemini-2.0-flash, etc.). 
+ * - Tune maxSteps to match your agent's expected tool-call depth. + * - Add a stopHook race (see Agent Cancellation pattern) for a Stop button. + * - For human-in-the-loop approval gates, see the Human-in-the-Loop pattern. + * + * DOCS: https://workflow-sdk.dev/patterns/durable-agent + */ +import { DurableAgent } from "@workflow/ai/agent"; +import { getWritable } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// "use step" turns any async function into a durable step: +// - automatic retries on failure (3x by default, configurable via .maxRetries) +// - one entry per call in the workflow event log +// - full Node.js access (fetch, fs, child_process, native modules, …) +// - replay-safe: re-runs return the recorded result without re-executing +async function searchFlights({ from, to, date }: { + from: string; + to: string; + date: string; +}) { + "use step"; + const res = await fetch( + \`https://api.example.com/flights?from=\${from}&to=\${to}&date=\${date}\`, + ); + if (!res.ok) throw new Error(\`Search failed: \${res.status}\`); + return res.json(); +} + +async function bookFlight({ flightId, passenger }: { + flightId: string; + passenger: string; +}) { + "use step"; + const res = await fetch("https://api.example.com/bookings", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ flightId, passenger }), + }); + if (!res.ok) throw new Error(\`Booking failed: \${res.status}\`); + return res.json(); +} + +async function checkWeather({ city }: { city: string }) { + "use step"; + const res = await fetch(\`https://api.weather.com/forecast?city=\${city}\`); + return res.json(); +} + +// "use workflow" declares the orchestrator — its execution is replay-safe +// and persisted to the event log. Each agent.stream() call drives the LLM +// loop; tools fire as durable steps. 
+export async function flightAgent(messages: ModelMessage[]) { + "use workflow"; + + const agent = new DurableAgent({ + // Any AI Gateway model string works — swap providers without touching + // the durability layer. + model: "anthropic/claude-haiku-4.5", + instructions: "You are a helpful flight booking assistant.", + tools: { + searchFlights: { + description: "Search for available flights between two airports.", + inputSchema: z.object({ + from: z.string().describe("Departure airport code"), + to: z.string().describe("Arrival airport code"), + date: z.string().describe("Travel date (YYYY-MM-DD)"), + }), + execute: searchFlights, + }, + bookFlight: { + description: "Book a specific flight for a passenger.", + inputSchema: z.object({ + flightId: z.string().describe("Flight ID from search results"), + passenger: z.string().describe("Passenger full name"), + }), + execute: bookFlight, + }, + checkWeather: { + description: "Check the weather forecast for a city.", + inputSchema: z.object({ city: z.string().describe("City name") }), + execute: checkWeather, + }, + }, + }); + + // getWritable() streams chunks to the client in real time. + // maxSteps caps the LLM loop — tune for your agent's expected depth. + const result = await agent.stream({ + messages, + writable: getWritable(), + maxSteps: 10, + }); + + // Return final messages so multi-turn callers can pass them back in. 
+ return { messages: result.messages }; +} +`; + export const durableAgentStartRouteSource = `import type { UIMessage } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start } from "workflow/api"; diff --git a/docs/lib/registry/snippets/human-in-the-loop.ts b/docs/lib/registry/snippets/human-in-the-loop.ts index 26bf42ad1f..10e4a44ef7 100644 --- a/docs/lib/registry/snippets/human-in-the-loop.ts +++ b/docs/lib/registry/snippets/human-in-the-loop.ts @@ -148,6 +148,171 @@ export async function approvalAgent(messages: ModelMessage[]) { } `; +export const humanInTheLoopWorkflowInstallSource = `/** + * Human-in-the-Loop — pause a DurableAgent for human approval. + * + * THE PATTERN: + * 1. The agent calls requestApproval as a tool with a summary and payload. + * 2. Before suspending, emit a "data-approval-needed" stream part so the + * client can render approval controls immediately — tool results don't + * stream until the tool returns, so this emit is the only way to show UI. + * 3. Create an approvalHook keyed by toolCallId and await it, racing against + * a 24h sleep() timeout so the workflow never hangs forever. + * 4. On approval/rejection, emit a "data-approval-resolved" part so the + * client can update the card, then execute the real side effect. + * 5. The approval API route resumes the hook with { approved, comment }. + * + * USEFUL WHEN: + * - An agent action is irreversible or high-value (payment, delete, publish). + * - Compliance requires a human sign-off before automated side effects. + * - You want an audit trail of who approved what and when. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace performAction with your real side effect (Stripe charge, DB + * write, Slack post, etc.). It is a "use step" so it is durable. + * - Change the timeout duration in sleep("24h") to suit your SLA. 
+ * - Add more fields to the approvalHook schema if you need structured
+ *   reviewer notes beyond a simple boolean + comment.
+ * - Extend the ApprovalCard UI to display payload fields meaningfully
+ *   (e.g. formatted amount, recipient name).
+ *
+ * DOCS: https://workflow-sdk.dev/patterns/human-in-the-loop
+ */
+import { DurableAgent } from "@workflow/ai/agent";
+import { defineHook, getWritable, sleep } from "workflow";
+import { z } from "zod";
+import type { ModelMessage, UIMessageChunk } from "ai";
+
+// Exported so the approval API route can resume with the human's decision.
+export const approvalHook = defineHook({
+  schema: z.object({
+    approved: z.boolean(),
+    comment: z.string().optional(),
+  }),
+});
+
+// Real side effect — replace with your action (charge, publish, delete…).
+// "use step" makes it durable: retried on failure, never re-run on replay.
+async function performAction({ summary }: { summary: string }) {
+  "use step";
+  console.log("Performing approved action:", summary);
+  return { ok: true, summary };
+}
+
+// Emit a custom stream part BEFORE suspending on the hook.
+// Without this, the client has no way to show approval controls because
+// tool invocations don't stream until the tool returns.
+async function emitApprovalRequest(details: {
+  toolCallId: string;
+  summary: string;
+  payload: Record<string, unknown>;
+}) {
+  "use step";
+  const writer = getWritable().getWriter();
+  try {
+    await writer.write({
+      type: "data-approval-needed",
+      id: details.toolCallId,
+      data: details,
+    } as UIMessageChunk);
+  } finally {
+    writer.releaseLock();
+  }
+}
+
+// Emit a resolution part so the client can update the approval card to
+// show the outcome (approved, rejected, or timed out).
+async function emitApprovalResolved(details: {
+  toolCallId: string;
+  result: string;
+}) {
+  "use step";
+  const writer = getWritable().getWriter();
+  try {
+    await writer.write({
+      type: "data-approval-resolved",
+      id: details.toolCallId,
+      data: details,
+    } as UIMessageChunk);
+  } finally {
+    writer.releaseLock();
+  }
+}
+
+// No "use step" here — requestApproval uses workflow-level primitives
+// (hook.create, Promise.race, sleep) that must run in workflow context.
+// The actual I/O (emit + perform) delegates to "use step" functions above.
+async function requestApproval(
+  { summary, payload }: {
+    summary: string;
+    payload: Record<string, unknown>;
+  },
+  { toolCallId }: { toolCallId: string },
+) {
+  // 1. Notify the client immediately before suspending.
+  await emitApprovalRequest({ toolCallId, summary, payload });
+
+  // 2. Suspend — the workflow pauses here until the reviewer responds or
+  //    the timeout fires. The run stays alive in the Workflow backend.
+  const hook = approvalHook.create({ token: toolCallId });
+  const result = await Promise.race([
+    hook.then((p) => ({ type: "decision" as const, ...p })),
+    sleep("24h").then(() => ({ type: "timeout" as const, approved: false as const })),
+  ]);
+
+  // 3. Resolve based on outcome. 
+ if (result.type === "timeout") { + const msg = "Approval request expired."; + await emitApprovalResolved({ toolCallId, result: msg }); + return msg; + } + if (!result.approved) { + const msg = \`Rejected: \${result.comment || "No reason given"}\`; + await emitApprovalResolved({ toolCallId, result: msg }); + return msg; + } + + const action = await performAction({ summary }); + const msg = \`Approved and executed: \${action.summary}\`; + await emitApprovalResolved({ toolCallId, result: msg }); + return msg; +} + +export async function approvalAgent(messages: ModelMessage[]) { + "use workflow"; + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + // Instruct the model to ALWAYS request approval before side effects. + instructions: + "You are a careful assistant. ALWAYS call requestApproval before performing any consequential action.", + tools: { + requestApproval: { + description: + "Request human approval before performing a consequential action.", + inputSchema: z.object({ + summary: z.string().describe("Short description of the action."), + payload: z + .record(z.string(), z.unknown()) + .describe( + "Structured details rendered on the approval card — e.g. 
amount, recipient, etc.", + ), + }), + execute: requestApproval, + }, + }, + }); + + const result = await agent.stream({ + messages, + writable: getWritable(), + maxSteps: 15, + }); + + return { messages: result.messages }; +} +`; + export const humanInTheLoopStartRouteSource = `import type { UIMessage } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start } from "workflow/api"; diff --git a/docs/lib/registry/snippets/idempotency.ts b/docs/lib/registry/snippets/idempotency.ts index 76596a9e78..bf3f07c252 100644 --- a/docs/lib/registry/snippets/idempotency.ts +++ b/docs/lib/registry/snippets/idempotency.ts @@ -66,6 +66,93 @@ async function sendReceipt(customerId: string, chargeId: string): Promise } `; +export const idempotencyWorkflowInstallSource = `/** + * Idempotency — prevent duplicate side effects on retries and replays. + * + * THE PATTERN: + * 1. getStepMetadata().stepId returns a deterministic ID that is stable + * across retries and replays of the same step invocation. + * 2. Pass that stepId as the Idempotency-Key header to external APIs that + * support it (Stripe, Braintree, Adyen, etc.). + * 3. The provider deduplicates: a retry with the same key returns the + * original response instead of creating a second charge / email / etc. + * + * USEFUL WHEN: + * - Charging a credit card (duplicates cause double charges). + * - Sending transactional emails (duplicates annoy users). + * - Creating external resources where duplication would cause data issues. + * - Any non-idempotent API call inside a retryable step. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the Stripe charge call with your provider's API. + * - Pass stepId as the idempotency key header your provider expects + * (Stripe: "Idempotency-Key", Braintree: "X-Request-Id", etc.). + * - Replace sendReceipt with your notification step (Resend, SendGrid…). + * - Add STRIPE_SECRET_KEY (and other secrets) to your .env file. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/idempotency + */ +import { getStepMetadata } from "workflow"; + +export async function chargeCustomer(customerId: string, amountCents: number) { + "use workflow"; + + const charge = await createCharge(customerId, amountCents); + await sendReceipt(customerId, charge.id); + + return { customerId, chargeId: charge.id, status: "completed" as const }; +} + +// stepId is deterministic across retries — Stripe deduplicates on it, +// so even if this step runs twice the customer is only charged once. +async function createCharge( + customerId: string, + amountCents: number, +): Promise<{ id: string; amount: number }> { + "use step"; + + const { stepId } = getStepMetadata(); + + const res = await fetch("https://api.stripe.com/v1/charges", { + method: "POST", + headers: { + Authorization: \`Bearer \${process.env.STRIPE_SECRET_KEY}\`, + "Content-Type": "application/x-www-form-urlencoded", + // Stripe returns the same charge object if this key has been seen before. + "Idempotency-Key": stepId, + }, + body: new URLSearchParams({ + amount: String(amountCents), + currency: "usd", + customer: customerId, + }), + }); + + if (!res.ok) { + const error = await res.json().catch(() => ({ message: "unknown" })); + throw new Error(\`Charge failed: \${error.message ?? res.status}\`); + } + + return res.json(); +} + +async function sendReceipt(customerId: string, chargeId: string): Promise { + "use step"; + + const { stepId } = getStepMetadata(); + + await fetch("https://api.example.com/receipts", { + method: "POST", + headers: { + "Content-Type": "application/json", + // Same pattern for any non-idempotent notification API. 
+ "Idempotency-Key": stepId, + }, + body: JSON.stringify({ customerId, chargeId }), + }); +} +`; + export const idempotencyStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; import { chargeCustomer } from "@/workflows/idempotency"; diff --git a/docs/lib/registry/snippets/rate-limiting.ts b/docs/lib/registry/snippets/rate-limiting.ts index f22ce99055..1525ea9374 100644 --- a/docs/lib/registry/snippets/rate-limiting.ts +++ b/docs/lib/registry/snippets/rate-limiting.ts @@ -65,6 +65,95 @@ async function upsertToWarehouse( upsertToWarehouse.maxRetries = 10; `; +export const rateLimitingWorkflowInstallSource = `/** + * Rate Limiting — handle 429s and back-pressure without manual sleep loops. + * + * THE PATTERN: + * 1. On a 429 response, throw RetryableError with a retryAfter value + * (milliseconds or a duration string like "1m"). + * 2. The Workflow runtime reschedules the step automatically — no + * manual retry loops, timers, or sleep() calls needed. + * 3. For exponential backoff, read getStepMetadata().attempt (0-indexed) + * and compute the delay: attempt ** 2 * 1000 gives 1s, 4s, 9s… + * 4. Set stepFn.maxRetries to override the 3x default for flaky endpoints. + * + * USEFUL WHEN: + * - Fetching from third-party APIs that enforce rate limits (CRMs, SaaS). + * - Writing to analytics warehouses or data pipelines with back-pressure. + * - Any external HTTP call that can transiently return 429 or 503. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace fetchFromCrm with your rate-limited GET step. + * - Replace upsertToWarehouse with your write step. + * - Adjust retryAfter values to match your provider's documented limits. + * - For non-HTTP back-pressure (queue depth, token bucket), compute + * retryAfter from your own logic and throw RetryableError the same way. + * - Increase maxRetries beyond 10 for very spiky endpoints. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/rate-limiting + */ +import { RetryableError, getStepMetadata } from "workflow"; + +export async function syncContact(contactId: string) { + "use workflow"; + + const contact = await fetchFromCrm(contactId); + await upsertToWarehouse(contactId, contact); + + return { contactId, status: "synced" as const }; +} + +// 429 — read Retry-After from the response and let the runtime reschedule. +// The step will be called again after the delay; no loop or timer needed. +async function fetchFromCrm(contactId: string): Promise { + "use step"; + + const res = await fetch(\`https://crm.example.com/contacts/\${contactId}\`); + + if (res.status === 429) { + const retryAfter = res.headers.get("Retry-After"); + throw new RetryableError("Rate limited by CRM", { + // Accept the provider's delay if present; default to 1 minute. + retryAfter: retryAfter ? Number.parseInt(retryAfter, 10) * 1000 : "1m", + }); + } + + if (!res.ok) { + throw new Error(\`CRM returned \${res.status}\`); + } + + return res.json(); +} + +// 5xx + 429 — exponential backoff using the current attempt count. +// attempt is 0-indexed: attempt 0 → 1s wait, attempt 1 → 4s, attempt 2 → 9s… +async function upsertToWarehouse( + contactId: string, + contact: unknown, +): Promise { + "use step"; + + const { attempt } = getStepMetadata(); + const res = await fetch(\`https://warehouse.example.com/contacts/\${contactId}\`, { + method: "PUT", + body: JSON.stringify(contact), + }); + + if (res.status === 429 || res.status >= 500) { + throw new RetryableError(\`Warehouse error \${res.status}\`, { + retryAfter: attempt ** 2 * 1000, // 1s, 4s, 9s... + }); + } + + if (!res.ok) { + throw new Error(\`Warehouse returned \${res.status}\`); + } +} + +// Allow more retries than the default of 3 for known-flaky endpoints. 
+upsertToWarehouse.maxRetries = 10; +`; + export const rateLimitingStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; import { syncContact } from "@/workflows/rate-limiting"; diff --git a/docs/lib/registry/snippets/resend.ts b/docs/lib/registry/snippets/resend.ts index 5b2346eb09..e41238766c 100644 --- a/docs/lib/registry/snippets/resend.ts +++ b/docs/lib/registry/snippets/resend.ts @@ -121,6 +121,121 @@ async function sendSecondNudge(user: UserSignup) { } `; +export const resendWorkflowInstallSource = `/** + * Resend — drip email sequence with cancellation. + * + * THE PATTERN: + * 1. On signup, store the user and send an immediate welcome email. + * 2. Open a cancelNudges hook (token = email) once — it persists for the + * entire campaign so any race() below can fire it. + * 3. Race each sleep() against the hook. If the hook fires (user converts, + * unsubscribes, or re-signs-up), exit early without sending more email. + * 4. If the sleep wins, send the nudge and continue to the next sleep. + * + * USEFUL WHEN: + * - Sending a welcome → nudge → nudge drip sequence after signup. + * - You need the sequence to stop early if the user takes the desired action. + * - You want crash-safe delivery without a job queue or scheduler. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Add more nudge steps following the same sleep → race → send pattern. + * - Replace storeUser with your real DB / CRM call. + * - Replace the Resend email bodies with your real HTML templates. + * - Tune the interval parameter (default "2d") — pass "5s" to test fast. + * - The cancel route calls cancelNudges.resume(email) to stop the sequence; + * fire this on checkout, unsubscribe, or any conversion event. + * + * IMPORTANT: Create the cancelHook ONCE before the first race, not inside + * each sleep block. Re-creating it with the same token would throw + * HookConflictError because the first hook is still pending. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/resend + */ +import { defineHook, sleep } from "workflow"; +import type { StringValue } from "ms"; +import { Resend } from "resend"; + +export interface UserSignup { + email: string; + name: string; +} + +// Exported so the cancel API route can call .resume() to stop the sequence. +export const cancelNudges = defineHook<{ reason?: string }>(); + +export async function emailSequence( + input: UserSignup, + interval: StringValue = "2d" +) { + "use workflow"; + + await storeUser(input); + await sendWelcomeEmail(input); + + // Create the hook ONCE before any race. Reuse this single hook across + // every cancellable sleep — do NOT recreate it inside the loop. + const cancelHook = cancelNudges.create({ + token: \`cancel-nudges:\${input.email}\`, + }); + + // First wait: sleep OR cancel. + if (await Promise.race([sleep(interval).then(() => false), cancelHook.then(() => true)])) { + return { status: "cancelled" as const, email: input.email }; + } + + await sendFirstNudge(input); + + // Second wait: same hook, fresh sleep. + if (await Promise.race([sleep(interval).then(() => false), cancelHook.then(() => true)])) { + return { status: "cancelled" as const, email: input.email }; + } + + await sendSecondNudge(input); + + return { status: "drip-complete" as const, email: input.email }; +} + +async function storeUser(user: UserSignup) { + "use step"; + // Replace with your DB / CRM call: + // await db.insert(users).values({ email: user.email, name: user.name }); + console.log(\`Stored signup for \${user.email}\`); +} + +async function sendWelcomeEmail(user: UserSignup) { + "use step"; + const resend = new Resend(process.env.RESEND_API_KEY); + await resend.emails.send({ + from: "onboarding@resend.dev", + to: user.email, + subject: \`Welcome, \${user.name}!\`, + html: \`

Hey \${user.name},

Thanks for signing up!

\`, + }); +} + +async function sendFirstNudge(user: UserSignup) { + "use step"; + const resend = new Resend(process.env.RESEND_API_KEY); + await resend.emails.send({ + from: "onboarding@resend.dev", + to: user.email, + subject: \`\${user.name}, check out what you can build\`, + html: \`

Hey \${user.name},

Here are a few things to try…

\`, + }); +} + +async function sendSecondNudge(user: UserSignup) { + "use step"; + const resend = new Resend(process.env.RESEND_API_KEY); + await resend.emails.send({ + from: "onboarding@resend.dev", + to: user.email, + subject: \`\${user.name}, you're missing out\`, + html: \`

Hey \${user.name},

Need help getting started?

\`, + }); +} +`; + export const resendStartRouteSource = `import { start } from "workflow/api"; import type { StringValue } from "ms"; import { NextResponse } from "next/server"; diff --git a/docs/lib/registry/snippets/saga.ts b/docs/lib/registry/snippets/saga.ts index 575173a57c..3bc079592b 100644 --- a/docs/lib/registry/snippets/saga.ts +++ b/docs/lib/registry/snippets/saga.ts @@ -127,6 +127,156 @@ async function deprovisionSeats(accountId: string, entitlementId: string): Promi } `; +export const sagaWorkflowInstallSource = `/** + * Saga (Transactions & Rollbacks) — multi-step transaction with automatic + * compensation on failure. + * + * THE PATTERN: + * 1. Each forward step pushes a matching undo function onto a compensation + * stack before executing — so the stack is always in sync with what + * has actually succeeded. + * 2. On any error, the catch block unwinds the stack in LIFO order, + * calling each undo step to restore consistency. + * 3. Compensation steps are "use step" functions — durable and retried — + * so a mid-rollback crash doesn't leave data inconsistent. + * 4. FatalError skips the default 3x retry and triggers rollback immediately + * for errors that can't benefit from a retry (e.g. "card declined"). + * + * USEFUL WHEN: + * - A multi-step flow (reserve → charge → provision → notify) must be + * consistent: if any step fails, all prior steps must be undone. + * - You can't use a database transaction across multiple external services. + * - You need an audit trail of what was attempted and what was rolled back. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace reserveSeats / captureInvoice / provisionSeats with your + * forward steps. Each must have a matching compensation pushed before it. + * - Make all compensation steps idempotent — they may be called multiple + * times if the workflow restarts mid-rollback. + * - Use FatalError on permanent failures (auth errors, validation) to skip + * retries and trigger the rollback immediately. 
+ * - sendConfirmation is fire-and-forget (no compensation) — OK for + * notifications where duplication is harmless. + * + * DOCS: https://workflow-sdk.dev/patterns/saga + */ +import { FatalError } from "workflow"; + +export async function subscriptionUpgradeSaga(accountId: string, seats: number) { + "use workflow"; + + // Stack grows as steps succeed; unwound in LIFO order on failure. + const compensations: Array<{ name: string; undo: () => Promise }> = []; + + try { + const reservationId = await reserveSeats(accountId, seats); + compensations.push({ + name: "Release seats", + undo: () => releaseSeats(accountId, reservationId), + }); + + const invoiceId = await captureInvoice(accountId, seats); + compensations.push({ + name: "Refund invoice", + undo: () => refundInvoice(accountId, invoiceId), + }); + + const entitlementId = await provisionSeats(accountId, seats); + compensations.push({ + name: "Deprovision seats", + undo: () => deprovisionSeats(accountId, entitlementId), + }); + + // Fire-and-forget — notifications don't need a compensation. + await sendConfirmation(accountId, invoiceId, entitlementId); + + return { status: "completed" as const, accountId, invoiceId, entitlementId }; + } catch (error) { + // Unwind in LIFO order. Each undo is itself a step → durable + retried. + for (const comp of compensations.reverse()) { + await comp.undo(); + } + return { + status: "rolled_back" as const, + accountId, + reason: error instanceof Error ? error.message : "Unknown error", + }; + } +} + +// Forward steps — throw FatalError for permanent failures to skip retries +// and trigger compensation immediately. 
+async function reserveSeats(accountId: string, seats: number): Promise { + "use step"; + const res = await fetch("https://api.example.com/seats/reserve", { + method: "POST", + body: JSON.stringify({ accountId, seats }), + }); + if (!res.ok) throw new FatalError("Seat reservation failed"); + const { reservationId } = await res.json(); + return reservationId; +} + +async function captureInvoice(accountId: string, seats: number): Promise { + "use step"; + const res = await fetch("https://api.example.com/invoices", { + method: "POST", + body: JSON.stringify({ accountId, seats }), + }); + if (!res.ok) throw new FatalError("Invoice capture failed"); + const { invoiceId } = await res.json(); + return invoiceId; +} + +async function provisionSeats(accountId: string, seats: number): Promise { + "use step"; + const res = await fetch("https://api.example.com/entitlements", { + method: "POST", + body: JSON.stringify({ accountId, seats }), + }); + if (!res.ok) throw new FatalError("Provisioning failed"); + const { entitlementId } = await res.json(); + return entitlementId; +} + +async function sendConfirmation( + accountId: string, + invoiceId: string, + entitlementId: string, +): Promise { + "use step"; + await fetch("https://api.example.com/notifications", { + method: "POST", + body: JSON.stringify({ accountId, invoiceId, entitlementId, template: "upgrade-complete" }), + }); +} + +// Compensation steps — MUST be idempotent. May be called again if retried. 
+async function releaseSeats(accountId: string, reservationId: string): Promise { + "use step"; + await fetch("https://api.example.com/seats/release", { + method: "POST", + body: JSON.stringify({ accountId, reservationId }), + }); +} + +async function refundInvoice(accountId: string, invoiceId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/invoices/\${invoiceId}/refund\`, { + method: "POST", + body: JSON.stringify({ accountId }), + }); +} + +async function deprovisionSeats(accountId: string, entitlementId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/entitlements/\${entitlementId}\`, { + method: "DELETE", + body: JSON.stringify({ accountId }), + }); +} +`; + export const sagaStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; import { subscriptionUpgradeSaga } from "@/workflows/saga"; diff --git a/docs/lib/registry/snippets/sandbox.ts b/docs/lib/registry/snippets/sandbox.ts index 40240e1f94..09743032e0 100644 --- a/docs/lib/registry/snippets/sandbox.ts +++ b/docs/lib/registry/snippets/sandbox.ts @@ -252,6 +252,195 @@ export async function sandboxSessionWorkflow() { } `; +export const sandboxWorkflowInstallSource = `/** + * Vercel Sandbox — persistent interactive sandbox session with auto-hibernate. + * + * THE PATTERN: + * 1. One workflow run = one logical sandbox session. The workflow manages + * the VM lifecycle: create → run commands → hibernate idle → resume + * → refresh near hard cap → destroy on explicit command. + * 2. A commandHook (token = runId) receives commands from the API route, + * created once outside the loop and reused every iteration. + * 3. When idle, the workflow snapshots the sandbox and hibernates (zero + * compute cost). On the next command, it resumes from the snapshot. + * 4. Before the sandbox hard cap, the workflow proactively snapshots and + * recreates so the logical session outlives any one VM. 
+ * + * USEFUL WHEN: + * - You need an interactive coding environment that survives page refreshes. + * - You want the sandbox to hibernate when idle without losing state. + * - Sessions should persist across multiple browser tabs or reconnections. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Tune HIBERNATE_AFTER_MS (default 30 min) for your idle tolerance. + * - Tune SANDBOX_TIMEOUT_MS (default 5h) to match your hard cap. + * - Change RUNTIME to "python3.13", "node22", etc. as needed. + * - Add custom event types to SandboxEvent for richer client UI. + * - For one-shot pipelines (no interactivity), use the simpler + * sandboxPipeline pattern in the Usage snippet instead. + * + * DOCS: https://workflow-sdk.dev/patterns/sandbox + */ +import { defineHook, sleep, getWritable, getWorkflowMetadata } from "workflow"; +import { Sandbox, type Snapshot } from "@vercel/sandbox"; +import { z } from "zod"; + +export const commandHook = defineHook({ + schema: z.object({ command: z.string() }), +}); + +const RUNTIME = "node22"; +const HIBERNATE_AFTER_MS = 30 * 60_000; // 30 min idle → hibernate +const SANDBOX_TIMEOUT_MS = 5 * 60 * 60_000; // sandbox hard cap (5h) +const REFRESH_SAFETY_MS = 5 * 60_000; // refresh 5 min before the cap + +export type SandboxEvent = + | { + type: "created"; + sandboxId: string; + runtime: string; + startedAt: number; + sandboxExpiresAt: number; + hibernateAfterMs: number; + } + | { + type: "status"; + state: + | "active" + | "hibernating" + | "hibernated" + | "resuming" + | "refreshing" + | "destroyed"; + at: number; + sandboxId?: string; + sandboxExpiresAt?: number; + snapshotId?: string; + } + | { type: "activity"; at: number } + | { type: "command_start"; id: string; command: string; at: number } + | { type: "command_output"; id: string; stream: "stdout" | "stderr"; data: string } + | { type: "command_end"; id: string; exitCode: number | null; durationMs: number } + | { type: "result"; status: "destroyed"; durationMs: number }; + +async function 
emit(event: SandboxEvent) { + "use step"; + const writer = getWritable().getWriter(); + try { + await writer.write(event); + } finally { + writer.releaseLock(); + } +} + +async function runCommandAndStream(sandbox: Sandbox, id: string, command: string) { + "use step"; + const writer = getWritable().getWriter(); + const startedAt = Date.now(); + try { + await writer.write({ type: "command_start", id, command, at: startedAt }); + const result = await sandbox.runCommand({ cmd: "bash", args: ["-c", command] }); + const stdout = await result.stdout(); + if (stdout) await writer.write({ type: "command_output", id, stream: "stdout", data: stdout }); + const stderr = await result.stderr(); + if (stderr) await writer.write({ type: "command_output", id, stream: "stderr", data: stderr }); + await writer.write({ type: "command_end", id, exitCode: result.exitCode, durationMs: Date.now() - startedAt }); + } finally { + writer.releaseLock(); + } +} + +export async function sandboxSessionWorkflow() { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + // Create the hook ONCE outside the loop — reused every iteration. + // Re-creating inside the loop with the same token would throw HookConflictError. 
+ const hook = commandHook.create({ token: workflowRunId }); + + const startedAt = Date.now(); + let sandbox: Sandbox = await Sandbox.create({ runtime: RUNTIME, timeout: SANDBOX_TIMEOUT_MS }); + let sandboxCreatedAt = Date.now(); + let sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; + + await emit({ type: "created", sandboxId: sandbox.sandboxId, runtime: RUNTIME, startedAt, sandboxExpiresAt, hibernateAfterMs: HIBERNATE_AFTER_MS }); + await emit({ type: "status", state: "active", at: Date.now(), sandboxId: sandbox.sandboxId, sandboxExpiresAt }); + + let snapshot: Snapshot | null = null; + let hibernated = false; + let lastActivityAt = startedAt; + let counter = 0; + let destroyed = false; + + try { + while (!destroyed) { + if (hibernated && snapshot) { + // VM stopped — wait for next command (zero compute cost). + const payload = await hook; + if (payload.command === "/destroy") { destroyed = true; break; } + + await emit({ type: "status", state: "resuming", at: Date.now() }); + sandbox = await Sandbox.create({ source: { type: "snapshot", snapshotId: snapshot.snapshotId }, timeout: SANDBOX_TIMEOUT_MS }); + sandboxCreatedAt = Date.now(); + sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; + hibernated = false; + snapshot = null; + await emit({ type: "status", state: "active", at: Date.now(), sandboxId: sandbox.sandboxId, sandboxExpiresAt }); + counter += 1; + await runCommandAndStream(sandbox, \`cmd-\${counter}\`, payload.command); + lastActivityAt = Date.now(); + await emit({ type: "activity", at: lastActivityAt }); + continue; + } + + // Active — sleep until idle deadline or refresh deadline, whichever is sooner. 
+ const idleDeadline = lastActivityAt + HIBERNATE_AFTER_MS; + const refreshDeadline = sandboxExpiresAt - REFRESH_SAFETY_MS; + const sleepMs = Math.max(0, Math.min(idleDeadline, refreshDeadline) - Date.now()); + + const outcome = await Promise.race([ + hook.then((p) => ({ type: "command" as const, command: p.command })), + sleep(\`\${sleepMs}ms\`).then(() => ({ type: "timer" as const })), + ]); + + if (outcome.type === "timer") { + const nearExpiry = Date.now() >= refreshDeadline; + if (nearExpiry) { + // Proactive refresh — snapshot + recreate so session outlives the VM cap. + await emit({ type: "status", state: "refreshing", at: Date.now() }); + const snap = await sandbox.snapshot(); + sandbox = await Sandbox.create({ source: { type: "snapshot", snapshotId: snap.snapshotId }, timeout: SANDBOX_TIMEOUT_MS }); + sandboxCreatedAt = Date.now(); + sandboxExpiresAt = sandboxCreatedAt + SANDBOX_TIMEOUT_MS; + await emit({ type: "status", state: "active", at: Date.now(), sandboxId: sandbox.sandboxId, sandboxExpiresAt, snapshotId: snap.snapshotId }); + lastActivityAt = Date.now(); + } else { + // Idle — snapshot and hibernate indefinitely. 
+ await emit({ type: "status", state: "hibernating", at: Date.now() }); + snapshot = await sandbox.snapshot(); + hibernated = true; + await emit({ type: "status", state: "hibernated", at: Date.now(), snapshotId: snapshot.snapshotId }); + } + continue; + } + + if (outcome.command === "/destroy") { destroyed = true; break; } + + counter += 1; + await runCommandAndStream(sandbox, \`cmd-\${counter}\`, outcome.command); + lastActivityAt = Date.now(); + await emit({ type: "activity", at: lastActivityAt }); + } + } finally { + if (!hibernated) { + try { if (sandbox.status === "running") await sandbox.stop(); } catch { /* best-effort */ } + } + await emit({ type: "status", state: "destroyed", at: Date.now() }); + await emit({ type: "result", status: "destroyed", durationMs: Date.now() - startedAt }); + } +} +`; + export const sandboxStartRouteSource = `import { start, getRun } from "workflow/api"; import { sandboxSessionWorkflow } from "@/workflows/sandbox-session"; @@ -332,6 +521,92 @@ export async function POST(req: Request) { } `; +export const sandboxClientSource = `"use client"; + +import { useCallback, useEffect, useRef, useState } from "react"; +import type { SandboxEvent } from "@/workflows/sandbox-session"; + +const RUN_ID_KEY = "sandbox.runId"; + +export function SandboxRunner() { + const [events, setEvents] = useState([]); + const runIdRef = useRef(null); + const didReconnectRef = useRef(false); + + const consume = useCallback(async (res: Response) => { + if (!res.ok || !res.body) return; + runIdRef.current = res.headers.get("x-workflow-run-id"); + if (runIdRef.current) { + localStorage.setItem(RUN_ID_KEY, runIdRef.current); + } + + const reader = res.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\\n"); + buffer = lines.pop() ?? 
""; + for (const line of lines) { + if (!line.trim()) continue; + try { + setEvents((prev) => [...prev, JSON.parse(line) as SandboxEvent]); + } catch { /* malformed line */ } + } + } + }, []); + + const openStream = useCallback( + async (runId?: string) => { + setEvents([]); + const res = await fetch("/api/sandbox/start", { + method: "POST", + headers: runId ? { "Content-Type": "application/json" } : undefined, + body: runId ? JSON.stringify({ runId }) : undefined, + }); + await consume(res); + }, + [consume] + ); + + // Auto-reconnect on mount if a runId is stashed in localStorage. + useEffect(() => { + if (didReconnectRef.current) return; + didReconnectRef.current = true; + const stored = localStorage.getItem(RUN_ID_KEY); + if (stored) openStream(stored); + }, [openStream]); + + const start = useCallback(() => { + localStorage.removeItem(RUN_ID_KEY); + runIdRef.current = null; + openStream(); + }, [openStream]); + + const sendCommand = useCallback(async (command: string) => { + if (!runIdRef.current) return; + const res = await fetch("/api/sandbox/command", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ runId: runIdRef.current, command }), + }); + if (res.status === 410) localStorage.removeItem(RUN_ID_KEY); + }, []); + + const destroy = useCallback(async () => { + await sendCommand("/destroy"); + localStorage.removeItem(RUN_ID_KEY); + }, [sendCommand]); + + // Render events as a terminal-style log. Drive UI state from \`status\` events + // (active / hibernating / hibernated / resuming / refreshing / destroyed). + return null; +} +`; + export const sandboxUsageSource = `// Quickstart — one-shot pipeline. 
// Each \`Sandbox\` method (\`create\`, \`runCommand\`, \`stop\`, \`snapshot\`) is an // implicit step, so the event log records every command and the workflow diff --git a/docs/lib/registry/snippets/scheduling.ts b/docs/lib/registry/snippets/scheduling.ts index cf90262cf7..6e29306182 100644 --- a/docs/lib/registry/snippets/scheduling.ts +++ b/docs/lib/registry/snippets/scheduling.ts @@ -55,6 +55,76 @@ async function runAction(action: ScheduledAction): Promise { } `; +export const schedulingWorkflowInstallSource = `/** + * Scheduling — defer any action with a cancellable durable sleep. + * + * THE PATTERN: + * 1. sleep() suspends the workflow until the delay elapses — no cron + * jobs, no DB flags, no scheduler infrastructure required. + * 2. A cancelSchedule hook races against the sleep. Whichever resolves + * first wins: the action executes or is cancelled. + * 3. The hook token is keyed by the schedule ID, not the run ID, so the + * cancel API only needs the ID you provided at schedule time. + * + * USEFUL WHEN: + * - Sending a reminder email N days after signup. + * - Triggering a follow-up notification if a user hasn't acted yet. + * - Scheduling a deferred webhook call or Slack message. + * - Implementing "send later" / snooze / retry-after patterns. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the runAction step body with your real action — send an email + * via Resend, post to Slack, fire a webhook, write to your database. + * - The delay field accepts a duration string ("2d", "1h", "30m"), millis, + * or an absolute Date for scheduling to a specific timestamp. + * - Add payload fields to ScheduledAction for everything your action needs. + * - For recurring schedules, loop back and sleep again after runAction. + * + * DOCS: https://workflow-sdk.dev/patterns/scheduling + */ +import { defineHook, sleep } from "workflow"; + +// Exported so the cancel API route can resume it with just the schedule ID. 
+export const cancelSchedule = defineHook<{ reason?: string }>();
+
+export interface ScheduledAction {
+  id: string;
+  /** Duration string ("2d", "1h"), millis, or absolute Date. */
+  delay: string | number | Date;
+  /** Action payload — passed straight to runAction. */
+  payload: Record<string, unknown>;
+}
+
+export async function scheduleAction(action: ScheduledAction) {
+  "use workflow";
+
+  // Race: sleep fires when the delay elapses; hook fires when cancelled.
+  // No manual flag-checking or extra DB tables — the runtime handles it.
+  const hook = cancelSchedule.create({ token: \`schedule:\${action.id}\` });
+  const cancelled = await Promise.race([
+    sleep(action.delay).then(() => false as const),
+    hook.then(() => true as const),
+  ]);
+
+  if (cancelled) {
+    return { id: action.id, status: "cancelled" as const };
+  }
+
+  await runAction(action);
+  return { id: action.id, status: "executed" as const };
+}
+
+// Replace the body of this step with your real action. The step has full
+// Node.js access and is automatically retried on transient failure (3x).
+async function runAction(action: ScheduledAction): Promise<void> {
+  "use step";
+  await fetch("https://api.example.com/scheduled-action", {
+    method: "POST",
+    body: JSON.stringify(action),
+  });
+}
+`;
+
 export const schedulingStartRouteSource = `import { start } from "workflow/api";
 import { NextResponse } from "next/server";
 import { scheduleAction, type ScheduledAction } from "@/workflows/scheduling";
diff --git a/docs/lib/registry/snippets/sequential-and-parallel.ts b/docs/lib/registry/snippets/sequential-and-parallel.ts
index 061fe0ec5c..ee612c5d09 100644
--- a/docs/lib/registry/snippets/sequential-and-parallel.ts
+++ b/docs/lib/registry/snippets/sequential-and-parallel.ts
@@ -91,6 +91,122 @@ async function fetchFallback(userId: string): Promise<{ source: "fallback"; user
 }
 `;
 
+export const sequentialAndParallelWorkflowInstallSource = `/**
+ * Sequential & Parallel Execution — the three step composition primitives.
+ * + * THE PATTERN: + * SEQUENTIAL (pipeline): await each step in order when steps are dependent + * — the output of one feeds the input of the next. + * + * PARALLEL (fan-out): Promise.all() for independent steps that can run + * concurrently — collects all results before continuing. + * + * RACE: Promise.race() returns the first result; pair with sleep() for + * deadlines or with a fallback fetch for primary/secondary failover. + * + * USEFUL WHEN: + * - You need a data transformation pipeline (validate → process → store). + * - You fetch multiple independent resources and need all of them. + * - You want to bound how long a slow step can take with a deadline. + * - You have a primary API with a fast fallback. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Keep only the patterns you need — all three are shown for reference. + * - Replace step bodies with your real work. All of Node.js is available. + * - For Promise.all with failure isolation (one failure = don't throw), + * use Promise.allSettled() instead (see the Batching pattern). + * - The TIMEOUT sentinel pattern (Symbol) is type-safe: TypeScript narrows + * the union correctly without a discriminant string field. + * + * DOCS: https://workflow-sdk.dev/patterns/sequential-and-parallel + */ +import { sleep } from "workflow"; + +// PIPELINE — sequential await chains dependent steps. +export async function dataPipeline(data: unknown) { + "use workflow"; + + const validated = await validateData(data); + const processed = await processData(validated); + const stored = await storeData(processed); + + return stored; +} + +// FAN-OUT — independent work runs in parallel via Promise.all. +export async function fetchUserData(userId: string) { + "use workflow"; + + // All three steps fire concurrently; the workflow awaits all of them. 
+ const [user, orders, preferences] = await Promise.all([ + fetchUser(userId), + fetchOrders(userId), + fetchPreferences(userId), + ]); + + return { user, orders, preferences }; +} + +// RACE — return whichever resolves first; sleep() provides the deadline. +export async function firstResponse(userId: string) { + "use workflow"; + + const result = await Promise.race([ + fetchPrimary(userId), + fetchFallback(userId), + // After 5s, return a stale sentinel — caller decides how to handle it. + sleep("5s").then(() => ({ stale: true } as const)), + ]); + + return result; +} + +// Replace each step body with your real logic — all of Node.js is available. + +async function validateData(data: unknown): Promise { + "use step"; + if (typeof data !== "object" || data === null) { + throw new Error("Invalid input"); + } + return JSON.stringify(data); +} + +async function processData(data: string): Promise { + "use step"; + return data.trim(); +} + +async function storeData(data: string): Promise { + "use step"; + return \`stored:\${data.length}\`; +} + +async function fetchUser(userId: string): Promise<{ id: string; name: string }> { + "use step"; + return { id: userId, name: "Ada" }; +} + +async function fetchOrders(userId: string): Promise<{ id: string; items: number }[]> { + "use step"; + return [{ id: "o_1", items: 3 }]; +} + +async function fetchPreferences(userId: string): Promise<{ theme: string }> { + "use step"; + return { theme: "dark" }; +} + +async function fetchPrimary(userId: string): Promise<{ source: "primary"; userId: string }> { + "use step"; + return { source: "primary", userId }; +} + +async function fetchFallback(userId: string): Promise<{ source: "fallback"; userId: string }> { + "use step"; + return { source: "fallback", userId }; +} +`; + export const sequentialAndParallelStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; import { fetchUserData } from "@/workflows/sequential-and-parallel"; diff --git 
a/docs/lib/registry/snippets/timeouts.ts b/docs/lib/registry/snippets/timeouts.ts index 4828feb501..78a5466d9a 100644 --- a/docs/lib/registry/snippets/timeouts.ts +++ b/docs/lib/registry/snippets/timeouts.ts @@ -85,6 +85,116 @@ async function sendApprovalRequest( } `; +export const timeoutsWorkflowInstallSource = `/** + * Timeouts — bound how long a step, hook, or webhook can take. + * + * THE PATTERN: + * Promise.race() against sleep() creates a durable deadline: + * - HARD TIMEOUT: throw if work doesn't finish — use for SLA enforcement. + * - SOFT TIMEOUT: fall back to a cached/default value — use when partial + * results are acceptable. + * - WEBHOOK + DEADLINE: race an external callback against a long sleep() + * so the workflow never waits forever for an event that never arrives. + * + * The Symbol sentinel is TypeScript-safe: it narrows the union without + * a discriminant string, and can't accidentally collide with real return + * values the way null or "" could. + * + * USEFUL WHEN: + * - A slow external API should fail fast after N seconds. + * - You need "return cached data if fresh data takes too long". + * - A webhook approval / payment callback should expire after N days. + * - Any "wait for X but not forever" pattern. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Keep only the flavors you need (hard / soft / webhook + deadline). + * - Replace processData / fetchSlow / sendApprovalRequest with your steps. + * - Tune the sleep duration to match your SLA or UX requirements. + * - NOTE: the LOSER of Promise.race keeps running — the workflow ignores + * its result but side effects still happen. Use the Distributed Abort + * Controller pattern if you need hard cross-process cancellation. + * + * DOCS: https://workflow-sdk.dev/patterns/timeouts + */ +import { sleep, createWebhook } from "workflow"; + +// Unique sentinel — can't collide with real return values. +const TIMEOUT = Symbol("timeout"); + +// HARD TIMEOUT — throw if the work doesn't finish in time. 
+export async function processWithTimeout(data: string) { + "use workflow"; + + const result = await Promise.race([ + processData(data), + sleep("30s").then(() => TIMEOUT as typeof TIMEOUT), + ]); + + if (result === TIMEOUT) { + throw new Error("Processing timed out after 30 seconds"); + } + + return result; +} + +// SOFT TIMEOUT — fall back to a cached value if the deadline fires first. +export async function fetchWithFallback(key: string, fallback: string) { + "use workflow"; + + const result = await Promise.race([ + fetchSlow(key), + sleep("3s").then(() => TIMEOUT as typeof TIMEOUT), + ]); + + return result === TIMEOUT ? fallback : result; +} + +// WEBHOOK + DEADLINE — race an external callback against a 7-day sleep so +// the workflow never hangs forever on a missing event. +export async function waitForApproval(requestId: string) { + "use workflow"; + + const webhook = createWebhook<{ approved: boolean }>(); + await sendApprovalRequest(requestId, webhook.url); + + const result = await Promise.race([ + webhook.then((req) => req.json()), + sleep("7 days").then(() => ({ timedOut: true } as const)), + ]); + + if ("timedOut" in result) { + throw new Error("Approval request expired after 7 days"); + } + + return result.approved; +} + +async function processData(data: string): Promise { + "use step"; + // Replace with real work. The LOSER of Promise.race keeps running — + // the workflow ignores its result, but side effects still happen. + // Use Distributed Abort Controller for hard cross-process cancellation. 
+  return data.toUpperCase();
+}
+
+async function fetchSlow(key: string): Promise<string> {
+  "use step";
+  const res = await fetch(\`https://api.example.com/slow/\${key}\`);
+  return res.text();
+}
+
+async function sendApprovalRequest(
+  requestId: string,
+  webhookUrl: string,
+): Promise<void> {
+  "use step";
+  await fetch("https://api.example.com/approvals", {
+    method: "POST",
+    body: JSON.stringify({ requestId, webhookUrl }),
+  });
+}
+`;
+
 export const timeoutsStartRouteSource = `import { start } from "workflow/api";
 import { NextResponse } from "next/server";
 import { processWithTimeout } from "@/workflows/timeouts";
diff --git a/docs/lib/registry/snippets/upgrading-workflows.ts b/docs/lib/registry/snippets/upgrading-workflows.ts
new file mode 100644
index 0000000000..30d1584b48
--- /dev/null
+++ b/docs/lib/registry/snippets/upgrading-workflows.ts
@@ -0,0 +1,472 @@
+/**
+ * Source snippets for the Upgrading Workflows registry entry.
+ *
+ * Self-upgrading pattern — long-running runs that respawn themselves on the
+ * latest deployment so shipped fixes take effect on the very next event,
+ * without migrating in-flight state. Ships Method 1 (per-iteration spawn)
+ * out of the box; the start and resume routes work for both methods.
+ */
+
+export const upgradingWorkflowsWorkflowSource = `import { defineHook, getWritable, getWorkflowMetadata } from "workflow";
+import { start } from "workflow/api";
+
+// ---------------------------------------------------------------------------
+// Deployment identifier — captured once at process start.
+// On Vercel, VERCEL_DEPLOYMENT_ID changes on every deploy so each version
+// reports a distinct ID. Locally a timestamp simulates a redeploy on restart.
+// ---------------------------------------------------------------------------
+const DEPLOYMENT_ID =
+  process.env.VERCEL_DEPLOYMENT_ID?.slice(0, 12) ??
+  process.env.VERCEL_GIT_COMMIT_SHA?.slice(0, 7) ??
+ \`dev-\${Date.now().toString(36)}\`; + +// --------------------------------------------------------------------------- +// State — replace with your domain's shape (queue cursor, FSM, subscription…) +// --------------------------------------------------------------------------- +export interface WorkflowState { + count: number; + history: IterationRecord[]; +} + +export interface IterationRecord { + runId: string; + deploymentId: string; + incrementedBy: number; + result: number; + at: string; +} + +// --------------------------------------------------------------------------- +// Hook — token = runId so each chain is isolated. Export so the resume +// route can call hook.resume(runId, payload) without a shared singleton. +// --------------------------------------------------------------------------- +export const resumeHook = defineHook<{ amount: number }>(); + +// --------------------------------------------------------------------------- +// Workflow — ONE iteration per run (Method 1). +// +// 1. Emit current state to the stream so clients can read it. +// 2. Block on the hook until an external trigger resumes it. +// 3. Compute next state, emit progress. +// 4. Spawn the next iteration with deploymentId "latest" so it picks up +// whichever deployment is live at that moment. +// 5. Exit — the chain continues on a fresh run. +// --------------------------------------------------------------------------- +export async function upgradingWorkflow( + state: WorkflowState = { count: 0, history: [] }, +): Promise { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + // Emit ready so listeners know which run / deployment is active. + await emitEvent({ type: "ready", runId: workflowRunId, deploymentId: DEPLOYMENT_ID, state }); + + // Suspend until the caller resumes this specific run. + const payload = await resumeHook.create({ token: workflowRunId }); + + // Process on this deployment's code path. 
+  const result = state.count + payload.amount;
+  const newState: WorkflowState = {
+    count: result,
+    history: [
+      ...state.history,
+      { runId: workflowRunId, deploymentId: DEPLOYMENT_ID, incrementedBy: payload.amount, result, at: new Date().toISOString() },
+    ],
+  };
+
+  await emitEvent({ type: "incremented", payload, newState });
+
+  // Spawn the successor on the latest deployment, then exit.
+  const nextRunId = await spawnSelfOnLatest(newState);
+  await emitEvent({ type: "spawned", nextRunId });
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+// start() must run inside a "use step" function.
+// deploymentId: "latest" is the key to the upgrade pattern — the new run
+// picks up whichever deployment is live when it lands, not the caller's.
+async function spawnSelfOnLatest(state: WorkflowState): Promise<string> {
+  "use step";
+  const next = await start(upgradingWorkflow, [state], { deploymentId: "latest" });
+  return next.runId;
+}
+
+type UpgradeEvent =
+  | { type: "ready"; runId: string; deploymentId: string; state: WorkflowState }
+  | { type: "incremented"; payload: { amount: number }; newState: WorkflowState }
+  | { type: "spawned"; nextRunId: string };
+
+async function emitEvent(event: UpgradeEvent): Promise<void> {
+  "use step";
+  const writer = getWritable().getWriter();
+  try { await writer.write(event); } finally { writer.releaseLock(); }
+}
+`;
+
+// Method 2 — long-running loop + dedicated upgrade hook.
+// The workflow handles many events per run; a separate upgradeHook forces
+// an explicit respawn on the latest deployment whenever you choose.
+export const upgradingWorkflowsMethod2Source = `import { defineHook, getWritable, getWorkflowMetadata } from "workflow";
+import { start } from "workflow/api";
+
+const DEPLOYMENT_ID =
+  process.env.VERCEL_DEPLOYMENT_ID?.slice(0, 12) ??
+  process.env.VERCEL_GIT_COMMIT_SHA?.slice(0, 7) ??
+ \`dev-\${Date.now().toString(36)}\`; + +export interface WorkflowState { + count: number; + history: IterationRecord[]; +} + +export interface IterationRecord { + runId: string; + deploymentId: string; + incrementedBy: number; + result: number; + at: string; +} + +// Work hook — resumes with a payload to process +export const resumeHook = defineHook<{ amount: number }>(); +// Upgrade hook — fire this to force a respawn on the latest deployment +export const upgradeHook = defineHook(); + +export async function upgradingWorkflow( + state: WorkflowState = { count: 0, history: [] }, +): Promise { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + await emitEvent({ type: "ready", runId: workflowRunId, deploymentId: DEPLOYMENT_ID, state }); + + while (true) { + // Race: process the next event OR upgrade to the latest deployment + const outcome = await Promise.race([ + resumeHook + .create({ token: \`work:\${workflowRunId}\` }) + .then((p) => ({ kind: "work" as const, payload: p })), + upgradeHook + .create({ token: \`upgrade:\${workflowRunId}\` }) + .then(() => ({ kind: "upgrade" as const })), + ]); + + if (outcome.kind === "upgrade") { + // Spawn successor on the latest deployment and exit + const nextRunId = await spawnSelfOnLatest(state); + await emitEvent({ type: "spawned", nextRunId }); + return; + } + + // Process the event on this deployment's code path + const result = state.count + outcome.payload.amount; + state = { + count: result, + history: [ + ...state.history, + { + runId: workflowRunId, + deploymentId: DEPLOYMENT_ID, + incrementedBy: outcome.payload.amount, + result, + at: new Date().toISOString(), + }, + ], + }; + + await emitEvent({ type: "incremented", payload: outcome.payload, newState: state }); + } +} + +async function spawnSelfOnLatest(state: WorkflowState): Promise { + "use step"; + const next = await start(upgradingWorkflow, [state], { deploymentId: "latest" }); + return next.runId; +} + +type UpgradeEvent = + | { type: 
"ready"; runId: string; deploymentId: string; state: WorkflowState } + | { type: "incremented"; payload: { amount: number }; newState: WorkflowState } + | { type: "spawned"; nextRunId: string }; + +async function emitEvent(event: UpgradeEvent): Promise { + "use step"; + const writer = getWritable().getWriter(); + try { await writer.write(event); } finally { writer.releaseLock(); } +} +`; + +// ─── Method 1 install code ──────────────────────────────────────────────────── +export const upgradingWorkflowsMethod1InstallSource = `/** + * Upgrading Workflows — Method 1: per-event spawn (simple, always up-to-date). + * + * THE PATTERN: + * 1. One run = one iteration. The workflow handles one event then exits. + * 2. Before exiting, it spawns a successor run with deploymentId: "latest" + * so the next iteration runs on whichever deployment is live then. + * 3. State is passed explicitly as arguments — no shared DB, no migration. + * + * USEFUL WHEN: + * - Every event should benefit immediately from the latest code. + * - You prefer simplicity over fine-grained upgrade control. + * - Your state shape is small enough to pass as function arguments. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace WorkflowState / IterationRecord with your domain's state shape. + * - Replace the increment logic with your event processing (queue consumer, + * FSM transition, subscription renewal, etc.). + * - Customize the emitEvent helper or remove it if you don't need streaming. + * - The resumeHook token = runId so each iteration is independently resumable + * without a shared registry. + * + * DOCS: https://workflow-sdk.dev/patterns/upgrading-workflows + */ +import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; +import { start } from "workflow/api"; + +// Captured once per process — changes on every deploy on Vercel. +const DEPLOYMENT_ID = + process.env.VERCEL_DEPLOYMENT_ID?.slice(0, 12) ?? + process.env.VERCEL_GIT_COMMIT_SHA?.slice(0, 7) ?? 
+ \`dev-\${Date.now().toString(36)}\`; + +export interface WorkflowState { + count: number; + history: IterationRecord[]; +} + +export interface IterationRecord { + runId: string; + deploymentId: string; + incrementedBy: number; + result: number; + at: string; +} + +// Token = runId so each iteration is isolated and independently resumable. +export const resumeHook = defineHook<{ amount: number }>(); + +export async function upgradingWorkflow( + state: WorkflowState = { count: 0, history: [] }, +): Promise { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + // 1. Emit ready so clients know which run/deployment is active. + await emitEvent({ type: "ready", runId: workflowRunId, deploymentId: DEPLOYMENT_ID, state }); + + // 2. Suspend until an external trigger resumes this specific run. + const payload = await resumeHook.create({ token: workflowRunId }); + + // 3. Process on this deployment's code — swap logic here for domain work. + const result = state.count + payload.amount; + const newState: WorkflowState = { + count: result, + history: [ + ...state.history, + { runId: workflowRunId, deploymentId: DEPLOYMENT_ID, incrementedBy: payload.amount, result, at: new Date().toISOString() }, + ], + }; + + await emitEvent({ type: "incremented", payload, newState }); + + // 4. Spawn successor on latest deployment — the upgrade happens here. + const nextRunId = await spawnSelfOnLatest(newState); + await emitEvent({ type: "spawned", nextRunId }); +} + +// start() must be inside a "use step" function. +// deploymentId: "latest" is the key — the successor picks up whichever +// deployment is live when it starts, not the caller's deployment. 
+async function spawnSelfOnLatest(state: WorkflowState): Promise { + "use step"; + const next = await start(upgradingWorkflow, [state], { deploymentId: "latest" }); + return next.runId; +} + +type UpgradeEvent = + | { type: "ready"; runId: string; deploymentId: string; state: WorkflowState } + | { type: "incremented"; payload: { amount: number }; newState: WorkflowState } + | { type: "spawned"; nextRunId: string }; + +async function emitEvent(event: UpgradeEvent): Promise { + "use step"; + const writer = getWritable().getWriter(); + try { await writer.write(event); } finally { writer.releaseLock(); } +} +`; + +// ─── Method 2 install code ──────────────────────────────────────────────────── +export const upgradingWorkflowsMethod2InstallSource = `/** + * Upgrading Workflows — Method 2: long-running loop + explicit upgrade hook. + * + * THE PATTERN: + * 1. One run handles many events in a loop — runs stay alive longer. + * 2. An upgradeHook races against the normal work hook. Fire the upgrade + * hook when you want to respawn on the latest deployment. + * 3. On upgrade, the workflow spawns a successor with deploymentId: "latest" + * and exits — code updates take effect at the moment you choose. + * + * USEFUL WHEN: + * - You want the run to stay alive and handle many events before upgrading. + * - You need explicit control over when upgrades happen (e.g. off-hours). + * - Your state is expensive to serialize between every event. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the increment logic in the work branch with your event handler. + * - Replace WorkflowState / IterationRecord with your domain's shape. + * - Fire upgradeHook.resume(runId) from a deploy hook or admin API to + * trigger a controlled upgrade at the time of your choosing. + * - Both hooks use separate token prefixes ("work:" vs "upgrade:") so a + * single runId can carry both concurrently without conflict. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/upgrading-workflows + */ +import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; +import { start } from "workflow/api"; + +const DEPLOYMENT_ID = + process.env.VERCEL_DEPLOYMENT_ID?.slice(0, 12) ?? + process.env.VERCEL_GIT_COMMIT_SHA?.slice(0, 7) ?? + \`dev-\${Date.now().toString(36)}\`; + +export interface WorkflowState { + count: number; + history: IterationRecord[]; +} + +export interface IterationRecord { + runId: string; + deploymentId: string; + incrementedBy: number; + result: number; + at: string; +} + +// Work hook — resumes with a payload to process. +export const resumeHook = defineHook<{ amount: number }>(); +// Upgrade hook — fire this to force a respawn on the latest deployment. +export const upgradeHook = defineHook(); + +export async function upgradingWorkflow( + state: WorkflowState = { count: 0, history: [] }, +): Promise { + "use workflow"; + + const { workflowRunId } = getWorkflowMetadata(); + + await emitEvent({ type: "ready", runId: workflowRunId, deploymentId: DEPLOYMENT_ID, state }); + + while (true) { + // Race: process next event OR upgrade to the latest deployment. + // Separate token prefixes prevent HookConflictError. + const outcome = await Promise.race([ + resumeHook + .create({ token: \`work:\${workflowRunId}\` }) + .then((p) => ({ kind: "work" as const, payload: p })), + upgradeHook + .create({ token: \`upgrade:\${workflowRunId}\` }) + .then(() => ({ kind: "upgrade" as const })), + ]); + + if (outcome.kind === "upgrade") { + // Controlled upgrade: spawn successor on the latest deployment and exit. + const nextRunId = await spawnSelfOnLatest(state); + await emitEvent({ type: "spawned", nextRunId }); + return; + } + + // Process the event on this deployment's code. 
+ const result = state.count + outcome.payload.amount; + state = { + count: result, + history: [ + ...state.history, + { + runId: workflowRunId, + deploymentId: DEPLOYMENT_ID, + incrementedBy: outcome.payload.amount, + result, + at: new Date().toISOString(), + }, + ], + }; + + await emitEvent({ type: "incremented", payload: outcome.payload, newState: state }); + } +} + +async function spawnSelfOnLatest(state: WorkflowState): Promise { + "use step"; + const next = await start(upgradingWorkflow, [state], { deploymentId: "latest" }); + return next.runId; +} + +type UpgradeEvent = + | { type: "ready"; runId: string; deploymentId: string; state: WorkflowState } + | { type: "incremented"; payload: { amount: number }; newState: WorkflowState } + | { type: "spawned"; nextRunId: string }; + +async function emitEvent(event: UpgradeEvent): Promise { + "use step"; + const writer = getWritable().getWriter(); + try { await writer.write(event); } finally { writer.releaseLock(); } +} +`; + +export const upgradingWorkflowsStartRouteSource = `import { start } from "workflow/api"; +import { NextResponse } from "next/server"; +import { upgradingWorkflow, type WorkflowState } from "@/workflows/upgrading-workflow"; + +// POST /api/upgrade — starts the first iteration of the chain. +export async function POST(request: Request) { + const body = (await request.json().catch(() => ({}))) as { + initial?: WorkflowState; + }; + const initial: WorkflowState = body.initial ?? { count: 0, history: [] }; + const run = await start(upgradingWorkflow, [initial]); + return NextResponse.json({ runId: run.runId }); +} +`; + +export const upgradingWorkflowsResumeRouteSource = `import { NextResponse } from "next/server"; +import { resumeHook } from "@/workflows/upgrading-workflow"; + +// POST /api/upgrade/resume { runId, amount } +// Resumes the active iteration, triggering a state update and a spawn. 
+export async function POST(request: Request) { + const { runId, amount } = (await request.json()) as { + runId?: string; + amount?: number; + }; + + if (typeof runId !== "string" || typeof amount !== "number") { + return NextResponse.json( + { error: "runId (string) and amount (number) are required" }, + { status: 400 }, + ); + } + + try { + await resumeHook.resume(runId, { amount }); + } catch (error) { + const msg = error instanceof Error ? error.message.toLowerCase() : ""; + if (msg.includes("not found") || msg.includes("expired")) { + // The run already spawned its successor — caller should use the new runId. + return NextResponse.json( + { success: false, note: "Run has already moved to its successor" }, + { status: 409 }, + ); + } + throw error; + } + + return NextResponse.json({ success: true }); +} +`; diff --git a/docs/lib/registry/snippets/webhooks.ts b/docs/lib/registry/snippets/webhooks.ts index 47d663dfea..19008aafa9 100644 --- a/docs/lib/registry/snippets/webhooks.ts +++ b/docs/lib/registry/snippets/webhooks.ts @@ -7,16 +7,12 @@ * the callback against a deadline. */ -export const webhooksWorkflowSource = `import { +// Pattern 1 — Long-running webhook listener (Stripe-style event ledger). +export const webhooksEventListenerSource = `import { createWebhook, - sleep, - FatalError, type RequestWithResponse, } from "workflow"; -// PATTERN 1 — Long-running webhook listener (Stripe-style). -// Workflow suspends with zero cost, resumes on each incoming request, -// and exits when a terminal event arrives. export async function paymentWebhook(orderId: string) { "use workflow"; @@ -37,9 +33,33 @@ export async function paymentWebhook(orderId: string) { return { orderId, webhookUrl: webhook.url, ledger, status: "settled" as const }; } -// PATTERN 2 — Async request-reply with deadline. Submit to a vendor, -// pass it our webhook URL for the callback, race the callback against -// a 30-second budget. 
+async function processEvent( + request: RequestWithResponse, +): Promise<{ type: string }> { + "use step"; + const body = await request.json().catch(() => ({})); + const type = (body?.type as string) ?? "unknown"; + + if (type === "payment.succeeded") { + await request.respondWith(Response.json({ ack: true, action: "captured" })); + } else if (type === "payment.failed") { + await request.respondWith(Response.json({ ack: true, action: "flagged" })); + } else { + await request.respondWith(Response.json({ ack: true, action: "ignored" })); + } + + return { type }; +} +`; + +// Pattern 2 — Async request-reply with deadline. +export const webhooksRequestReplySource = `import { + createWebhook, + sleep, + FatalError, + type RequestWithResponse, +} from "workflow"; + export async function asyncVerification(documentId: string) { "use workflow"; @@ -59,6 +79,87 @@ export async function asyncVerification(documentId: string) { return { documentId, ...result }; } +async function submitToVendor( + documentId: string, + callbackUrl: string, +): Promise { + "use step"; + await fetch("https://vendor.example.com/verify", { + method: "POST", + body: JSON.stringify({ documentId, callbackUrl }), + }); +} + +async function processCallback( + request: RequestWithResponse, +): Promise<{ status: string; details: string }> { + "use step"; + const body = await request.json().catch(() => ({})); + await request.respondWith(Response.json({ ack: true })); + return { + status: body.approved ? "verified" : "rejected", + details: body.details ?? body.reason ?? "", + }; +} +`; + +// ─── Event Listener install code ────────────────────────────────────────────── +export const webhooksEventListenerInstallSource = `/** + * Webhooks — Event Listener pattern (long-running webhook ledger). + * + * THE PATTERN: + * 1. createWebhook({ respondWith: "manual" }) returns a durable URL and an + * async iterator. Register the URL with the external service once. + * 2. 
\`for await (const request of webhook)\` yields incoming HTTP requests + * into the workflow's event loop — each iteration is a durable step. + * 3. Process and respond to each webhook inside processEvent ("use step") + * so the response is durable and the handler retries on crash. + * 4. \`break\` the loop to terminate the workflow when a terminal event + * arrives (payment.succeeded, refund.created, etc.). + * + * USEFUL WHEN: + * - You need to receive and process a sequence of webhook events for a + * single entity (order, payment, document) over time. + * - Each event must be acknowledged individually to the provider. + * - The workflow must survive restarts without missing or duplicating events. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Register webhook.url with your provider (Stripe, GitHub, Twilio…) + * after starting the workflow — it's stable for the run's lifetime. + * - Replace processEvent with your domain logic. Return a type discriminant + * so the loop knows when to break. + * - Add more terminal event types to the break condition as needed. + * - For a single callback (not a sequence), use the Request-Reply pattern. + * + * DOCS: https://workflow-sdk.dev/patterns/webhooks + */ +import { + createWebhook, + type RequestWithResponse, +} from "workflow"; + +export async function paymentWebhook(orderId: string) { + "use workflow"; + + // createWebhook returns a stable URL and an async iterator over requests. + const webhook = createWebhook({ respondWith: "manual" }); + // Register webhook.url with your provider — it's valid for this run's lifetime. + + const ledger: { type: string; at: string }[] = []; + + for await (const request of webhook) { + const entry = await processEvent(request); + ledger.push({ ...entry, at: new Date().toISOString() }); + + // Break on terminal events to end the workflow. 
+ if (entry.type === "payment.succeeded" || entry.type === "refund.created") { + break; + } + } + + return { orderId, webhookUrl: webhook.url, ledger, status: "settled" as const }; +} + async function processEvent( request: RequestWithResponse, ): Promise<{ type: string }> { @@ -76,6 +177,60 @@ async function processEvent( return { type }; } +`; + +// ─── Request-Reply install code ──────────────────────────────────────────────── +export const webhooksRequestReplyInstallSource = `/** + * Webhooks — Async Request-Reply pattern (single callback with deadline). + * + * THE PATTERN: + * 1. createWebhook() generates a one-time callback URL. + * 2. Submit the URL to an external vendor that processes asynchronously + * (document verification, identity check, payment authorization…). + * 3. Race the webhook callback against a sleep() deadline so the workflow + * never waits forever for an external service that never responds. + * 4. Process the callback in a "use step" function so the response is + * durable and the handler retries on crash. + * + * USEFUL WHEN: + * - You call an external API that responds asynchronously via a callback URL. + * - You need a hard deadline after which the workflow times out gracefully. + * - The vendor callback is a one-shot event (not a sequence). + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace submitToVendor with your external API call. + * - Replace processCallback with your domain logic for the callback. + * - Tune the sleep("30s") deadline to match your vendor's SLA. + * - For sequences of events, use the Event Listener pattern instead. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/webhooks + */ +import { + createWebhook, + sleep, + FatalError, + type RequestWithResponse, +} from "workflow"; + +export async function asyncVerification(documentId: string) { + "use workflow"; + + const webhook = createWebhook({ respondWith: "manual" }); + await submitToVendor(documentId, webhook.url); + + const result = await Promise.race([ + (async () => { + for await (const request of webhook) { + return await processCallback(request); + } + throw new FatalError("Webhook closed without callback"); + })(), + // Deadline: return a timed_out sentinel after 30s. + sleep("30s").then(() => ({ status: "timed_out" as const })), + ]); + + return { documentId, ...result }; +} async function submitToVendor( documentId: string, diff --git a/docs/lib/registry/snippets/workflow-composition.ts b/docs/lib/registry/snippets/workflow-composition.ts index 5f2afd4e86..da68495e5e 100644 --- a/docs/lib/registry/snippets/workflow-composition.ts +++ b/docs/lib/registry/snippets/workflow-composition.ts @@ -94,6 +94,126 @@ async function buildReport(reportId: string): Promise { } `; +export const workflowCompositionWorkflowInstallSource = `/** + * Workflow Composition — compose workflows via direct await or background spawn. + * + * THE PATTERN: + * DIRECT AWAIT: calling another workflow function with await flattens it + * into the parent's event log — one run, shared lifecycle. + * + * BACKGROUND SPAWN: calling start() from a step spawns the child as an + * independent run — its own runId, event log, retry boundary, and status. + * The parent only gets a runId back; it doesn't wait for the child. + * + * USEFUL WHEN: + * DIRECT AWAIT: + * - Sub-flows you want to reuse across multiple parent workflows. + * - The parent needs the child's output before continuing. + * - You want a single observable run in your dashboard. + * + * BACKGROUND SPAWN: + * - Fire-and-forget side effects (generate report, send analytics). 
+ * - The child takes much longer than the parent needs. + * - You want the child cancellable / observable independently. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace sendNotifications / onboardUser with your domain workflows. + * - Replace generateReport / processOrder with your background tasks. + * - { deploymentId: "latest" } on start() lets the child pick up future + * code deployments automatically — omit for pinned behavior. + * - If the parent needs to poll / await the child, see Child Workflows. + * + * DOCS: https://workflow-sdk.dev/patterns/workflow-composition + */ +import { start } from "workflow/api"; + +// CHILD WORKFLOW — runs as part of the parent's event log when awaited directly. +export async function sendNotifications(userId: string) { + "use workflow"; + + await sendEmail(userId); + await sendPushNotification(userId); + return { notified: true }; +} + +// PARENT — direct await: flattens sendNotifications inline. +export async function onboardUser(userId: string) { + "use workflow"; + + await createAccount(userId); + // Direct await: child steps appear in this run's event log. + await sendNotifications(userId); + await setupPreferences(userId); + + return { userId, status: "onboarded" }; +} + +// PARENT — background spawn: child runs independently with its own runId. +// Note: start() must be called from a step, not directly from a workflow. +export async function processOrder(orderId: string) { + "use workflow"; + + const order = await fulfillOrder(orderId); + // triggerReport is a step that calls start() — spawns an independent child. + const reportRunId = await triggerReport(orderId); + await sendConfirmation(orderId); + + return { orderId, order, reportRunId }; +} + +async function triggerReport(orderId: string): Promise { + "use step"; + // deploymentId: "latest" → child picks up future code deployments. 
+ const run = await start(generateReport, [orderId], { deploymentId: "latest" }); + return run.runId; +} + +// Background-spawnable child — independent run when started via start(). +export async function generateReport(reportId: string) { + "use workflow"; + await buildReport(reportId); + return { reportId, status: "ready" }; +} + +async function sendEmail(userId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/email/\${userId}\`, { method: "POST" }); +} + +async function sendPushNotification(userId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/push/\${userId}\`, { method: "POST" }); +} + +async function createAccount(userId: string): Promise { + "use step"; + await fetch("https://api.example.com/accounts", { + method: "POST", + body: JSON.stringify({ userId }), + }); +} + +async function setupPreferences(userId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/preferences/\${userId}\`, { method: "PUT" }); +} + +async function fulfillOrder(orderId: string): Promise<{ id: string }> { + "use step"; + return { id: orderId }; +} + +async function sendConfirmation(orderId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/orders/\${orderId}/confirm\`, { method: "POST" }); +} + +async function buildReport(reportId: string): Promise { + "use step"; + await fetch(\`https://api.example.com/reports/\${reportId}\`, { method: "POST" }); +} +`; + export const workflowCompositionStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; import { onboardUser } from "@/workflows/workflow-composition"; diff --git a/docs/lib/registry/types.ts b/docs/lib/registry/types.ts index 51e13efcab..b9bfa1a4f9 100644 --- a/docs/lib/registry/types.ts +++ b/docs/lib/registry/types.ts @@ -51,8 +51,22 @@ export interface RegistrySnippet { lang: string; /** Raw source code — rendered via shiki on the server. 
*/ code: string; - /** Optional caption rendered above the snippet. */ + /** + * Richly-commented version of `code` installed via the shadcn CLI. + * When present, the `/r/[name]` route serves this instead of `code` so + * the file landing in the user's project includes agent-friendly comments + * (PATTERN, USEFUL WHEN, TO ADAPT, inline "why" notes) without cluttering + * the UI. Falls back to `code` when absent. + */ + installCode?: string; + /** Optional caption rendered above the snippet (e.g. file path). */ caption?: string; + /** + * Optional prose rendered between the caption and the code block. + * Use for per-tab context that isn't obvious from the code alone + * (e.g. "The approval route imports the hook definition and calls .resume()…"). + */ + description?: string; } /** @@ -82,7 +96,162 @@ export type RegistryLogoId = | 'idempotency' | 'webhooks' | 'child-workflows' - | 'distributed-abort-controller'; + | 'distributed-abort-controller' + | 'upgrading-workflows'; + +/** + * Comparison table for patterns that have multiple valid approaches. + * First column is the "Aspect" label; remaining columns are approach names. + */ +export interface RegistryApproachTable { + /** Section heading. Defaults to "Choosing an approach" when omitted. */ + title?: string; + /** Optional prose intro rendered above the bullet summaries and table. */ + description?: string; + /** Short bullet summaries of each approach, rendered before the table. Supports **bold** and `code` inline syntax. */ + bullets?: string[]; + columns: string[]; + rows: { aspect: string; values: string[] }[]; + /** Optional closing sentence rendered below the table. */ + closing?: string; +} + +/** + * A per-approach section for patterns that have multiple distinct + * implementations (e.g. Hard Cancellation vs Stop Signal). 
When present on + * a guide, the detail page replaces the unified Overview/Concept layout with + * individual h2 sections — one per approach — each with its own code and an + * optional dedicated install command. + */ +export interface RegistryApproachSection { + /** Section heading — becomes an h2 on the detail page and a ToC entry. */ + title: string; + /** Optional prose rendered under the heading. */ + description?: string; + /** + * If this specific approach has its own shadcn install slug, show it here. + * Use when only one of the approaches is installable (e.g. Stop Signal), + * while the other is a one-liner built into the SDK (e.g. Hard Cancel). + */ + installSlug?: string; + /** + * Which `conceptSnippets` to render for this approach, matched by label. + * Order is preserved. + */ + snippetLabels: string[]; + /** + * Bullet points rendered after the code (e.g. consequences of this approach). + * Supports **bold** and `code` inline syntax. + */ + afterBullets?: string[]; + /** Closing paragraph rendered after afterBullets. */ + afterProse?: string; + /** Optional callout rendered after afterBullets/afterProse. */ + callout?: { + type: 'info' | 'warn' | 'tip'; + content: string; + }; +} + +/** + * Inline guide content that turns the registry detail page into a unified + * educational + plug-and-play surface. Replaces the need for a separate + * cookbook page for the same pattern. + */ +export interface RegistryGuide { + /** + * One or two educational paragraphs explaining the pattern and its variants. + * Rendered as prose before the when-to-use list and comparison table. + */ + overview?: string; + /** + * Short feature bullets rendered directly under longDescription (no heading). + * Good for surfacing 3-4 concrete capabilities before the deeper sections. + */ + introBullets?: string[]; + /** + * Optional mermaid diagram string rendered after longDescription/introBullets. 
+ * Use for patterns with a clear data-flow that's easier to understand + * visually. The section title defaults to "How it fits together". + */ + diagram?: string; + /** Override the default "How it fits together" diagram section title. */ + diagramTitle?: string; + /** + * Optional prose + bullets rendered immediately after the diagram. + * Use to explain the key integration points shown in the diagram + * (e.g. "Inbound — …" / "Outbound — …" for Chat SDK). + */ + diagramContext?: { + prose?: string; + bullets?: string[]; + }; + /** + * Optional "Why use this" section rendered before "When to use this". + * Explains what the naive approach looks like without this pattern + * and what becomes possible with it. Defaults to "Why use this". + */ + whySection?: { + title?: string; + /** Prose introducing the problem (e.g. "Without Workflow, you'd need…"). */ + problemProse?: string; + /** Bullets describing the pain points of the naive approach. */ + problemBullets?: string[]; + /** Prose introducing what this pattern enables. */ + solutionProse?: string; + /** Bullets describing what the pattern unlocks. */ + solutionBullets?: string[]; + /** Optional closing sentence after the solution bullets. */ + closingProse?: string; + }; + /** "When to use this" bullet points. */ + whenToUse?: string[]; + /** + * Side-by-side comparison of multiple approaches — e.g. Hard Cancel vs + * Stop Signal. Only needed when the pattern has meaningful trade-offs worth + * calling out explicitly. + */ + approaches?: RegistryApproachTable; + /** + * When true, "When to use" and "Choosing an approach" render as top-level + * h2s instead of being nested under an "Overview" umbrella. Use this for + * single-pattern items (no approachSections) that still want a flat layout + * matching the cookbook structure. + */ + flatLayout?: boolean; + /** + * When defined, the page splits into per-approach h2 sections instead of + * the unified Concept tab view. 
Each section shows its own code and an + * optional install command. "When to use" and "Choosing an approach" are + * promoted to top-level h2s (no umbrella Overview heading). + */ + approachSections?: RegistryApproachSection[]; + /** Numbered "how it works" steps. */ + howItWorks?: string[]; + /** + * Optional prose rendered after the numbered howItWorks list. + * Good for a single closing sentence that ties the steps together. + */ + howItWorksClosing?: string; + /** Optional callout rendered inside the How it works section. */ + callout?: { + type: 'info' | 'warn' | 'tip'; + content: string; + }; + /** + * Replaces the generic "A preview of the code that gets copied into your app." + * description in the Source section with a pattern-specific explanation. + */ + sourceDescription?: string; + /** Bullet-point adaptation tips (or pitfalls, etc.). */ + adapting?: string[]; + /** Override the "Adapting this" heading. */ + adaptingTitle?: string; + /** Optional prose intro rendered before the adapting bullets. */ + adaptingIntro?: string; + /** Key API links rendered at the bottom of the page. */ + keyApis?: { label: string; url: string }[]; +} export interface RegistryItem { /** Slug used in the URL — `/registry/${id}`. */ @@ -124,4 +293,17 @@ export interface RegistryItem { files: RegistryFile[]; /** Code snippets shown on the detail page (workflow source, usage, etc.). */ snippets: RegistrySnippet[]; + /** + * Inline guide content — when present, the detail page renders educational + * sections (overview, how it works, adapting, key APIs) so the page serves + * as both cookbook and plug-and-play reference. + */ + guide?: RegistryGuide; + /** + * Simplified conceptual snippets for patterns where the educational code is + * genuinely different from the plug-and-play shadcn code. When present, + * the Source section renders these under a "Concept" heading before the + * production snippets. 
+ */ + conceptSnippets?: RegistrySnippet[]; } diff --git a/docs/next.config.ts b/docs/next.config.ts index cb9be79bf3..b5d5fca5f6 100644 --- a/docs/next.config.ts +++ b/docs/next.config.ts @@ -69,22 +69,22 @@ const config: NextConfig = { }, { source: '/docs/cookbook', - destination: '/cookbook', + destination: '/patterns', permanent: true, }, { source: '/docs/cookbook/:path*', - destination: '/cookbook/:path*', + destination: '/patterns', permanent: true, }, { source: '/cookbooks', - destination: '/cookbook', + destination: '/patterns', permanent: true, }, { source: '/cookbooks/:path*', - destination: '/cookbook/:path*', + destination: '/patterns', permanent: true, }, { @@ -113,47 +113,129 @@ const config: NextConfig = { destination: '/worlds', permanent: true, }, - // Foundations "Common Patterns" page was retired in favor of dedicated - // cookbook recipes. Path-level redirect lands visitors on the cookbook - // overview where each pattern (Sequential & Parallel, Workflow - // Composition, Timeouts, etc.) has its own page. Note: anchor fragments - // from old links (#timeout-pattern, #direct-await-flattening, etc.) are - // dropped on redirect — Next.js redirects() does not match anchors. 
+ // Foundations "Common Patterns" page was retired — now part of /patterns { source: '/docs/foundations/common-patterns', - destination: '/cookbook', + destination: '/patterns', permanent: true, }, { source: '/docs/foundations/control-flow-patterns', - destination: '/cookbook', + destination: '/patterns', permanent: true, }, - // Cookbook: child-workflows and distributed-abort-controller moved - // from common-patterns (now "Reliability Patterns") to advanced + // /registry → /patterns (renamed) + { source: '/registry', destination: '/patterns', permanent: true }, { - source: '/cookbook/common-patterns/child-workflows', - destination: '/cookbook/advanced/child-workflows', + source: '/registry/:id', + destination: '/patterns/:id', permanent: true, }, + // Cookbook → Patterns redirects (cookbook pages merged into patterns) + { source: '/cookbook', destination: '/patterns', permanent: true }, { - source: '/cookbook/common-patterns/distributed-abort-controller', - destination: '/cookbook/advanced/distributed-abort-controller', + source: '/cookbook/agent-patterns/agent-cancellation', + destination: '/patterns/agent-cancellation', permanent: true, }, - // Cookbook: stop-workflow → agent-stop-signal → agent-cancellation. - // The page now covers both Hard Cancellation (run.cancel()) and Stop - // Signal (hook + Promise.race) as named patterns, so the broader - // "Agent Cancellation" title fits both. Both prior URLs land directly - // on the current page (no redirect chains). 
{ source: '/cookbook/agent-patterns/stop-workflow', - destination: '/cookbook/agent-patterns/agent-cancellation', + destination: '/patterns/agent-cancellation', permanent: true, }, { source: '/cookbook/agent-patterns/agent-stop-signal', - destination: '/cookbook/agent-patterns/agent-cancellation', + destination: '/patterns/agent-cancellation', + permanent: true, + }, + { + source: '/cookbook/agent-patterns/durable-agent', + destination: '/patterns/durable-agent', + permanent: true, + }, + { + source: '/cookbook/agent-patterns/human-in-the-loop', + destination: '/patterns/human-in-the-loop', + permanent: true, + }, + { + source: '/cookbook/integrations/ai-sdk', + destination: '/patterns/ai-sdk', + permanent: true, + }, + { + source: '/cookbook/integrations/chat-sdk', + destination: '/patterns/chat-sdk', + permanent: true, + }, + { + source: '/cookbook/integrations/sandbox', + destination: '/patterns/sandbox', + permanent: true, + }, + { + source: '/cookbook/common-patterns/batching', + destination: '/patterns/batching', + permanent: true, + }, + { + source: '/cookbook/common-patterns/idempotency', + destination: '/patterns/idempotency', + permanent: true, + }, + { + source: '/cookbook/common-patterns/rate-limiting', + destination: '/patterns/rate-limiting', + permanent: true, + }, + { + source: '/cookbook/common-patterns/saga', + destination: '/patterns/saga', + permanent: true, + }, + { + source: '/cookbook/common-patterns/scheduling', + destination: '/patterns/scheduling', + permanent: true, + }, + { + source: '/cookbook/common-patterns/sequential-and-parallel', + destination: '/patterns/sequential-and-parallel', + permanent: true, + }, + { + source: '/cookbook/common-patterns/timeouts', + destination: '/patterns/timeouts', + permanent: true, + }, + { + source: '/cookbook/common-patterns/webhooks', + destination: '/patterns/webhooks', + permanent: true, + }, + { + source: '/cookbook/common-patterns/workflow-composition', + destination: 
'/patterns/workflow-composition', + permanent: true, + }, + { + source: '/cookbook/common-patterns/child-workflows', + destination: '/patterns/child-workflows', + permanent: true, + }, + { + source: '/cookbook/common-patterns/distributed-abort-controller', + destination: '/patterns/distributed-abort-controller', + permanent: true, + }, + { + source: '/cookbook/advanced/child-workflows', + destination: '/patterns/child-workflows', + permanent: true, + }, + { + source: '/cookbook/advanced/distributed-abort-controller', + destination: '/patterns/distributed-abort-controller', permanent: true, }, ]; diff --git a/docs/proxy.ts b/docs/proxy.ts index a2b7d5185e..f93344c957 100644 --- a/docs/proxy.ts +++ b/docs/proxy.ts @@ -130,9 +130,9 @@ const proxy = (request: NextRequest, context: NextFetchEvent) => { }; export const config = { - // Matcher ignoring `/_next/`, `/api/`, static assets, favicon, sitemap, robots, etc. + // Matcher ignoring `/_next/`, `/api/`, `/r/` (shadcn registry), static assets, etc. matcher: [ - '/((?!api|_next/static|_next/image|favicon.ico|sitemap.xml|robots.txt|og|.*\\.tgz$|.*\\.svg$|.*\\.zip$).*)', + '/((?!api|r(?:/|$)|_next/static|_next/image|favicon.ico|sitemap.xml|robots.txt|og|.*\\.tgz$|.*\\.svg$|.*\\.zip$).*)', ], }; From 2e2e4089d0faf625eef622e41ff7c24083878a8a Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 00:00:28 -0700 Subject: [PATCH 04/21] fix: correct installSlug for agent-cancellation stop signal approach Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index 95c027ccbb..1671494748 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -290,7 +290,7 @@ export const registryItems: RegistryItem[] = [ title: 'Stop Signal', description: 'The workflow races the agent against a `stopHook` keyed by the run ID. 
When Stop is triggered, the workflow exits at its next `await` boundary, runs any cleanup, and emits a `data-stopped` stream part so the client renders a clean ending. The route falls back to hard cancel automatically if the hook is already gone.', - installSlug: '@workflow-sdk/agent-cancellation', + installSlug: 'https://workflow-sdk.dev/r/agent-cancellation', snippetLabels: ['Stop Signal', 'Stop route'], callout: { type: 'warn', From 9843a9a3432b4fbeb097fff50c6e58596d3de235 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 09:45:28 -0700 Subject: [PATCH 05/21] fix: install workflow files into app/workflows/ not root workflows/ Co-authored-by: Cursor --- docs/app/r/[name]/route.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/app/r/[name]/route.ts b/docs/app/r/[name]/route.ts index b3bdc119dc..a6f3a07d11 100644 --- a/docs/app/r/[name]/route.ts +++ b/docs/app/r/[name]/route.ts @@ -62,7 +62,8 @@ export async function GET( content: snippet.installCode ?? snippet.code, type: 'registry:lib', // target controls where shadcn places the file in the user's project. - target: filePath, + // Workflow files live under app/workflows/ in a Next.js app-router project. + target: `app/${filePath}`, }); } @@ -77,7 +78,7 @@ export async function GET( path: filePath, content: snippet.installCode ?? 
snippet.code, type: 'registry:lib', - target: filePath, + target: `app/${filePath}`, }); } } From 768a81aeac9fed6593f211775c5c08fc8ee436e4 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 09:50:33 -0700 Subject: [PATCH 06/21] fix: use registry:file type so shadcn writes inline content instead of fetching path URL Co-authored-by: Cursor --- docs/app/r/[name]/route.ts | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/app/r/[name]/route.ts b/docs/app/r/[name]/route.ts index a6f3a07d11..fad55d428e 100644 --- a/docs/app/r/[name]/route.ts +++ b/docs/app/r/[name]/route.ts @@ -48,7 +48,7 @@ export async function GET( const files: Array<{ path: string; content: string; - type: 'registry:lib'; + type: 'registry:file'; target: string; }> = []; @@ -60,8 +60,9 @@ export async function GET( files.push({ path: filePath, content: snippet.installCode ?? snippet.code, - type: 'registry:lib', - // target controls where shadcn places the file in the user's project. + // registry:file tells the shadcn CLI to write the inline `content` field + // directly to `target` without trying to resolve `path` as a URL. + type: 'registry:file', // Workflow files live under app/workflows/ in a Next.js app-router project. target: `app/${filePath}`, }); @@ -77,7 +78,7 @@ export async function GET( files.push({ path: filePath, content: snippet.installCode ?? 
snippet.code, - type: 'registry:lib', + type: 'registry:file', target: `app/${filePath}`, }); } @@ -86,7 +87,7 @@ export async function GET( const registryItem = { $schema: 'https://ui.shadcn.com/schema/registry-item.json', name: item.id, - type: 'registry:lib' as const, + type: 'registry:file' as const, title: item.name, description: item.description, files, From b5d910d58b53fb4f0879cfe4d3bab71a2084f5ec Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 09:51:19 -0700 Subject: [PATCH 07/21] fix: rename installed workflow file from stoppable-agent.ts to agent-cancellation.ts Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index 1671494748..e405275204 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -160,7 +160,7 @@ export const registryItems: RegistryItem[] = [ ], files: [ { - path: 'workflows/stoppable-agent.ts', + path: 'workflows/agent-cancellation.ts', description: 'Durable agent + `stopHook` + `Promise.race` exit, with a final `data-stopped` part emitted on stop.', }, @@ -169,7 +169,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/stoppable-agent.ts', + caption: 'workflows/agent-cancellation.ts', code: agentCancellationWorkflowSource, installCode: agentCancellationWorkflowInstallSource, }, @@ -210,7 +210,7 @@ export const registryItems: RegistryItem[] = [ label: 'Stop Signal', lang: 'tsx', caption: - 'workflows/stoppable-agent.ts — hook + Promise.race graceful exit', + 'workflows/agent-cancellation.ts — hook + Promise.race graceful exit', code: agentCancellationConceptStopSignalSource, }, { From b2723cfa228e4693807db0de9c84d3856249a3fb Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 09:55:22 -0700 Subject: [PATCH 08/21] fix: all 19 patterns now install correctly with rich comments 
- Add installCode to chat-sdk hook type snippet - Fix sandbox pipeline caption (strip inline description from filename) - Add installCode to sandbox pipeline snippet - Fix distributed-abort-controller caption: lib/ \u2192 workflows/ - Fix resend caption: app/workflows/ \u2192 workflows/ (route filter was missing it) Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 11 ++++-- docs/lib/registry/snippets/chat-sdk.ts | 30 +++++++++++++++ docs/lib/registry/snippets/sandbox.ts | 51 ++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 4 deletions(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index e405275204..e49da88446 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -24,6 +24,7 @@ import { chatSdkBotSource, chatSdkHandlersSource, chatSdkHookTypeSource, + chatSdkHookTypeInstallSource, chatSdkWebhookSource, chatSdkWorkflowSource, chatSdkWorkflowInstallSource, @@ -81,6 +82,7 @@ import { sandboxCommandRouteSource, sandboxStartRouteSource, sandboxUsageSource, + sandboxPipelineInstallSource, sandboxWorkflowSource, sandboxWorkflowInstallSource, } from './snippets/sandbox'; @@ -790,6 +792,7 @@ export const registryItems: RegistryItem[] = [ lang: 'tsx', caption: 'workflows/chat-turn-hook.ts', code: chatSdkHookTypeSource, + installCode: chatSdkHookTypeInstallSource, }, { label: 'Handlers', @@ -962,11 +965,11 @@ export const registryItems: RegistryItem[] = [ { label: 'Quickstart', lang: 'tsx', - caption: - 'workflows/sandbox-pipeline.ts — simpler one-shot pipeline (no session loop)', + caption: 'workflows/sandbox-pipeline.ts', description: 'Before the full session pattern, the simplest shape. 
Each `Sandbox` method is an implicit step, so the event log records every command and the workflow replays from the last completed call on restart.', code: sandboxUsageSource, + installCode: sandboxPipelineInstallSource, }, ], guide: { @@ -1941,7 +1944,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Lib', lang: 'tsx', - caption: 'lib/distributed-abort-controller.ts', + caption: 'workflows/distributed-abort-controller.ts', code: distributedAbortControllerLibSource, installCode: distributedAbortControllerLibInstallSource, }, @@ -2115,7 +2118,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'app/workflows/providers/resendWorkflow.ts', + caption: 'workflows/providers/resendWorkflow.ts', code: resendWorkflowSource, installCode: resendWorkflowInstallSource, }, diff --git a/docs/lib/registry/snippets/chat-sdk.ts b/docs/lib/registry/snippets/chat-sdk.ts index 74cc22199e..f019f15a08 100644 --- a/docs/lib/registry/snippets/chat-sdk.ts +++ b/docs/lib/registry/snippets/chat-sdk.ts @@ -225,6 +225,36 @@ export type ChatTurnPayload = { }; `; +export const chatSdkHookTypeInstallSource = `/** + * Chat SDK — ChatTurnPayload hook type. + * + * THE PATTERN: + * This file defines only the payload type used by the chat-turn hook. + * Keeping it in a separate module means the workflow file can import the + * type without pulling in handler-level dependencies (Chat SDK adapters, + * DB clients, etc.) into the workflow's import graph. + * + * USEFUL WHEN: + * - Your workflow and webhook handler live in different modules and you want + * a clean shared type without circular imports. + * - You are building on top of the Chat SDK durable-chat-session pattern. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Add any extra fields your handler needs to pass to the workflow turn + * (e.g. userId, metadata, attachments). + * - Import this type from both the workflow and the webhook handler. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/chat-sdk + */ +import type { SerializedMessage } from "chat"; + +// Importing this from the handler module keeps adapter dependencies out +// of the workflow's import graph. +export type ChatTurnPayload = { + message: SerializedMessage; +}; +`; + export const chatSdkHandlersSource = `import type { Message, Thread } from "chat"; import { getRun, resumeHook, start } from "workflow/api"; import { bot, type ThreadState } from "@/lib/bot"; diff --git a/docs/lib/registry/snippets/sandbox.ts b/docs/lib/registry/snippets/sandbox.ts index 09743032e0..1a48cb4a1b 100644 --- a/docs/lib/registry/snippets/sandbox.ts +++ b/docs/lib/registry/snippets/sandbox.ts @@ -607,6 +607,57 @@ export function SandboxRunner() { } `; +export const sandboxPipelineInstallSource = `/** + * Vercel Sandbox — one-shot pipeline (quickstart). + * + * THE PATTERN: + * Each \`Sandbox\` method (\`create\`, \`runCommand\`, \`stop\`) is an implicit + * workflow step. The runtime persists the result of every call so on a + * restart the workflow skips already-completed steps and resumes from the + * last successful one — no duplicate sandbox charges, no lost output. + * + * USEFUL WHEN: + * - You need to run a short, finite list of shell commands in a clean VM. + * - You want crash-safety for free without managing the full session loop. + * - You are prototyping before graduating to the full sandbox-session pattern. + * + * TO ADAPT THIS TO YOUR USE CASE: + * - Replace the \`commands\` array with whatever shell commands your use + * case needs (install deps, run tests, compile, etc.). + * - Change the runtime to \`python3.13\`, \`deno2\`, etc. as needed. + * - For long-running interactive sessions with a persistent sandbox, + * see the full sandbox-session pattern instead. 
+ * + * DOCS: https://workflow-sdk.dev/patterns/sandbox + */ +import { Sandbox } from "@vercel/sandbox"; + +export async function sandboxPipeline(input: { commands: string[] }) { + "use workflow"; + + const sandbox = await Sandbox.create({ runtime: "node22" }); + + try { + const results = []; + for (const command of input.commands) { + const result = await sandbox.runCommand({ + cmd: "bash", + args: ["-c", command], + }); + results.push({ + command, + exitCode: result.exitCode, + stdout: await result.stdout(), + stderr: await result.stderr(), + }); + } + return { status: "completed", results }; + } finally { + await sandbox.stop(); + } +} +`; + export const sandboxUsageSource = `// Quickstart — one-shot pipeline. // Each \`Sandbox\` method (\`create\`, \`runCommand\`, \`stop\`, \`snapshot\`) is an // implicit step, so the event log records every command and the workflow From b4b34684de2ec67b44053736a4074121e77399f5 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 09:59:13 -0700 Subject: [PATCH 09/21] fix: remove inline descriptions from snippet captions (ai-sdk support.ts, agent-cancellation stop route) Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index e49da88446..4d2e6c43d5 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -218,7 +218,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Stop route', lang: 'tsx', - caption: 'app/api/agent/[runId]/stop/route.ts — resumes the hook', + caption: 'app/api/agent/[runId]/stop/route.ts', code: agentCancellationConceptStopRouteSource, }, ], @@ -380,8 +380,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: - 'workflows/support.ts — one workflow run = one full conversation', + caption: 'workflows/support.ts', code: aiSdkWorkflowSource, installCode: 
aiSdkWorkflowInstallSource, }, From 207ee2791c2be38ae8af34bdf5f93db187ff63ea Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:02:45 -0700 Subject: [PATCH 10/21] fix: rename workflow files to match pattern IDs and fix import paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - flight-agent.ts → durable-agent.ts - approval-agent.ts → human-in-the-loop.ts - durable-chat-session.ts → chat-sdk.ts - support.ts → ai-sdk.ts - upgrading-workflow.ts → upgrading-workflows.ts - Fix all @/workflows/ import paths to @/app/workflows/ - Add DEPENDENCIES section to chat-sdk installCode comments Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 22 +++++++++---------- .../registry/snippets/agent-cancellation.ts | 6 ++--- docs/lib/registry/snippets/ai-sdk.ts | 2 +- docs/lib/registry/snippets/batching.ts | 2 +- docs/lib/registry/snippets/chat-sdk.ts | 20 +++++++++++++---- docs/lib/registry/snippets/child-workflows.ts | 2 +- docs/lib/registry/snippets/durable-agent.ts | 2 +- .../registry/snippets/human-in-the-loop.ts | 4 ++-- docs/lib/registry/snippets/idempotency.ts | 2 +- docs/lib/registry/snippets/rate-limiting.ts | 2 +- docs/lib/registry/snippets/saga.ts | 2 +- docs/lib/registry/snippets/sandbox.ts | 6 ++--- docs/lib/registry/snippets/scheduling.ts | 4 ++-- .../snippets/sequential-and-parallel.ts | 2 +- docs/lib/registry/snippets/timeouts.ts | 2 +- .../registry/snippets/upgrading-workflows.ts | 4 ++-- docs/lib/registry/snippets/webhooks.ts | 2 +- .../registry/snippets/workflow-composition.ts | 2 +- 18 files changed, 50 insertions(+), 38 deletions(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index 4d2e6c43d5..e20dc396d4 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -361,7 +361,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/ai-sdk', files: [ { - path: 'workflows/support.ts', + path: 
'workflows/ai-sdk.ts', description: 'The durable chat workflow — `supportWorkflow()` + `turnHook` + tool steps. One run = one full conversation.', }, @@ -380,7 +380,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/support.ts', + caption: 'workflows/ai-sdk.ts', code: aiSdkWorkflowSource, installCode: aiSdkWorkflowInstallSource, }, @@ -525,7 +525,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/durable-agent', files: [ { - path: 'workflows/flight-agent.ts', + path: 'workflows/durable-agent.ts', description: 'The durable agent workflow — `flightAgent()` orchestrator + three tool steps (`searchFlights`, `bookFlight`, `checkWeather`). Replace the tools with your own.', }, @@ -544,7 +544,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/flight-agent.ts', + caption: 'workflows/durable-agent.ts', code: durableAgentWorkflowSource, installCode: durableAgentWorkflowInstallSource, }, @@ -626,7 +626,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/human-in-the-loop', files: [ { - path: 'workflows/approval-agent.ts', + path: 'workflows/human-in-the-loop.ts', description: 'Durable agent + `approvalHook` + the `requestApproval` tool that races the hook against a 24h `sleep()` and streams resolution parts.', }, @@ -650,7 +650,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/approval-agent.ts', + caption: 'workflows/human-in-the-loop.ts', code: humanInTheLoopWorkflowSource, installCode: humanInTheLoopWorkflowInstallSource, }, @@ -752,7 +752,7 @@ export const registryItems: RegistryItem[] = [ 'The `Chat` singleton — adapters, state backend, and `ThreadState` type that holds the `runId` per thread.', }, { - path: 'workflows/durable-chat-session.ts', + path: 'workflows/chat-sdk.ts', description: 'The durable session workflow — 
`durableChatSession()` + `chatTurnHook`, with platform side-effects in dynamic-import steps.', }, @@ -782,7 +782,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/durable-chat-session.ts', + caption: 'workflows/chat-sdk.ts', code: chatSdkWorkflowSource, installCode: chatSdkWorkflowInstallSource, }, @@ -2026,7 +2026,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/upgrading-workflows', files: [ { - path: 'workflows/upgrading-workflow.ts', + path: 'workflows/upgrading-workflows.ts', description: 'The self-upgrading workflow — one iteration per run, blocks on `resumeHook`, computes new state, then spawns the next iteration with `deploymentId: "latest"`.', }, @@ -2045,7 +2045,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Method 1 — per-event spawn', lang: 'tsx', - caption: 'workflows/upgrading-workflow.ts', + caption: 'workflows/upgrading-workflows.ts', description: 'One run per event. After each resume, state is computed and the next iteration is spawned with `deploymentId: "latest"`. Every event automatically picks up the latest code.', code: upgradingWorkflowsWorkflowSource, @@ -2054,7 +2054,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Method 2 — explicit upgrade hook', lang: 'tsx', - caption: 'workflows/upgrading-workflow.ts', + caption: 'workflows/upgrading-workflows.ts', description: 'Long-running loop that handles many events per run. 
A separate `upgradeHook` races the work hook — fire it when you want to force a respawn on the latest deployment.', code: upgradingWorkflowsMethod2Source, diff --git a/docs/lib/registry/snippets/agent-cancellation.ts b/docs/lib/registry/snippets/agent-cancellation.ts index 30ca01fa0c..e799cd6422 100644 --- a/docs/lib/registry/snippets/agent-cancellation.ts +++ b/docs/lib/registry/snippets/agent-cancellation.ts @@ -209,7 +209,7 @@ export async function stoppableAgent(messages: ModelMessage[]) { export const agentCancellationStartRouteSource = `import type { UIMessage } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start } from "workflow/api"; -import { stoppableAgent } from "@/workflows/stoppable-agent"; +import { stoppableAgent } from "@/app/workflows/agent-cancellation"; export async function POST(req: Request) { const { messages }: { messages: UIMessage[] } = await req.json(); @@ -226,7 +226,7 @@ export async function POST(req: Request) { export const agentCancellationRouteSource = `import { getRun } from "workflow/api"; import { NextResponse } from "next/server"; -import { stopHook } from "@/workflows/stoppable-agent"; +import { stopHook } from "@/app/workflows/agent-cancellation"; export async function POST( req: Request, @@ -375,7 +375,7 @@ export async function stoppableAgent(messages: ModelMessage[]) { } `; -export const agentCancellationConceptStopRouteSource = `import { stopHook } from "@/workflows/stoppable-agent"; +export const agentCancellationConceptStopRouteSource = `import { stopHook } from "@/app/workflows/agent-cancellation"; // POST /api/agent/[runId]/stop — resumes the hook to trigger a graceful exit. 
export async function POST( diff --git a/docs/lib/registry/snippets/ai-sdk.ts b/docs/lib/registry/snippets/ai-sdk.ts index 67530c0c42..32815d4cfd 100644 --- a/docs/lib/registry/snippets/ai-sdk.ts +++ b/docs/lib/registry/snippets/ai-sdk.ts @@ -229,7 +229,7 @@ export async function supportWorkflow(initialMessages: ModelMessage[]) { export const aiSdkRouteSource = `import type { UIMessage, UIMessageChunk } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start, getRun } from "workflow/api"; -import { supportWorkflow, turnHook } from "@/workflows/support"; +import { supportWorkflow, turnHook } from "@/app/workflows/ai-sdk"; // Pump the durable stream until this turn's \`finish\` chunk, then close // the HTTP response. Release (don't cancel) the source reader so the diff --git a/docs/lib/registry/snippets/batching.ts b/docs/lib/registry/snippets/batching.ts index 875329e608..3fdbb97140 100644 --- a/docs/lib/registry/snippets/batching.ts +++ b/docs/lib/registry/snippets/batching.ts @@ -169,7 +169,7 @@ async function processRecord(record: ImportRecord): Promise { export const batchingStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { batchImport, type ImportRecord } from "@/workflows/batching"; +import { batchImport, type ImportRecord } from "@/app/workflows/batching"; // POST /api/batching { records: ImportRecord[], batchSize?: number } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/chat-sdk.ts b/docs/lib/registry/snippets/chat-sdk.ts index f019f15a08..96279c7144 100644 --- a/docs/lib/registry/snippets/chat-sdk.ts +++ b/docs/lib/registry/snippets/chat-sdk.ts @@ -47,7 +47,7 @@ import type { ThreadState } from "@/lib/bot"; // Hook payload type lives in its own file so the webhook side can import // it without pulling in the workflow module. 
-import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; +import type { ChatTurnPayload } from "@/app/workflows/chat-turn-hook"; const chatTurnHook = defineHook(); @@ -133,6 +133,15 @@ export const chatSdkWorkflowInstallSource = `/** * - Tool calls in the bot should be retried without re-running on replay. * - You want the bot to maintain conversation state across reconnections. * + * DEPENDENCIES — run these before the workflow will compile: + * pnpm add chat # Chat SDK core (provides Message, Thread, etc.) + * pnpm add @chat-adapter/slack # or telegram, teams, discord — your platform + * pnpm add @chat-state/redis # or another state backend + * + * Then create lib/bot.ts exporting: + * export const bot = createBot({ adapter, state }); + * export type ThreadState = { workflowRunId?: string }; + * * TO ADAPT THIS TO YOUR USE CASE: * - Replace the runTurn step body with your AI SDK call, tool loop, or * database lookup — any async logic that should be durable. @@ -148,7 +157,7 @@ export const chatSdkWorkflowInstallSource = `/** import { Message, reviver, type Thread } from "chat"; import { defineHook, getWorkflowMetadata } from "workflow"; import type { ThreadState } from "@/lib/bot"; -import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; +import type { ChatTurnPayload } from "@/app/workflows/chat-turn-hook"; // One hook per run, token = runId. Reused every turn (created once outside // the loop to avoid HookConflictError on subsequent turns). @@ -239,6 +248,9 @@ export const chatSdkHookTypeInstallSource = `/** * a clean shared type without circular imports. * - You are building on top of the Chat SDK durable-chat-session pattern. * + * DEPENDENCIES: + * pnpm add chat # Chat SDK core (provides SerializedMessage) + * * TO ADAPT THIS TO YOUR USE CASE: * - Add any extra fields your handler needs to pass to the workflow turn * (e.g. userId, metadata, attachments). 
@@ -258,8 +270,8 @@ export type ChatTurnPayload = { export const chatSdkHandlersSource = `import type { Message, Thread } from "chat"; import { getRun, resumeHook, start } from "workflow/api"; import { bot, type ThreadState } from "@/lib/bot"; -import { durableChatSession } from "@/workflows/durable-chat-session"; -import type { ChatTurnPayload } from "@/workflows/chat-turn-hook"; +import { durableChatSession } from "@/app/workflows/chat-sdk"; +import type { ChatTurnPayload } from "@/app/workflows/chat-turn-hook"; async function startSession( thread: Thread, diff --git a/docs/lib/registry/snippets/child-workflows.ts b/docs/lib/registry/snippets/child-workflows.ts index 9d42eb80ca..8e9259ec78 100644 --- a/docs/lib/registry/snippets/child-workflows.ts +++ b/docs/lib/registry/snippets/child-workflows.ts @@ -291,7 +291,7 @@ async function generateSummary(analysis: string): Promise { export const childWorkflowsStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { processDocumentBatch } from "@/workflows/child-workflows"; +import { processDocumentBatch } from "@/app/workflows/child-workflows"; // POST /api/child-workflows { documentIds: string[] } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/durable-agent.ts b/docs/lib/registry/snippets/durable-agent.ts index dd77d022b1..d2cc6215b5 100644 --- a/docs/lib/registry/snippets/durable-agent.ts +++ b/docs/lib/registry/snippets/durable-agent.ts @@ -243,7 +243,7 @@ export async function flightAgent(messages: ModelMessage[]) { export const durableAgentStartRouteSource = `import type { UIMessage } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start } from "workflow/api"; -import { flightAgent } from "@/workflows/flight-agent"; +import { flightAgent } from "@/app/workflows/durable-agent"; export async function POST(req: Request) { const { messages }: { messages: UIMessage[] } = await 
req.json(); diff --git a/docs/lib/registry/snippets/human-in-the-loop.ts b/docs/lib/registry/snippets/human-in-the-loop.ts index 10e4a44ef7..81852fb537 100644 --- a/docs/lib/registry/snippets/human-in-the-loop.ts +++ b/docs/lib/registry/snippets/human-in-the-loop.ts @@ -316,7 +316,7 @@ export async function approvalAgent(messages: ModelMessage[]) { export const humanInTheLoopStartRouteSource = `import type { UIMessage } from "ai"; import { convertToModelMessages, createUIMessageStreamResponse } from "ai"; import { start } from "workflow/api"; -import { approvalAgent } from "@/workflows/approval-agent"; +import { approvalAgent } from "@/app/workflows/human-in-the-loop"; export async function POST(req: Request) { const { messages }: { messages: UIMessage[] } = await req.json(); @@ -332,7 +332,7 @@ export async function POST(req: Request) { `; export const humanInTheLoopRouteSource = `import { NextResponse } from "next/server"; -import { approvalHook } from "@/workflows/approval-agent"; +import { approvalHook } from "@/app/workflows/human-in-the-loop"; export async function POST(req: Request) { const { toolCallId, approved, comment } = (await req.json()) as { diff --git a/docs/lib/registry/snippets/idempotency.ts b/docs/lib/registry/snippets/idempotency.ts index bf3f07c252..4f1101e8da 100644 --- a/docs/lib/registry/snippets/idempotency.ts +++ b/docs/lib/registry/snippets/idempotency.ts @@ -155,7 +155,7 @@ async function sendReceipt(customerId: string, chargeId: string): Promise export const idempotencyStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { chargeCustomer } from "@/workflows/idempotency"; +import { chargeCustomer } from "@/app/workflows/idempotency"; // POST /api/idempotency { customerId, amountCents } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/rate-limiting.ts b/docs/lib/registry/snippets/rate-limiting.ts index 1525ea9374..0332777942 100644 --- 
a/docs/lib/registry/snippets/rate-limiting.ts +++ b/docs/lib/registry/snippets/rate-limiting.ts @@ -156,7 +156,7 @@ upsertToWarehouse.maxRetries = 10; export const rateLimitingStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { syncContact } from "@/workflows/rate-limiting"; +import { syncContact } from "@/app/workflows/rate-limiting"; // POST /api/rate-limiting { contactId } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/saga.ts b/docs/lib/registry/snippets/saga.ts index 3bc079592b..f8e8f48ba4 100644 --- a/docs/lib/registry/snippets/saga.ts +++ b/docs/lib/registry/snippets/saga.ts @@ -279,7 +279,7 @@ async function deprovisionSeats(accountId: string, entitlementId: string): Promi export const sagaStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { subscriptionUpgradeSaga } from "@/workflows/saga"; +import { subscriptionUpgradeSaga } from "@/app/workflows/saga"; // POST /api/saga { accountId, seats } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/sandbox.ts b/docs/lib/registry/snippets/sandbox.ts index 1a48cb4a1b..1eb0ed33e7 100644 --- a/docs/lib/registry/snippets/sandbox.ts +++ b/docs/lib/registry/snippets/sandbox.ts @@ -442,7 +442,7 @@ export async function sandboxSessionWorkflow() { `; export const sandboxStartRouteSource = `import { start, getRun } from "workflow/api"; -import { sandboxSessionWorkflow } from "@/workflows/sandbox-session"; +import { sandboxSessionWorkflow } from "@/app/workflows/sandbox-session"; export async function POST(req: Request) { let body: { runId?: string } = {}; @@ -490,7 +490,7 @@ function ndjson() { } `; -export const sandboxCommandRouteSource = `import { commandHook } from "@/workflows/sandbox-session"; +export const sandboxCommandRouteSource = `import { commandHook } from "@/app/workflows/sandbox-session"; export async function 
POST(req: Request) { const { runId, command } = (await req.json()) as { @@ -524,7 +524,7 @@ export async function POST(req: Request) { export const sandboxClientSource = `"use client"; import { useCallback, useEffect, useRef, useState } from "react"; -import type { SandboxEvent } from "@/workflows/sandbox-session"; +import type { SandboxEvent } from "@/app/workflows/sandbox-session"; const RUN_ID_KEY = "sandbox.runId"; diff --git a/docs/lib/registry/snippets/scheduling.ts b/docs/lib/registry/snippets/scheduling.ts index 6e29306182..1d35190511 100644 --- a/docs/lib/registry/snippets/scheduling.ts +++ b/docs/lib/registry/snippets/scheduling.ts @@ -127,7 +127,7 @@ async function runAction(action: ScheduledAction): Promise { export const schedulingStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { scheduleAction, type ScheduledAction } from "@/workflows/scheduling"; +import { scheduleAction, type ScheduledAction } from "@/app/workflows/scheduling"; // POST /api/scheduling { id, delay, payload } export async function POST(request: Request) { @@ -145,7 +145,7 @@ export async function POST(request: Request) { `; export const schedulingCancelRouteSource = `import { NextResponse } from "next/server"; -import { cancelSchedule } from "@/workflows/scheduling"; +import { cancelSchedule } from "@/app/workflows/scheduling"; // POST /api/scheduling/cancel { scheduleId, reason? } // Idempotent: returns success even if the hook has already fired or expired. 
diff --git a/docs/lib/registry/snippets/sequential-and-parallel.ts b/docs/lib/registry/snippets/sequential-and-parallel.ts index ee612c5d09..59050132fc 100644 --- a/docs/lib/registry/snippets/sequential-and-parallel.ts +++ b/docs/lib/registry/snippets/sequential-and-parallel.ts @@ -209,7 +209,7 @@ async function fetchFallback(userId: string): Promise<{ source: "fallback"; user export const sequentialAndParallelStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { fetchUserData } from "@/workflows/sequential-and-parallel"; +import { fetchUserData } from "@/app/workflows/sequential-and-parallel"; // POST /api/sequential-and-parallel { userId } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/timeouts.ts b/docs/lib/registry/snippets/timeouts.ts index 78a5466d9a..376fbea58d 100644 --- a/docs/lib/registry/snippets/timeouts.ts +++ b/docs/lib/registry/snippets/timeouts.ts @@ -197,7 +197,7 @@ async function sendApprovalRequest( export const timeoutsStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { processWithTimeout } from "@/workflows/timeouts"; +import { processWithTimeout } from "@/app/workflows/timeouts"; // POST /api/timeouts { data } export async function POST(request: Request) { diff --git a/docs/lib/registry/snippets/upgrading-workflows.ts b/docs/lib/registry/snippets/upgrading-workflows.ts index 30d1584b48..d254b9ac13 100644 --- a/docs/lib/registry/snippets/upgrading-workflows.ts +++ b/docs/lib/registry/snippets/upgrading-workflows.ts @@ -422,7 +422,7 @@ async function emitEvent(event: UpgradeEvent): Promise { export const upgradingWorkflowsStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { upgradingWorkflow, type WorkflowState } from "@/workflows/upgrading-workflow"; +import { upgradingWorkflow, type WorkflowState } from 
"@/app/workflows/upgrading-workflows; // POST /api/upgrade — starts the first iteration of the chain. export async function POST(request: Request) { @@ -436,7 +436,7 @@ export async function POST(request: Request) { `; export const upgradingWorkflowsResumeRouteSource = `import { NextResponse } from "next/server"; -import { resumeHook } from "@/workflows/upgrading-workflow"; +import { resumeHook } from "@/app/workflows/upgrading-workflows; // POST /api/upgrade/resume { runId, amount } // Resumes the active iteration, triggering a state update and a spawn. diff --git a/docs/lib/registry/snippets/webhooks.ts b/docs/lib/registry/snippets/webhooks.ts index 19008aafa9..8caf57f208 100644 --- a/docs/lib/registry/snippets/webhooks.ts +++ b/docs/lib/registry/snippets/webhooks.ts @@ -258,7 +258,7 @@ async function processCallback( export const webhooksStartRouteSource = `import { start, getRun } from "workflow/api"; import { NextResponse } from "next/server"; -import { paymentWebhook } from "@/workflows/webhooks"; +import { paymentWebhook } from "@/app/workflows/webhooks"; // POST /api/webhooks { orderId } // Returns the auto-generated webhook URL — register it with the external service. 
diff --git a/docs/lib/registry/snippets/workflow-composition.ts b/docs/lib/registry/snippets/workflow-composition.ts index da68495e5e..9e7e1bb5dd 100644 --- a/docs/lib/registry/snippets/workflow-composition.ts +++ b/docs/lib/registry/snippets/workflow-composition.ts @@ -216,7 +216,7 @@ async function buildReport(reportId: string): Promise { export const workflowCompositionStartRouteSource = `import { start } from "workflow/api"; import { NextResponse } from "next/server"; -import { onboardUser } from "@/workflows/workflow-composition"; +import { onboardUser } from "@/app/workflows/workflow-composition"; // POST /api/workflow-composition { userId } export async function POST(request: Request) { From 8c09f4081c3c748ed0300e68f08a85484726b11a Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:04:39 -0700 Subject: [PATCH 11/21] fix: rename all installed workflow files to end in -workflow.ts Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 82 +++++++++++++++++------------------ 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index e20dc396d4..f42a7c3a3e 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -162,7 +162,7 @@ export const registryItems: RegistryItem[] = [ ], files: [ { - path: 'workflows/agent-cancellation.ts', + path: 'workflows/agent-cancellation-workflow.ts', description: 'Durable agent + `stopHook` + `Promise.race` exit, with a final `data-stopped` part emitted on stop.', }, @@ -171,7 +171,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/agent-cancellation.ts', + caption: 'workflows/agent-cancellation-workflow.ts', code: agentCancellationWorkflowSource, installCode: agentCancellationWorkflowInstallSource, }, @@ -212,7 +212,7 @@ export const registryItems: RegistryItem[] = [ label: 'Stop Signal', lang: 'tsx', caption: - 'workflows/agent-cancellation.ts — 
hook + Promise.race graceful exit', + 'workflows/agent-cancellation-workflow.ts — hook + Promise.race graceful exit', code: agentCancellationConceptStopSignalSource, }, { @@ -361,7 +361,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/ai-sdk', files: [ { - path: 'workflows/ai-sdk.ts', + path: 'workflows/ai-sdk-workflow.ts', description: 'The durable chat workflow — `supportWorkflow()` + `turnHook` + tool steps. One run = one full conversation.', }, @@ -380,7 +380,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/ai-sdk.ts', + caption: 'workflows/ai-sdk-workflow.ts', code: aiSdkWorkflowSource, installCode: aiSdkWorkflowInstallSource, }, @@ -525,7 +525,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/durable-agent', files: [ { - path: 'workflows/durable-agent.ts', + path: 'workflows/durable-agent-workflow.ts', description: 'The durable agent workflow — `flightAgent()` orchestrator + three tool steps (`searchFlights`, `bookFlight`, `checkWeather`). 
Replace the tools with your own.', }, @@ -544,7 +544,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/durable-agent.ts', + caption: 'workflows/durable-agent-workflow.ts', code: durableAgentWorkflowSource, installCode: durableAgentWorkflowInstallSource, }, @@ -626,7 +626,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/human-in-the-loop', files: [ { - path: 'workflows/human-in-the-loop.ts', + path: 'workflows/human-in-the-loop-workflow.ts', description: 'Durable agent + `approvalHook` + the `requestApproval` tool that races the hook against a 24h `sleep()` and streams resolution parts.', }, @@ -650,7 +650,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/human-in-the-loop.ts', + caption: 'workflows/human-in-the-loop-workflow.ts', code: humanInTheLoopWorkflowSource, installCode: humanInTheLoopWorkflowInstallSource, }, @@ -752,12 +752,12 @@ export const registryItems: RegistryItem[] = [ 'The `Chat` singleton — adapters, state backend, and `ThreadState` type that holds the `runId` per thread.', }, { - path: 'workflows/chat-sdk.ts', + path: 'workflows/chat-sdk-workflow.ts', description: 'The durable session workflow — `durableChatSession()` + `chatTurnHook`, with platform side-effects in dynamic-import steps.', }, { - path: 'workflows/chat-turn-hook.ts', + path: 'workflows/chat-turn-hook-workflow.ts', description: 'Stand-alone `ChatTurnPayload` type so the webhook handler can import it without pulling in the workflow module.', }, @@ -782,14 +782,14 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/chat-sdk.ts', + caption: 'workflows/chat-sdk-workflow.ts', code: chatSdkWorkflowSource, installCode: chatSdkWorkflowInstallSource, }, { label: 'Hook type', lang: 'tsx', - caption: 'workflows/chat-turn-hook.ts', + caption: 'workflows/chat-turn-hook-workflow.ts', 
code: chatSdkHookTypeSource, installCode: chatSdkHookTypeInstallSource, }, @@ -911,7 +911,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/sandbox', files: [ { - path: 'workflows/sandbox-session.ts', + path: 'workflows/sandbox-session-workflow.ts', description: 'The durable session workflow — `sandboxSessionWorkflow()` + `commandHook`, with idle hibernation and proactive sandbox refresh built in.', }, @@ -935,7 +935,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/sandbox-session.ts', + caption: 'workflows/sandbox-session-workflow.ts', code: sandboxWorkflowSource, installCode: sandboxWorkflowInstallSource, }, @@ -964,7 +964,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Quickstart', lang: 'tsx', - caption: 'workflows/sandbox-pipeline.ts', + caption: 'workflows/sandbox-pipeline-workflow.ts', description: 'Before the full session pattern, the simplest shape. Each `Sandbox` method is an implicit step, so the event log records every command and the workflow replays from the last completed call on restart.', code: sandboxUsageSource, @@ -1057,7 +1057,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/batching', files: [ { - path: 'workflows/batching.ts', + path: 'workflows/batching-workflow.ts', description: 'Generic `batchImport()` — chunks records, runs each batch with Promise.allSettled, paces with sleep(), returns a tally + failure list.', }, @@ -1070,7 +1070,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/batching.ts', + caption: 'workflows/batching-workflow.ts', description: 'The workflow splits records into chunks, processes each chunk concurrently, tracks results per batch, and returns a final tally. 
Each record runs in its own `"use step"` function with full Node.js access and automatic retries.', code: batchingWorkflowSource, @@ -1139,7 +1139,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/idempotency', files: [ { - path: 'workflows/idempotency.ts', + path: 'workflows/idempotency-workflow.ts', description: '`chargeCustomer()` workflow — Stripe charge + receipt, both keyed by their step IDs so retries dedupe automatically.', }, @@ -1152,7 +1152,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/idempotency.ts', + caption: 'workflows/idempotency-workflow.ts', code: idempotencyWorkflowSource, installCode: idempotencyWorkflowInstallSource, }, @@ -1213,7 +1213,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/rate-limiting', files: [ { - path: 'workflows/rate-limiting.ts', + path: 'workflows/rate-limiting-workflow.ts', description: '`syncContact()` — Retry-After header on 429, exponential backoff on 5xx, `maxRetries = 10` override for known-flaky endpoints.', }, @@ -1226,7 +1226,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/rate-limiting.ts', + caption: 'workflows/rate-limiting-workflow.ts', code: rateLimitingWorkflowSource, installCode: rateLimitingWorkflowInstallSource, }, @@ -1295,7 +1295,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/saga', files: [ { - path: 'workflows/saga.ts', + path: 'workflows/saga-workflow.ts', description: 'Subscription-upgrade saga — three forward steps, three matching idempotent compensations, LIFO unwind on FatalError.', }, @@ -1308,7 +1308,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/saga.ts', + caption: 'workflows/saga-workflow.ts', code: sagaWorkflowSource, installCode: sagaWorkflowInstallSource, }, @@ -1378,7 +1378,7 @@ 
export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/scheduling', files: [ { - path: 'workflows/scheduling.ts', + path: 'workflows/scheduling-workflow.ts', description: '`scheduleAction()` workflow + exported `cancelSchedule` hook + `runAction` step you customise per use case.', }, @@ -1396,7 +1396,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/scheduling.ts', + caption: 'workflows/scheduling-workflow.ts', code: schedulingWorkflowSource, installCode: schedulingWorkflowInstallSource, }, @@ -1477,7 +1477,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/sequential-and-parallel', files: [ { - path: 'workflows/sequential-and-parallel.ts', + path: 'workflows/sequential-and-parallel-workflow.ts', description: 'Three entry points — pipeline, fan-out, race — over a small set of placeholder steps you replace with real work.', }, @@ -1490,7 +1490,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/sequential-and-parallel.ts', + caption: 'workflows/sequential-and-parallel-workflow.ts', code: sequentialAndParallelWorkflowSource, installCode: sequentialAndParallelWorkflowInstallSource, }, @@ -1567,7 +1567,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/timeouts', files: [ { - path: 'workflows/timeouts.ts', + path: 'workflows/timeouts-workflow.ts', description: 'Three entry points — hard timeout, soft timeout with fallback, and a webhook racing a 7-day deadline.', }, @@ -1580,7 +1580,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/timeouts.ts', + caption: 'workflows/timeouts-workflow.ts', code: timeoutsWorkflowSource, installCode: timeoutsWorkflowInstallSource, }, @@ -1654,7 +1654,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/webhooks', 
files: [ { - path: 'workflows/webhooks.ts', + path: 'workflows/webhooks-workflow.ts', description: 'Two patterns — `paymentWebhook()` (long-running event ledger) and `asyncVerification()` (request-reply with deadline).', }, @@ -1668,7 +1668,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Event listener', lang: 'tsx', - caption: 'workflows/webhooks.ts', + caption: 'workflows/webhooks-workflow.ts', description: 'Long-running listener that processes multiple requests from one URL and exits on a terminal event — Stripe-style payment ledger.', code: webhooksEventListenerSource, @@ -1677,7 +1677,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Request-reply', lang: 'tsx', - caption: 'workflows/webhooks.ts', + caption: 'workflows/webhooks-workflow.ts', description: 'Submit a request to an external vendor with your webhook URL as the callback, then race the response against a 30-second deadline.', code: webhooksRequestReplySource, @@ -1747,7 +1747,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/workflow-composition', files: [ { - path: 'workflows/workflow-composition.ts', + path: 'workflows/workflow-composition-workflow.ts', description: 'Parent + child workflows demonstrating both direct-await flattening and background spawn via `start()` from a step.', }, @@ -1760,7 +1760,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/workflow-composition.ts', + caption: 'workflows/workflow-composition-workflow.ts', code: workflowCompositionWorkflowSource, installCode: workflowCompositionWorkflowInstallSource, }, @@ -1843,7 +1843,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/child-workflows', files: [ { - path: 'workflows/child-workflows.ts', + path: 'workflows/child-workflows-workflow.ts', description: '`processDocumentBatch()` parent + `processDocument()` child + chunked spawn step + durable polling loop + 
result-collection step.', }, @@ -1857,7 +1857,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Workflow', lang: 'tsx', - caption: 'workflows/child-workflows.ts', + caption: 'workflows/child-workflows-workflow.ts', code: childWorkflowsWorkflowSource, installCode: childWorkflowsWorkflowInstallSource, }, @@ -1943,7 +1943,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Lib', lang: 'tsx', - caption: 'workflows/distributed-abort-controller.ts', + caption: 'workflows/distributed-abort-controller-workflow.ts', code: distributedAbortControllerLibSource, installCode: distributedAbortControllerLibInstallSource, }, @@ -2026,7 +2026,7 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/upgrading-workflows', files: [ { - path: 'workflows/upgrading-workflows.ts', + path: 'workflows/upgrading-workflows-workflow.ts', description: 'The self-upgrading workflow — one iteration per run, blocks on `resumeHook`, computes new state, then spawns the next iteration with `deploymentId: "latest"`.', }, @@ -2045,7 +2045,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Method 1 — per-event spawn', lang: 'tsx', - caption: 'workflows/upgrading-workflows.ts', + caption: 'workflows/upgrading-workflows-workflow.ts', description: 'One run per event. After each resume, state is computed and the next iteration is spawned with `deploymentId: "latest"`. Every event automatically picks up the latest code.', code: upgradingWorkflowsWorkflowSource, @@ -2054,7 +2054,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Method 2 — explicit upgrade hook', lang: 'tsx', - caption: 'workflows/upgrading-workflows.ts', + caption: 'workflows/upgrading-workflows-workflow.ts', description: 'Long-running loop that handles many events per run. 
A separate `upgradeHook` races the work hook — fire it when you want to force a respawn on the latest deployment.', code: upgradingWorkflowsMethod2Source, From 18d83a2312939e6d3d87067390e8a08cb44c442b Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:08:25 -0700 Subject: [PATCH 12/21] fix: fold ChatTurnPayload into chat-sdk-workflow.ts, remove separate hook type file Co-authored-by: Cursor --- docs/lib/registry/manifest.ts | 14 -------------- docs/lib/registry/snippets/chat-sdk.ts | 17 ++++++++++------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/docs/lib/registry/manifest.ts b/docs/lib/registry/manifest.ts index f42a7c3a3e..273cff51e4 100644 --- a/docs/lib/registry/manifest.ts +++ b/docs/lib/registry/manifest.ts @@ -23,8 +23,6 @@ import { import { chatSdkBotSource, chatSdkHandlersSource, - chatSdkHookTypeSource, - chatSdkHookTypeInstallSource, chatSdkWebhookSource, chatSdkWorkflowSource, chatSdkWorkflowInstallSource, @@ -756,11 +754,6 @@ export const registryItems: RegistryItem[] = [ description: 'The durable session workflow — `durableChatSession()` + `chatTurnHook`, with platform side-effects in dynamic-import steps.', }, - { - path: 'workflows/chat-turn-hook-workflow.ts', - description: - 'Stand-alone `ChatTurnPayload` type so the webhook handler can import it without pulling in the workflow module.', - }, { path: 'lib/chat-session-handlers.ts', description: @@ -786,13 +779,6 @@ export const registryItems: RegistryItem[] = [ code: chatSdkWorkflowSource, installCode: chatSdkWorkflowInstallSource, }, - { - label: 'Hook type', - lang: 'tsx', - caption: 'workflows/chat-turn-hook-workflow.ts', - code: chatSdkHookTypeSource, - installCode: chatSdkHookTypeInstallSource, - }, { label: 'Handlers', lang: 'tsx', diff --git a/docs/lib/registry/snippets/chat-sdk.ts b/docs/lib/registry/snippets/chat-sdk.ts index 96279c7144..00d1b43aa6 100644 --- a/docs/lib/registry/snippets/chat-sdk.ts +++ 
b/docs/lib/registry/snippets/chat-sdk.ts @@ -41,13 +41,13 @@ export const bot = new Chat({ }).registerSingleton(); `; -export const chatSdkWorkflowSource = `import { Message, reviver, type Thread } from "chat"; +export const chatSdkWorkflowSource = `import { Message, reviver, type Thread, type SerializedMessage } from "chat"; import { defineHook, getWorkflowMetadata } from "workflow"; import type { ThreadState } from "@/lib/bot"; -// Hook payload type lives in its own file so the webhook side can import -// it without pulling in the workflow module. -import type { ChatTurnPayload } from "@/app/workflows/chat-turn-hook"; +// Hook payload type — exported so webhook handlers can import it without +// pulling in workflow-specific modules. +export type ChatTurnPayload = { message: SerializedMessage }; const chatTurnHook = defineHook(); @@ -154,10 +154,13 @@ export const chatSdkWorkflowInstallSource = `/** * * DOCS: https://workflow-sdk.dev/patterns/chat-sdk */ -import { Message, reviver, type Thread } from "chat"; +import { Message, reviver, type Thread, type SerializedMessage } from "chat"; import { defineHook, getWorkflowMetadata } from "workflow"; import type { ThreadState } from "@/lib/bot"; -import type { ChatTurnPayload } from "@/app/workflows/chat-turn-hook"; + +// Hook payload type — exported so webhook handlers can import it without +// pulling in workflow-specific modules. +export type ChatTurnPayload = { message: SerializedMessage }; // One hook per run, token = runId. Reused every turn (created once outside // the loop to avoid HookConflictError on subsequent turns). 
@@ -271,7 +274,7 @@ export const chatSdkHandlersSource = `import type { Message, Thread } from "chat import { getRun, resumeHook, start } from "workflow/api"; import { bot, type ThreadState } from "@/lib/bot"; import { durableChatSession } from "@/app/workflows/chat-sdk"; -import type { ChatTurnPayload } from "@/app/workflows/chat-turn-hook"; +import type { ChatTurnPayload } from "@/app/workflows/chat-sdk-workflow"; async function startSession( thread: Thread, From 5fdf9d0a71454329ed8c02619cb831527bf1783e Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:16:47 -0700 Subject: [PATCH 13/21] refactor: rename lib/registry and components/registry to lib/patterns and components/patterns Co-authored-by: Cursor --- docs/app/[lang]/patterns/[id]/page.tsx | 12 ++++++------ docs/app/[lang]/patterns/page.tsx | 4 ++-- docs/app/r/[name]/route.ts | 2 +- docs/app/r/route.ts | 2 +- .../{registry => patterns}/RegistryCard.tsx | 4 ++-- .../{registry => patterns}/RegistryCodeTabs.tsx | 0 .../{registry => patterns}/RegistryDetailHero.tsx | 2 +- .../{registry => patterns}/RegistryDetailToc.tsx | 2 +- .../{registry => patterns}/RegistryGrid.tsx | 4 ++-- .../{registry => patterns}/RegistryInstallTabs.tsx | 0 .../{registry => patterns}/logos/index.tsx | 2 +- .../logos/logo-agent-cancellation.tsx | 0 .../{registry => patterns}/logos/logo-ai-sdk.tsx | 0 .../{registry => patterns}/logos/logo-batching.tsx | 0 .../{registry => patterns}/logos/logo-chat-sdk.tsx | 0 .../logos/logo-child-workflows.tsx | 0 .../logos/logo-distributed-abort-controller.tsx | 0 .../logos/logo-durable-agent.tsx | 0 .../logos/logo-human-in-the-loop.tsx | 0 .../logos/logo-idempotency.tsx | 0 .../logos/logo-rate-limiting.tsx | 0 .../{registry => patterns}/logos/logo-resend.tsx | 0 .../{registry => patterns}/logos/logo-saga.tsx | 0 .../{registry => patterns}/logos/logo-sandbox.tsx | 0 .../{registry => patterns}/logos/logo-scheduling.tsx | 0 .../logos/logo-sequential-and-parallel.tsx | 0 
.../{registry => patterns}/logos/logo-timeouts.tsx | 0 .../logos/logo-upgrading-workflows.tsx | 0 .../{registry => patterns}/logos/logo-webhooks.tsx | 0 .../logos/logo-workflow-composition.tsx | 0 docs/lib/{registry => patterns}/manifest.ts | 0 .../snippets/agent-cancellation.ts | 0 docs/lib/{registry => patterns}/snippets/ai-sdk.ts | 0 docs/lib/{registry => patterns}/snippets/batching.ts | 0 docs/lib/{registry => patterns}/snippets/chat-sdk.ts | 0 .../snippets/child-workflows.ts | 0 .../snippets/distributed-abort-controller.ts | 0 .../{registry => patterns}/snippets/durable-agent.ts | 0 .../snippets/human-in-the-loop.ts | 0 .../{registry => patterns}/snippets/idempotency.ts | 0 .../{registry => patterns}/snippets/rate-limiting.ts | 0 docs/lib/{registry => patterns}/snippets/resend.ts | 0 docs/lib/{registry => patterns}/snippets/saga.ts | 0 docs/lib/{registry => patterns}/snippets/sandbox.ts | 0 .../{registry => patterns}/snippets/scheduling.ts | 0 .../snippets/sequential-and-parallel.ts | 0 docs/lib/{registry => patterns}/snippets/timeouts.ts | 0 .../snippets/upgrading-workflows.ts | 0 docs/lib/{registry => patterns}/snippets/webhooks.ts | 0 .../snippets/workflow-composition.ts | 0 docs/lib/{registry => patterns}/types.ts | 12 ++++++------ 51 files changed, 23 insertions(+), 23 deletions(-) rename docs/components/{registry => patterns}/RegistryCard.tsx (94%) rename docs/components/{registry => patterns}/RegistryCodeTabs.tsx (100%) rename docs/components/{registry => patterns}/RegistryDetailHero.tsx (98%) rename docs/components/{registry => patterns}/RegistryDetailToc.tsx (97%) rename docs/components/{registry => patterns}/RegistryGrid.tsx (95%) rename docs/components/{registry => patterns}/RegistryInstallTabs.tsx (100%) rename docs/components/{registry => patterns}/logos/index.tsx (97%) rename docs/components/{registry => patterns}/logos/logo-agent-cancellation.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-ai-sdk.tsx (100%) rename 
docs/components/{registry => patterns}/logos/logo-batching.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-chat-sdk.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-child-workflows.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-distributed-abort-controller.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-durable-agent.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-human-in-the-loop.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-idempotency.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-rate-limiting.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-resend.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-saga.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-sandbox.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-scheduling.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-sequential-and-parallel.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-timeouts.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-upgrading-workflows.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-webhooks.tsx (100%) rename docs/components/{registry => patterns}/logos/logo-workflow-composition.tsx (100%) rename docs/lib/{registry => patterns}/manifest.ts (100%) rename docs/lib/{registry => patterns}/snippets/agent-cancellation.ts (100%) rename docs/lib/{registry => patterns}/snippets/ai-sdk.ts (100%) rename docs/lib/{registry => patterns}/snippets/batching.ts (100%) rename docs/lib/{registry => patterns}/snippets/chat-sdk.ts (100%) rename docs/lib/{registry => patterns}/snippets/child-workflows.ts (100%) rename docs/lib/{registry => patterns}/snippets/distributed-abort-controller.ts (100%) rename docs/lib/{registry => patterns}/snippets/durable-agent.ts (100%) rename docs/lib/{registry => 
patterns}/snippets/human-in-the-loop.ts (100%) rename docs/lib/{registry => patterns}/snippets/idempotency.ts (100%) rename docs/lib/{registry => patterns}/snippets/rate-limiting.ts (100%) rename docs/lib/{registry => patterns}/snippets/resend.ts (100%) rename docs/lib/{registry => patterns}/snippets/saga.ts (100%) rename docs/lib/{registry => patterns}/snippets/sandbox.ts (100%) rename docs/lib/{registry => patterns}/snippets/scheduling.ts (100%) rename docs/lib/{registry => patterns}/snippets/sequential-and-parallel.ts (100%) rename docs/lib/{registry => patterns}/snippets/timeouts.ts (100%) rename docs/lib/{registry => patterns}/snippets/upgrading-workflows.ts (100%) rename docs/lib/{registry => patterns}/snippets/webhooks.ts (100%) rename docs/lib/{registry => patterns}/snippets/workflow-composition.ts (100%) rename docs/lib/{registry => patterns}/types.ts (96%) diff --git a/docs/app/[lang]/patterns/[id]/page.tsx b/docs/app/[lang]/patterns/[id]/page.tsx index d1c179b0ba..0e5a4c08a0 100644 --- a/docs/app/[lang]/patterns/[id]/page.tsx +++ b/docs/app/[lang]/patterns/[id]/page.tsx @@ -3,15 +3,15 @@ import type { Metadata } from 'next'; import { notFound } from 'next/navigation'; import { codeToHtml } from 'shiki'; import { Mermaid } from '@/components/geistdocs/mermaid'; -import { RegistryCodeTabs } from '@/components/registry/RegistryCodeTabs'; -import { RegistryDetailHero } from '@/components/registry/RegistryDetailHero'; +import { RegistryCodeTabs } from '@/components/patterns/RegistryCodeTabs'; +import { RegistryDetailHero } from '@/components/patterns/RegistryDetailHero'; import { RegistryDetailToc, type RegistryTocItem, -} from '@/components/registry/RegistryDetailToc'; -import { RegistryInstallTabs } from '@/components/registry/RegistryInstallTabs'; -import { getRegistryItem, getRegistryItemIds } from '@/lib/registry/manifest'; -import type { RegistryGuide, RegistrySnippet } from '@/lib/registry/types'; +} from '@/components/patterns/RegistryDetailToc'; 
+import { RegistryInstallTabs } from '@/components/patterns/RegistryInstallTabs'; +import { getRegistryItem, getRegistryItemIds } from '@/lib/patterns/manifest'; +import type { RegistryGuide, RegistrySnippet } from '@/lib/patterns/types'; import { cn } from '@/lib/utils'; interface PageProps { diff --git a/docs/app/[lang]/patterns/page.tsx b/docs/app/[lang]/patterns/page.tsx index 0375c4718b..79b1ff5ddb 100644 --- a/docs/app/[lang]/patterns/page.tsx +++ b/docs/app/[lang]/patterns/page.tsx @@ -1,8 +1,8 @@ import type { Metadata } from 'next'; import Link from 'next/link'; import { Button } from '@/components/ui/button'; -import { RegistryGrid } from '@/components/registry/RegistryGrid'; -import { registryItems } from '@/lib/registry/manifest'; +import { RegistryGrid } from '@/components/patterns/RegistryGrid'; +import { registryItems } from '@/lib/patterns/manifest'; export const metadata: Metadata = { title: 'Patterns | Workflow SDK', diff --git a/docs/app/r/[name]/route.ts b/docs/app/r/[name]/route.ts index fad55d428e..aea703df9f 100644 --- a/docs/app/r/[name]/route.ts +++ b/docs/app/r/[name]/route.ts @@ -18,7 +18,7 @@ */ import { NextResponse } from 'next/server'; -import { registryItems } from '@/lib/registry/manifest'; +import { registryItems } from '@/lib/patterns/manifest'; const WORKFLOW_PATH_PREFIX = 'workflows/'; diff --git a/docs/app/r/route.ts b/docs/app/r/route.ts index 9bcccbbd92..ba35e2c102 100644 --- a/docs/app/r/route.ts +++ b/docs/app/r/route.ts @@ -10,7 +10,7 @@ */ import { NextResponse } from 'next/server'; -import { registryItems } from '@/lib/registry/manifest'; +import { registryItems } from '@/lib/patterns/manifest'; export const dynamic = 'force-dynamic'; diff --git a/docs/components/registry/RegistryCard.tsx b/docs/components/patterns/RegistryCard.tsx similarity index 94% rename from docs/components/registry/RegistryCard.tsx rename to docs/components/patterns/RegistryCard.tsx index ba9e035c6b..09b3bf22d5 100644 --- 
a/docs/components/registry/RegistryCard.tsx +++ b/docs/components/patterns/RegistryCard.tsx @@ -7,8 +7,8 @@ import { CardHeader, CardTitle, } from '@/components/ui/card'; -import { categoryLabels } from '@/lib/registry/manifest'; -import type { RegistryItem } from '@/lib/registry/types'; +import { categoryLabels } from '@/lib/patterns/manifest'; +import type { RegistryItem } from '@/lib/patterns/types'; import { getProviderLogo } from './logos'; interface RegistryCardProps { diff --git a/docs/components/registry/RegistryCodeTabs.tsx b/docs/components/patterns/RegistryCodeTabs.tsx similarity index 100% rename from docs/components/registry/RegistryCodeTabs.tsx rename to docs/components/patterns/RegistryCodeTabs.tsx diff --git a/docs/components/registry/RegistryDetailHero.tsx b/docs/components/patterns/RegistryDetailHero.tsx similarity index 98% rename from docs/components/registry/RegistryDetailHero.tsx rename to docs/components/patterns/RegistryDetailHero.tsx index 74537724a7..9fe27b952c 100644 --- a/docs/components/registry/RegistryDetailHero.tsx +++ b/docs/components/patterns/RegistryDetailHero.tsx @@ -8,7 +8,7 @@ import { BreadcrumbPage, BreadcrumbSeparator, } from '@/components/ui/breadcrumb'; -import type { RegistryItem } from '@/lib/registry/types'; +import type { RegistryItem } from '@/lib/patterns/types'; import { getProviderLogo } from './logos'; interface RegistryDetailHeroProps { diff --git a/docs/components/registry/RegistryDetailToc.tsx b/docs/components/patterns/RegistryDetailToc.tsx similarity index 97% rename from docs/components/registry/RegistryDetailToc.tsx rename to docs/components/patterns/RegistryDetailToc.tsx index a8ec78dba9..6ed7e83ba8 100644 --- a/docs/components/registry/RegistryDetailToc.tsx +++ b/docs/components/patterns/RegistryDetailToc.tsx @@ -54,7 +54,7 @@ export function RegistryDetailToc({ if (items.length === 0) return null; const githubEditUrl = githubPath - ? 
`https://github.com/vercel/workflow/edit/main/docs/lib/registry/${githubPath}` + ? `https://github.com/vercel/workflow/edit/main/docs/lib/patterns/${githubPath}` : undefined; return ( diff --git a/docs/components/registry/RegistryGrid.tsx b/docs/components/patterns/RegistryGrid.tsx similarity index 95% rename from docs/components/registry/RegistryGrid.tsx rename to docs/components/patterns/RegistryGrid.tsx index 07cc5eb03d..74e191fba2 100644 --- a/docs/components/registry/RegistryGrid.tsx +++ b/docs/components/patterns/RegistryGrid.tsx @@ -2,8 +2,8 @@ import { useState } from 'react'; import { Badge } from '@/components/ui/badge'; -import { categoryLabels } from '@/lib/registry/manifest'; -import type { RegistryCategory, RegistryItem } from '@/lib/registry/types'; +import { categoryLabels } from '@/lib/patterns/manifest'; +import type { RegistryCategory, RegistryItem } from '@/lib/patterns/types'; import { RegistryCard } from './RegistryCard'; type Filter = 'all' | RegistryCategory; diff --git a/docs/components/registry/RegistryInstallTabs.tsx b/docs/components/patterns/RegistryInstallTabs.tsx similarity index 100% rename from docs/components/registry/RegistryInstallTabs.tsx rename to docs/components/patterns/RegistryInstallTabs.tsx diff --git a/docs/components/registry/logos/index.tsx b/docs/components/patterns/logos/index.tsx similarity index 97% rename from docs/components/registry/logos/index.tsx rename to docs/components/patterns/logos/index.tsx index 098c47301c..c57f7f4bf0 100644 --- a/docs/components/registry/logos/index.tsx +++ b/docs/components/patterns/logos/index.tsx @@ -1,5 +1,5 @@ import type { ComponentType } from 'react'; -import type { RegistryLogoId } from '@/lib/registry/types'; +import type { RegistryLogoId } from '@/lib/patterns/types'; import { LogoAgentCancellation } from './logo-agent-cancellation'; import { LogoAiSdk } from './logo-ai-sdk'; import { LogoBatching } from './logo-batching'; diff --git 
a/docs/components/registry/logos/logo-agent-cancellation.tsx b/docs/components/patterns/logos/logo-agent-cancellation.tsx similarity index 100% rename from docs/components/registry/logos/logo-agent-cancellation.tsx rename to docs/components/patterns/logos/logo-agent-cancellation.tsx diff --git a/docs/components/registry/logos/logo-ai-sdk.tsx b/docs/components/patterns/logos/logo-ai-sdk.tsx similarity index 100% rename from docs/components/registry/logos/logo-ai-sdk.tsx rename to docs/components/patterns/logos/logo-ai-sdk.tsx diff --git a/docs/components/registry/logos/logo-batching.tsx b/docs/components/patterns/logos/logo-batching.tsx similarity index 100% rename from docs/components/registry/logos/logo-batching.tsx rename to docs/components/patterns/logos/logo-batching.tsx diff --git a/docs/components/registry/logos/logo-chat-sdk.tsx b/docs/components/patterns/logos/logo-chat-sdk.tsx similarity index 100% rename from docs/components/registry/logos/logo-chat-sdk.tsx rename to docs/components/patterns/logos/logo-chat-sdk.tsx diff --git a/docs/components/registry/logos/logo-child-workflows.tsx b/docs/components/patterns/logos/logo-child-workflows.tsx similarity index 100% rename from docs/components/registry/logos/logo-child-workflows.tsx rename to docs/components/patterns/logos/logo-child-workflows.tsx diff --git a/docs/components/registry/logos/logo-distributed-abort-controller.tsx b/docs/components/patterns/logos/logo-distributed-abort-controller.tsx similarity index 100% rename from docs/components/registry/logos/logo-distributed-abort-controller.tsx rename to docs/components/patterns/logos/logo-distributed-abort-controller.tsx diff --git a/docs/components/registry/logos/logo-durable-agent.tsx b/docs/components/patterns/logos/logo-durable-agent.tsx similarity index 100% rename from docs/components/registry/logos/logo-durable-agent.tsx rename to docs/components/patterns/logos/logo-durable-agent.tsx diff --git 
a/docs/components/registry/logos/logo-human-in-the-loop.tsx b/docs/components/patterns/logos/logo-human-in-the-loop.tsx similarity index 100% rename from docs/components/registry/logos/logo-human-in-the-loop.tsx rename to docs/components/patterns/logos/logo-human-in-the-loop.tsx diff --git a/docs/components/registry/logos/logo-idempotency.tsx b/docs/components/patterns/logos/logo-idempotency.tsx similarity index 100% rename from docs/components/registry/logos/logo-idempotency.tsx rename to docs/components/patterns/logos/logo-idempotency.tsx diff --git a/docs/components/registry/logos/logo-rate-limiting.tsx b/docs/components/patterns/logos/logo-rate-limiting.tsx similarity index 100% rename from docs/components/registry/logos/logo-rate-limiting.tsx rename to docs/components/patterns/logos/logo-rate-limiting.tsx diff --git a/docs/components/registry/logos/logo-resend.tsx b/docs/components/patterns/logos/logo-resend.tsx similarity index 100% rename from docs/components/registry/logos/logo-resend.tsx rename to docs/components/patterns/logos/logo-resend.tsx diff --git a/docs/components/registry/logos/logo-saga.tsx b/docs/components/patterns/logos/logo-saga.tsx similarity index 100% rename from docs/components/registry/logos/logo-saga.tsx rename to docs/components/patterns/logos/logo-saga.tsx diff --git a/docs/components/registry/logos/logo-sandbox.tsx b/docs/components/patterns/logos/logo-sandbox.tsx similarity index 100% rename from docs/components/registry/logos/logo-sandbox.tsx rename to docs/components/patterns/logos/logo-sandbox.tsx diff --git a/docs/components/registry/logos/logo-scheduling.tsx b/docs/components/patterns/logos/logo-scheduling.tsx similarity index 100% rename from docs/components/registry/logos/logo-scheduling.tsx rename to docs/components/patterns/logos/logo-scheduling.tsx diff --git a/docs/components/registry/logos/logo-sequential-and-parallel.tsx b/docs/components/patterns/logos/logo-sequential-and-parallel.tsx similarity index 100% rename from 
docs/components/registry/logos/logo-sequential-and-parallel.tsx rename to docs/components/patterns/logos/logo-sequential-and-parallel.tsx diff --git a/docs/components/registry/logos/logo-timeouts.tsx b/docs/components/patterns/logos/logo-timeouts.tsx similarity index 100% rename from docs/components/registry/logos/logo-timeouts.tsx rename to docs/components/patterns/logos/logo-timeouts.tsx diff --git a/docs/components/registry/logos/logo-upgrading-workflows.tsx b/docs/components/patterns/logos/logo-upgrading-workflows.tsx similarity index 100% rename from docs/components/registry/logos/logo-upgrading-workflows.tsx rename to docs/components/patterns/logos/logo-upgrading-workflows.tsx diff --git a/docs/components/registry/logos/logo-webhooks.tsx b/docs/components/patterns/logos/logo-webhooks.tsx similarity index 100% rename from docs/components/registry/logos/logo-webhooks.tsx rename to docs/components/patterns/logos/logo-webhooks.tsx diff --git a/docs/components/registry/logos/logo-workflow-composition.tsx b/docs/components/patterns/logos/logo-workflow-composition.tsx similarity index 100% rename from docs/components/registry/logos/logo-workflow-composition.tsx rename to docs/components/patterns/logos/logo-workflow-composition.tsx diff --git a/docs/lib/registry/manifest.ts b/docs/lib/patterns/manifest.ts similarity index 100% rename from docs/lib/registry/manifest.ts rename to docs/lib/patterns/manifest.ts diff --git a/docs/lib/registry/snippets/agent-cancellation.ts b/docs/lib/patterns/snippets/agent-cancellation.ts similarity index 100% rename from docs/lib/registry/snippets/agent-cancellation.ts rename to docs/lib/patterns/snippets/agent-cancellation.ts diff --git a/docs/lib/registry/snippets/ai-sdk.ts b/docs/lib/patterns/snippets/ai-sdk.ts similarity index 100% rename from docs/lib/registry/snippets/ai-sdk.ts rename to docs/lib/patterns/snippets/ai-sdk.ts diff --git a/docs/lib/registry/snippets/batching.ts b/docs/lib/patterns/snippets/batching.ts similarity 
index 100% rename from docs/lib/registry/snippets/batching.ts rename to docs/lib/patterns/snippets/batching.ts diff --git a/docs/lib/registry/snippets/chat-sdk.ts b/docs/lib/patterns/snippets/chat-sdk.ts similarity index 100% rename from docs/lib/registry/snippets/chat-sdk.ts rename to docs/lib/patterns/snippets/chat-sdk.ts diff --git a/docs/lib/registry/snippets/child-workflows.ts b/docs/lib/patterns/snippets/child-workflows.ts similarity index 100% rename from docs/lib/registry/snippets/child-workflows.ts rename to docs/lib/patterns/snippets/child-workflows.ts diff --git a/docs/lib/registry/snippets/distributed-abort-controller.ts b/docs/lib/patterns/snippets/distributed-abort-controller.ts similarity index 100% rename from docs/lib/registry/snippets/distributed-abort-controller.ts rename to docs/lib/patterns/snippets/distributed-abort-controller.ts diff --git a/docs/lib/registry/snippets/durable-agent.ts b/docs/lib/patterns/snippets/durable-agent.ts similarity index 100% rename from docs/lib/registry/snippets/durable-agent.ts rename to docs/lib/patterns/snippets/durable-agent.ts diff --git a/docs/lib/registry/snippets/human-in-the-loop.ts b/docs/lib/patterns/snippets/human-in-the-loop.ts similarity index 100% rename from docs/lib/registry/snippets/human-in-the-loop.ts rename to docs/lib/patterns/snippets/human-in-the-loop.ts diff --git a/docs/lib/registry/snippets/idempotency.ts b/docs/lib/patterns/snippets/idempotency.ts similarity index 100% rename from docs/lib/registry/snippets/idempotency.ts rename to docs/lib/patterns/snippets/idempotency.ts diff --git a/docs/lib/registry/snippets/rate-limiting.ts b/docs/lib/patterns/snippets/rate-limiting.ts similarity index 100% rename from docs/lib/registry/snippets/rate-limiting.ts rename to docs/lib/patterns/snippets/rate-limiting.ts diff --git a/docs/lib/registry/snippets/resend.ts b/docs/lib/patterns/snippets/resend.ts similarity index 100% rename from docs/lib/registry/snippets/resend.ts rename to 
docs/lib/patterns/snippets/resend.ts diff --git a/docs/lib/registry/snippets/saga.ts b/docs/lib/patterns/snippets/saga.ts similarity index 100% rename from docs/lib/registry/snippets/saga.ts rename to docs/lib/patterns/snippets/saga.ts diff --git a/docs/lib/registry/snippets/sandbox.ts b/docs/lib/patterns/snippets/sandbox.ts similarity index 100% rename from docs/lib/registry/snippets/sandbox.ts rename to docs/lib/patterns/snippets/sandbox.ts diff --git a/docs/lib/registry/snippets/scheduling.ts b/docs/lib/patterns/snippets/scheduling.ts similarity index 100% rename from docs/lib/registry/snippets/scheduling.ts rename to docs/lib/patterns/snippets/scheduling.ts diff --git a/docs/lib/registry/snippets/sequential-and-parallel.ts b/docs/lib/patterns/snippets/sequential-and-parallel.ts similarity index 100% rename from docs/lib/registry/snippets/sequential-and-parallel.ts rename to docs/lib/patterns/snippets/sequential-and-parallel.ts diff --git a/docs/lib/registry/snippets/timeouts.ts b/docs/lib/patterns/snippets/timeouts.ts similarity index 100% rename from docs/lib/registry/snippets/timeouts.ts rename to docs/lib/patterns/snippets/timeouts.ts diff --git a/docs/lib/registry/snippets/upgrading-workflows.ts b/docs/lib/patterns/snippets/upgrading-workflows.ts similarity index 100% rename from docs/lib/registry/snippets/upgrading-workflows.ts rename to docs/lib/patterns/snippets/upgrading-workflows.ts diff --git a/docs/lib/registry/snippets/webhooks.ts b/docs/lib/patterns/snippets/webhooks.ts similarity index 100% rename from docs/lib/registry/snippets/webhooks.ts rename to docs/lib/patterns/snippets/webhooks.ts diff --git a/docs/lib/registry/snippets/workflow-composition.ts b/docs/lib/patterns/snippets/workflow-composition.ts similarity index 100% rename from docs/lib/registry/snippets/workflow-composition.ts rename to docs/lib/patterns/snippets/workflow-composition.ts diff --git a/docs/lib/registry/types.ts b/docs/lib/patterns/types.ts similarity index 96% rename from 
docs/lib/registry/types.ts rename to docs/lib/patterns/types.ts index b9bfa1a4f9..9fbe06565c 100644 --- a/docs/lib/registry/types.ts +++ b/docs/lib/patterns/types.ts @@ -3,7 +3,7 @@ * * Each `RegistryItem` is a recipe (workflow + API routes + UI) you can drop * into your app via the shadcn CLI. The data here drives both the listing - * page (`/registry`) and the per-item detail page (`/registry/[id]`). + * page (`/patterns`) and the per-item detail page (`/patterns/[id]`). * * To add a new provider: * 1. Append a new `RegistryItem` to `manifest.ts`. @@ -71,11 +71,11 @@ export interface RegistrySnippet { /** * Identifier for a provider brand mark. The Card / Detail hero look this up - * in `components/registry/logos` to render the actual SVG. Adding a new + * in `components/patterns/logos` to render the actual SVG. Adding a new * provider: - * 1. Drop a `logo-.tsx` SVG component in `components/registry/logos` + * 1. Drop a `logo-.tsx` SVG component in `components/patterns/logos` * that paints with `currentColor`. - * 2. Register it in `components/registry/logos/index.ts`. + * 2. Register it in `components/patterns/logos/index.ts`. * 3. Reference its key here. */ export type RegistryLogoId = @@ -154,7 +154,7 @@ export interface RegistryApproachSection { } /** - * Inline guide content that turns the registry detail page into a unified + * Inline guide content that turns the patterns detail page into a unified * educational + plug-and-play surface. Replaces the need for a separate * cookbook page for the same pattern. */ @@ -254,7 +254,7 @@ export interface RegistryGuide { } export interface RegistryItem { - /** Slug used in the URL — `/registry/${id}`. */ + /** Slug used in the URL — `/patterns/${id}`. */ id: string; /** Display name. 
*/ name: string; From 5ed053371a4fb8c01ad0da02845aa17ab2d1c29e Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:21:55 -0700 Subject: [PATCH 14/21] refactor: replace hand-written logo SVGs with lucide-react icons, keep brand SVGs Co-authored-by: Cursor --- docs/components/patterns/logos/index.tsx | 70 ++++++++++--------- .../logos/logo-agent-cancellation.tsx | 38 ---------- .../patterns/logos/logo-batching.tsx | 55 --------------- .../patterns/logos/logo-child-workflows.tsx | 39 ----------- .../logo-distributed-abort-controller.tsx | 42 ----------- .../patterns/logos/logo-durable-agent.tsx | 39 ----------- .../patterns/logos/logo-human-in-the-loop.tsx | 35 ---------- .../patterns/logos/logo-idempotency.tsx | 37 ---------- .../patterns/logos/logo-rate-limiting.tsx | 35 ---------- docs/components/patterns/logos/logo-saga.tsx | 36 ---------- .../patterns/logos/logo-sandbox.tsx | 36 ---------- .../patterns/logos/logo-scheduling.tsx | 34 --------- .../logos/logo-sequential-and-parallel.tsx | 40 ----------- .../patterns/logos/logo-timeouts.tsx | 37 ---------- .../logos/logo-upgrading-workflows.tsx | 40 ----------- .../patterns/logos/logo-webhooks.tsx | 39 ----------- .../logos/logo-workflow-composition.tsx | 44 ------------ 17 files changed, 36 insertions(+), 660 deletions(-) delete mode 100644 docs/components/patterns/logos/logo-agent-cancellation.tsx delete mode 100644 docs/components/patterns/logos/logo-batching.tsx delete mode 100644 docs/components/patterns/logos/logo-child-workflows.tsx delete mode 100644 docs/components/patterns/logos/logo-distributed-abort-controller.tsx delete mode 100644 docs/components/patterns/logos/logo-durable-agent.tsx delete mode 100644 docs/components/patterns/logos/logo-human-in-the-loop.tsx delete mode 100644 docs/components/patterns/logos/logo-idempotency.tsx delete mode 100644 docs/components/patterns/logos/logo-rate-limiting.tsx delete mode 100644 docs/components/patterns/logos/logo-saga.tsx delete mode 
100644 docs/components/patterns/logos/logo-sandbox.tsx delete mode 100644 docs/components/patterns/logos/logo-scheduling.tsx delete mode 100644 docs/components/patterns/logos/logo-sequential-and-parallel.tsx delete mode 100644 docs/components/patterns/logos/logo-timeouts.tsx delete mode 100644 docs/components/patterns/logos/logo-upgrading-workflows.tsx delete mode 100644 docs/components/patterns/logos/logo-webhooks.tsx delete mode 100644 docs/components/patterns/logos/logo-workflow-composition.tsx diff --git a/docs/components/patterns/logos/index.tsx b/docs/components/patterns/logos/index.tsx index c57f7f4bf0..7da0cb41ba 100644 --- a/docs/components/patterns/logos/index.tsx +++ b/docs/components/patterns/logos/index.tsx @@ -1,24 +1,26 @@ import type { ComponentType } from 'react'; +import { + Ban, + Bot, + Box, + CalendarClock, + CircleStop, + Gauge, + GitFork, + Layers, + Network, + RefreshCw, + Repeat2, + Split, + ThumbsUp, + Timer, + Webhook, + Zap, +} from 'lucide-react'; import type { RegistryLogoId } from '@/lib/patterns/types'; -import { LogoAgentCancellation } from './logo-agent-cancellation'; import { LogoAiSdk } from './logo-ai-sdk'; -import { LogoBatching } from './logo-batching'; import { LogoChatSdk } from './logo-chat-sdk'; -import { LogoChildWorkflows } from './logo-child-workflows'; -import { LogoDistributedAbortController } from './logo-distributed-abort-controller'; -import { LogoDurableAgent } from './logo-durable-agent'; -import { LogoHumanInTheLoop } from './logo-human-in-the-loop'; -import { LogoIdempotency } from './logo-idempotency'; -import { LogoRateLimiting } from './logo-rate-limiting'; import { LogoResend } from './logo-resend'; -import { LogoSaga } from './logo-saga'; -import { LogoSandbox } from './logo-sandbox'; -import { LogoScheduling } from './logo-scheduling'; -import { LogoSequentialAndParallel } from './logo-sequential-and-parallel'; -import { LogoTimeouts } from './logo-timeouts'; -import { LogoWebhooks } from 
'./logo-webhooks'; -import { LogoUpgradingWorkflows } from './logo-upgrading-workflows'; -import { LogoWorkflowComposition } from './logo-workflow-composition'; export interface ProviderLogoProps { size?: number; @@ -26,8 +28,8 @@ export interface ProviderLogoProps { } /** - * Provider brand marks — keyed by `RegistryLogoId`. - * When adding a new provider, register its SVG component here. + * Pattern logos keyed by `RegistryLogoId`. + * Conceptual patterns use lucide-react icons; brand marks use custom SVGs. */ export const providerLogos: Record< RegistryLogoId, @@ -35,23 +37,23 @@ export const providerLogos: Record< > = { resend: LogoResend, 'ai-sdk': LogoAiSdk, - sandbox: LogoSandbox, 'chat-sdk': LogoChatSdk, - 'durable-agent': LogoDurableAgent, - 'human-in-the-loop': LogoHumanInTheLoop, - 'agent-cancellation': LogoAgentCancellation, - 'sequential-and-parallel': LogoSequentialAndParallel, - 'workflow-composition': LogoWorkflowComposition, - saga: LogoSaga, - batching: LogoBatching, - 'rate-limiting': LogoRateLimiting, - scheduling: LogoScheduling, - timeouts: LogoTimeouts, - idempotency: LogoIdempotency, - webhooks: LogoWebhooks, - 'child-workflows': LogoChildWorkflows, - 'distributed-abort-controller': LogoDistributedAbortController, - 'upgrading-workflows': LogoUpgradingWorkflows, + 'agent-cancellation': CircleStop, + batching: Layers, + 'child-workflows': GitFork, + 'distributed-abort-controller': Ban, + 'durable-agent': Bot, + 'human-in-the-loop': ThumbsUp, + idempotency: RefreshCw, + 'rate-limiting': Gauge, + saga: Repeat2, + sandbox: Box, + scheduling: CalendarClock, + 'sequential-and-parallel': Split, + timeouts: Timer, + 'upgrading-workflows': Zap, + webhooks: Webhook, + 'workflow-composition': Network, }; export function getProviderLogo( diff --git a/docs/components/patterns/logos/logo-agent-cancellation.tsx b/docs/components/patterns/logos/logo-agent-cancellation.tsx deleted file mode 100644 index e9da3ff548..0000000000 --- 
a/docs/components/patterns/logos/logo-agent-cancellation.tsx +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Agent Cancellation brand mark. - * - * Universal media-stop glyph — a circle with a solid square inside. Reads as - * "stop the running thing" in any chat UI. The outer circle is stroked and - * the inner square is filled, both with `currentColor` so the mark adapts - * to light and dark themes. - */ -export function LogoAgentCancellation({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-batching.tsx b/docs/components/patterns/logos/logo-batching.tsx deleted file mode 100644 index 0965aaf940..0000000000 --- a/docs/components/patterns/logos/logo-batching.tsx +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Batching brand mark. - * - * Three stacked, slightly-offset rounded rectangles — a "batch" of work. - * Top rectangle filled to suggest the active batch. - */ -export function LogoBatching({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-child-workflows.tsx b/docs/components/patterns/logos/logo-child-workflows.tsx deleted file mode 100644 index 9e7fbc37d7..0000000000 --- a/docs/components/patterns/logos/logo-child-workflows.tsx +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Child Workflows brand mark. - * - * Parent node fanning out to three child nodes — the spawn-and-poll shape. - * All `currentColor`. 
- */ -export function LogoChildWorkflows({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-distributed-abort-controller.tsx b/docs/components/patterns/logos/logo-distributed-abort-controller.tsx deleted file mode 100644 index 7851a6ef40..0000000000 --- a/docs/components/patterns/logos/logo-distributed-abort-controller.tsx +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Distributed Abort Controller brand mark. - * - * Universal abort glyph — circle with a slash through it — overlaid with a - * small dotted ring suggesting cross-process / distributed coordination. - * All `currentColor`. - */ -export function LogoDistributedAbortController({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-durable-agent.tsx b/docs/components/patterns/logos/logo-durable-agent.tsx deleted file mode 100644 index c605357b06..0000000000 --- a/docs/components/patterns/logos/logo-durable-agent.tsx +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Durable Agent brand mark. - * - * Bot glyph — rounded body, antenna, two eyes — the universal "agent" icon. - * All `currentColor`. - */ -export function LogoDurableAgent({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-human-in-the-loop.tsx b/docs/components/patterns/logos/logo-human-in-the-loop.tsx deleted file mode 100644 index c9445dedd3..0000000000 --- a/docs/components/patterns/logos/logo-human-in-the-loop.tsx +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Human-in-the-Loop brand mark. - * - * Thumbs-up glyph — represents a human approval signal that gates a - * paused agent. Drawn with `currentColor` strokes so it inherits text color - * in both light and dark themes. 
- */ -export function LogoHumanInTheLoop({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-idempotency.tsx b/docs/components/patterns/logos/logo-idempotency.tsx deleted file mode 100644 index 21a3d28a75..0000000000 --- a/docs/components/patterns/logos/logo-idempotency.tsx +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Idempotency brand mark. - * - * Refresh arrow looping around an equals sign — the visual statement - * "f(f(x)) = f(x)". No matter how many times you replay the operation, - * the result is equal. All `currentColor`. - */ -export function LogoIdempotency({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-rate-limiting.tsx b/docs/components/patterns/logos/logo-rate-limiting.tsx deleted file mode 100644 index 777cea0adc..0000000000 --- a/docs/components/patterns/logos/logo-rate-limiting.tsx +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Rate Limiting brand mark. - * - * Gauge / speedometer with a needle — represents throttling and backoff. - * All `currentColor`. - */ -export function LogoRateLimiting({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-saga.tsx b/docs/components/patterns/logos/logo-saga.tsx deleted file mode 100644 index eabe9c50c9..0000000000 --- a/docs/components/patterns/logos/logo-saga.tsx +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Saga / Transactions & Rollbacks brand mark. - * - * Two arrows curving in opposite directions — forward progress + reverse - * compensation. All `currentColor`. 
- */ -export function LogoSaga({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-sandbox.tsx b/docs/components/patterns/logos/logo-sandbox.tsx deleted file mode 100644 index 0afd822bae..0000000000 --- a/docs/components/patterns/logos/logo-sandbox.tsx +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Vercel Sandbox brand mark — isometric cube glyph. - * - * Vercel Sandbox doesn't ship a square brand mark of its own, so this is a - * purpose-built cube icon that reads as "container / sandbox" at a glance - * and pairs with the "Sandbox" title on the card. - * - * Recolored to `currentColor` so it inherits text color and adapts to - * light/dark themes automatically. - */ -export function LogoSandbox({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-scheduling.tsx b/docs/components/patterns/logos/logo-scheduling.tsx deleted file mode 100644 index 098309896c..0000000000 --- a/docs/components/patterns/logos/logo-scheduling.tsx +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Scheduling brand mark. - * - * Clock face with a calendar tick — represents future scheduled actions. - * All `currentColor`. - */ -export function LogoScheduling({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-sequential-and-parallel.tsx b/docs/components/patterns/logos/logo-sequential-and-parallel.tsx deleted file mode 100644 index a6a158cc36..0000000000 --- a/docs/components/patterns/logos/logo-sequential-and-parallel.tsx +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Sequential & Parallel brand mark. - * - * Three lines branching from a single source — one continuing forward - * (sequential), the others fanning out (parallel). All `currentColor`. 
- */ -export function LogoSequentialAndParallel({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-timeouts.tsx b/docs/components/patterns/logos/logo-timeouts.tsx deleted file mode 100644 index 42fb169d2c..0000000000 --- a/docs/components/patterns/logos/logo-timeouts.tsx +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Timeouts brand mark. - * - * Stopwatch glyph — circle with a top crown and a hand pointing right. - * All `currentColor`. - */ -export function LogoTimeouts({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-upgrading-workflows.tsx b/docs/components/patterns/logos/logo-upgrading-workflows.tsx deleted file mode 100644 index 6bd25fe396..0000000000 --- a/docs/components/patterns/logos/logo-upgrading-workflows.tsx +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Upgrading Workflows brand mark. - * - * A circular refresh arrow with an upward-pointing bolt at the top, - * representing a workflow that respawns itself on the latest deployment. - * All `currentColor`. - */ -export function LogoUpgradingWorkflows({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-webhooks.tsx b/docs/components/patterns/logos/logo-webhooks.tsx deleted file mode 100644 index 6bee9b3058..0000000000 --- a/docs/components/patterns/logos/logo-webhooks.tsx +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Webhooks brand mark. - * - * The webhooks.fyi triangle — three nodes (one at each vertex of an - * equilateral triangle) connected by edges. The de facto "webhook" logo - * across the web. All `currentColor`. 
- */ -export function LogoWebhooks({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} diff --git a/docs/components/patterns/logos/logo-workflow-composition.tsx b/docs/components/patterns/logos/logo-workflow-composition.tsx deleted file mode 100644 index 1390b2e6ed..0000000000 --- a/docs/components/patterns/logos/logo-workflow-composition.tsx +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Workflow Composition brand mark. - * - * Two nested rounded rectangles — a child workflow inside a parent — with a - * small arrow indicating composition / call. All `currentColor`. - */ -export function LogoWorkflowComposition({ - size = 20, - className, -}: { - size?: number; - className?: string; -}) { - return ( - - ); -} From 780684c10241623536dde9b3eb15e8715832823a Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:25:04 -0700 Subject: [PATCH 15/21] fix: use correct whySection field names in copy-page text builder Co-authored-by: Cursor --- docs/app/[lang]/patterns/[id]/page.tsx | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/docs/app/[lang]/patterns/[id]/page.tsx b/docs/app/[lang]/patterns/[id]/page.tsx index 0e5a4c08a0..ca71467211 100644 --- a/docs/app/[lang]/patterns/[id]/page.tsx +++ b/docs/app/[lang]/patterns/[id]/page.tsx @@ -164,13 +164,18 @@ export default async function RegistryDetailPage({ params }: PageProps) { if (guide?.whySection) { pageTextSections.push(`## ${guide.whySection.title ?? 'Why'}`); - if (guide.whySection.problem) - pageTextSections.push(guide.whySection.problem); - if (guide.whySection.solution) - pageTextSections.push(guide.whySection.solution); - (guide.whySection.bullets ?? []).forEach((b) => + if (guide.whySection.problemProse) + pageTextSections.push(guide.whySection.problemProse); + (guide.whySection.problemBullets ?? 
[]).forEach((b) => pageTextSections.push(`- ${b}`) ); + if (guide.whySection.solutionProse) + pageTextSections.push(guide.whySection.solutionProse); + (guide.whySection.solutionBullets ?? []).forEach((b) => + pageTextSections.push(`- ${b}`) + ); + if (guide.whySection.closingProse) + pageTextSections.push(guide.whySection.closingProse); } if (guide?.approaches) { From 4558bd253396423eb045d949628a85d66eca603f Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 10:35:24 -0700 Subject: [PATCH 16/21] fix: update all sourceUrls from deleted cookbook MDX files to pattern snippet files Co-authored-by: Cursor --- docs/app/[lang]/patterns/[id]/page.tsx | 7 ++++ docs/app/og/patterns/[id]/route.tsx | 23 ++++++++++ docs/app/r/[name]/route.ts | 24 ++++++++++- docs/app/r/route.ts | 14 ++++++- docs/lib/patterns/manifest.ts | 58 +++++++++++++------------- docs/scripts/check-docs-smoke.mjs | 13 +++--- 6 files changed, 100 insertions(+), 39 deletions(-) create mode 100644 docs/app/og/patterns/[id]/route.tsx diff --git a/docs/app/[lang]/patterns/[id]/page.tsx b/docs/app/[lang]/patterns/[id]/page.tsx index ca71467211..513f67f17c 100644 --- a/docs/app/[lang]/patterns/[id]/page.tsx +++ b/docs/app/[lang]/patterns/[id]/page.tsx @@ -28,9 +28,16 @@ export async function generateMetadata({ const { id } = await params; const item = getRegistryItem(id); if (!item) return { title: 'Registry item not found' }; + const ogImage = `/og/patterns/${id}`; return { title: `${item.name} | Workflow Registry`, description: item.description, + openGraph: { + images: [ogImage], + }, + twitter: { + images: [ogImage], + }, }; } diff --git a/docs/app/og/patterns/[id]/route.tsx b/docs/app/og/patterns/[id]/route.tsx new file mode 100644 index 0000000000..8337d89100 --- /dev/null +++ b/docs/app/og/patterns/[id]/route.tsx @@ -0,0 +1,23 @@ +import type { NextRequest } from 'next/server'; +import { getRegistryItem, getRegistryItemIds } from '@/lib/patterns/manifest'; +import { createOgImage } 
from '@/lib/og'; + +export const GET = async ( + _request: NextRequest, + { params }: RouteContext<'/og/patterns/[id]'> +) => { + const { id } = await params; + const item = getRegistryItem(id); + + if (!item) { + return new Response('Not found', { status: 404 }); + } + + return createOgImage({ + title: item.name, + description: item.description, + }); +}; + +export const generateStaticParams = () => + getRegistryItemIds().map((id) => ({ id })); diff --git a/docs/app/r/[name]/route.ts b/docs/app/r/[name]/route.ts index aea703df9f..19c446d946 100644 --- a/docs/app/r/[name]/route.ts +++ b/docs/app/r/[name]/route.ts @@ -24,20 +24,42 @@ const WORKFLOW_PATH_PREFIX = 'workflows/'; export const dynamic = 'force-dynamic'; +function wantsBrowserRedirect(request: Request): boolean { + const accept = request.headers.get('accept') ?? ''; + const userAgent = request.headers.get('user-agent') ?? ''; + // shadcn CLI sends Accept: application/json; browsers send text/html first. + if (accept.includes('application/json')) return false; + if (/shadcn/i.test(userAgent)) return false; + if (accept.includes('text/html')) return true; + return false; +} + export async function GET( - _request: Request, + request: Request, { params }: { params: Promise<{ name: string }> } ) { const { name } = await params; const item = registryItems.find((r) => r.id === name); if (!item) { + // For browser requests, redirect to the patterns index. + if (wantsBrowserRedirect(request)) { + return NextResponse.redirect(new URL('/patterns', request.url), 302); + } return NextResponse.json( { error: `Pattern "${name}" not found` }, { status: 404 } ); } + // Browsers visiting /r/durable-agent get the pretty detail page instead. + if (wantsBrowserRedirect(request)) { + return NextResponse.redirect( + new URL(`/patterns/${name}`, request.url), + 302 + ); + } + // Collect workflow files from snippets (installCode > code fallback). 
const workflowSnippets = item.snippets.filter((s) => s.caption?.startsWith(WORKFLOW_PATH_PREFIX) diff --git a/docs/app/r/route.ts b/docs/app/r/route.ts index ba35e2c102..d8352ebc20 100644 --- a/docs/app/r/route.ts +++ b/docs/app/r/route.ts @@ -14,7 +14,19 @@ import { registryItems } from '@/lib/patterns/manifest'; export const dynamic = 'force-dynamic'; -export async function GET() { +function wantsBrowserRedirect(request: Request): boolean { + const accept = request.headers.get('accept') ?? ''; + const userAgent = request.headers.get('user-agent') ?? ''; + if (accept.includes('application/json')) return false; + if (/shadcn/i.test(userAgent)) return false; + if (accept.includes('text/html')) return true; + return false; +} + +export async function GET(request: Request) { + if (wantsBrowserRedirect(request)) { + return NextResponse.redirect(new URL('/patterns', request.url), 302); + } const items = registryItems.map((item) => ({ name: item.id, type: 'registry:lib' as const, diff --git a/docs/lib/patterns/manifest.ts b/docs/lib/patterns/manifest.ts index 273cff51e4..b214c02484 100644 --- a/docs/lib/patterns/manifest.ts +++ b/docs/lib/patterns/manifest.ts @@ -147,7 +147,7 @@ export const registryItems: RegistryItem[] = [ docsUrl: 'https://workflow-sdk.dev/cookbook/agent-patterns/agent-cancellation', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/agent-cancellation.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/agent-cancellation.ts', shadcnSlug: 'https://workflow-sdk.dev/r/agent-cancellation', envVars: [ { @@ -355,7 +355,7 @@ export const registryItems: RegistryItem[] = [ homepage: 'https://ai-sdk.dev', docsUrl: 'https://ai-sdk.dev/docs', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/ai-sdk.mdx', + 
'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/ai-sdk.ts', shadcnSlug: 'https://workflow-sdk.dev/r/ai-sdk', files: [ { @@ -517,9 +517,9 @@ export const registryItems: RegistryItem[] = [ tags: ['agents', 'ai', 'durable', 'tools', 'streaming'], categories: ['agent'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/agent-patterns/durable-agent', + docsUrl: 'https://workflow-sdk.dev/patterns/durable-agent', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/durable-agent.ts', shadcnSlug: 'https://workflow-sdk.dev/r/durable-agent', files: [ { @@ -620,7 +620,7 @@ export const registryItems: RegistryItem[] = [ docsUrl: 'https://workflow-sdk.dev/cookbook/agent-patterns/human-in-the-loop', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/human-in-the-loop.ts', shadcnSlug: 'https://workflow-sdk.dev/r/human-in-the-loop', files: [ { @@ -741,7 +741,7 @@ export const registryItems: RegistryItem[] = [ homepage: 'https://chat-sdk.dev', docsUrl: 'https://chat-sdk.dev/docs/guides/durable-chat-sessions-nextjs', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/chat-sdk.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/chat-sdk.ts', shadcnSlug: 'https://workflow-sdk.dev/r/chat-sdk', files: [ { @@ -893,7 +893,7 @@ export const registryItems: RegistryItem[] = [ homepage: 'https://vercel.com/docs/vercel-sandbox', docsUrl: 'https://vercel.com/docs/vercel-sandbox', sourceUrl: - 
'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/integrations/sandbox.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/sandbox.ts', shadcnSlug: 'https://workflow-sdk.dev/r/sandbox', files: [ { @@ -1037,9 +1037,9 @@ export const registryItems: RegistryItem[] = [ tags: ['batching', 'fan-out', 'parallel', 'bulk-import'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/batching', + docsUrl: 'https://workflow-sdk.dev/patterns/batching', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/batching.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/batching.ts', shadcnSlug: 'https://workflow-sdk.dev/r/batching', files: [ { @@ -1119,9 +1119,9 @@ export const registryItems: RegistryItem[] = [ tags: ['idempotency', 'stripe', 'retries', 'exactly-once'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/idempotency', + docsUrl: 'https://workflow-sdk.dev/patterns/idempotency', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/idempotency.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/idempotency.ts', shadcnSlug: 'https://workflow-sdk.dev/r/idempotency', files: [ { @@ -1193,9 +1193,9 @@ export const registryItems: RegistryItem[] = [ tags: ['rate-limit', 'retry', 'backoff', '429'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/rate-limiting', + docsUrl: 'https://workflow-sdk.dev/patterns/rate-limiting', sourceUrl: - 
'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/rate-limiting.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/rate-limiting.ts', shadcnSlug: 'https://workflow-sdk.dev/r/rate-limiting', files: [ { @@ -1275,9 +1275,9 @@ export const registryItems: RegistryItem[] = [ tags: ['saga', 'transactions', 'rollback', 'compensation'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/saga', + docsUrl: 'https://workflow-sdk.dev/patterns/saga', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/saga.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/saga.ts', shadcnSlug: 'https://workflow-sdk.dev/r/saga', files: [ { @@ -1358,9 +1358,9 @@ export const registryItems: RegistryItem[] = [ tags: ['scheduling', 'reminders', 'cancellable', 'sleep'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/scheduling', + docsUrl: 'https://workflow-sdk.dev/patterns/scheduling', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/scheduling.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/scheduling.ts', shadcnSlug: 'https://workflow-sdk.dev/r/scheduling', files: [ { @@ -1459,7 +1459,7 @@ export const registryItems: RegistryItem[] = [ docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/sequential-and-parallel', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/sequential-and-parallel.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/sequential-and-parallel.ts', shadcnSlug: 
'https://workflow-sdk.dev/r/sequential-and-parallel', files: [ { @@ -1547,9 +1547,9 @@ export const registryItems: RegistryItem[] = [ tags: ['timeout', 'deadline', 'race', 'sleep'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/timeouts', + docsUrl: 'https://workflow-sdk.dev/patterns/timeouts', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/timeouts.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/timeouts.ts', shadcnSlug: 'https://workflow-sdk.dev/r/timeouts', files: [ { @@ -1634,9 +1634,9 @@ export const registryItems: RegistryItem[] = [ tags: ['webhook', 'callback', 'integration', 'external-api'], categories: ['common'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/webhooks', + docsUrl: 'https://workflow-sdk.dev/patterns/webhooks', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/webhooks.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/webhooks.ts', shadcnSlug: 'https://workflow-sdk.dev/r/webhooks', files: [ { @@ -1729,7 +1729,7 @@ export const registryItems: RegistryItem[] = [ docsUrl: 'https://workflow-sdk.dev/cookbook/common-patterns/workflow-composition', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/common-patterns/workflow-composition.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/workflow-composition.ts', shadcnSlug: 'https://workflow-sdk.dev/r/workflow-composition', files: [ { @@ -1823,9 +1823,9 @@ export const registryItems: RegistryItem[] = [ tags: ['fan-out', 'spawn', 'poll', 'orchestration'], categories: ['advanced'], homepage: 'https://workflow-sdk.dev', - docsUrl: 
'https://workflow-sdk.dev/cookbook/advanced/child-workflows', + docsUrl: 'https://workflow-sdk.dev/patterns/child-workflows', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/child-workflows.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/child-workflows.ts', shadcnSlug: 'https://workflow-sdk.dev/r/child-workflows', files: [ { @@ -1906,7 +1906,7 @@ export const registryItems: RegistryItem[] = [ docsUrl: 'https://workflow-sdk.dev/cookbook/advanced/distributed-abort-controller', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/distributed-abort-controller.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/distributed-abort-controller.ts', shadcnSlug: 'https://workflow-sdk.dev/r/distributed-abort-controller', files: [ { @@ -2006,9 +2006,9 @@ export const registryItems: RegistryItem[] = [ tags: ['upgrade', 'respawn', 'deployment', 'long-running', 'versioning'], categories: ['common', 'advanced'], homepage: 'https://workflow-sdk.dev', - docsUrl: 'https://workflow-sdk.dev/cookbook/advanced/upgrading-workflows', + docsUrl: 'https://workflow-sdk.dev/patterns/upgrading-workflows', sourceUrl: - 'https://github.com/vercel/workflow/tree/main/docs/content/docs/cookbook/advanced/upgrading-workflows.mdx', + 'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/upgrading-workflows.ts', shadcnSlug: 'https://workflow-sdk.dev/r/upgrading-workflows', files: [ { @@ -2072,7 +2072,7 @@ export const registryItems: RegistryItem[] = [ homepage: 'https://resend.com', docsUrl: 'https://resend.com/docs/send-with-nodejs', sourceUrl: - 'https://github.com/vercel-labs/workflow_onboarding/tree/main/nextjs_workflow/app/workflows/providers', + 
'https://github.com/vercel/workflow/blob/main/docs/lib/patterns/snippets/resend.ts', shadcnSlug: 'https://workflow-sdk.dev/r/resend', envVars: [ { diff --git a/docs/scripts/check-docs-smoke.mjs b/docs/scripts/check-docs-smoke.mjs index 14c9f01285..b2418b0220 100644 --- a/docs/scripts/check-docs-smoke.mjs +++ b/docs/scripts/check-docs-smoke.mjs @@ -158,11 +158,11 @@ const checks = [ ), }, { - name: 'HTML meta - cookbook sequential & parallel', + name: 'HTML meta - patterns sequential & parallel', run: () => assertHtmlMeta( - '/cookbook/common-patterns/sequential-and-parallel', - '/og/cookbook/common-patterns/sequential-and-parallel/image.png' + '/patterns/sequential-and-parallel', + '/og/patterns/sequential-and-parallel' ), }, { @@ -198,11 +198,8 @@ const checks = [ run: () => assertPngResponse('/og/getting-started/image.png'), }, { - name: 'OG cookbook common-patterns image', - run: () => - assertPngResponse( - '/og/cookbook/common-patterns/sequential-and-parallel/image.png' - ), + name: 'OG patterns image', + run: () => assertPngResponse('/og/patterns/sequential-and-parallel'), }, { name: 'OG docs reference image', From 539ca738b2db307004e6b73ca004629a8fff6955 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 14:44:32 -0700 Subject: [PATCH 17/21] fix: split webhooks into two separate installable files (event-listener + request-reply) Co-authored-by: Cursor --- docs/lib/patterns/manifest.ts | 13 +++++++++---- docs/lib/patterns/snippets/webhooks.ts | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/docs/lib/patterns/manifest.ts b/docs/lib/patterns/manifest.ts index b214c02484..52eb961139 100644 --- a/docs/lib/patterns/manifest.ts +++ b/docs/lib/patterns/manifest.ts @@ -1640,9 +1640,14 @@ export const registryItems: RegistryItem[] = [ shadcnSlug: 'https://workflow-sdk.dev/r/webhooks', files: [ { - path: 'workflows/webhooks-workflow.ts', + path: 'workflows/webhooks-event-listener-workflow.ts', description: 
- 'Two patterns — `paymentWebhook()` (long-running event ledger) and `asyncVerification()` (request-reply with deadline).', + '`paymentWebhook()` — long-running event ledger that processes multiple requests from one URL and exits on a terminal event.', + }, + { + path: 'workflows/webhooks-request-reply-workflow.ts', + description: + '`asyncVerification()` — submits a request with your webhook URL as callback and races the response against a deadline.', }, { path: 'app/api/webhooks/route.ts', @@ -1654,7 +1659,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Event listener', lang: 'tsx', - caption: 'workflows/webhooks-workflow.ts', + caption: 'workflows/webhooks-event-listener-workflow.ts', description: 'Long-running listener that processes multiple requests from one URL and exits on a terminal event — Stripe-style payment ledger.', code: webhooksEventListenerSource, @@ -1663,7 +1668,7 @@ export const registryItems: RegistryItem[] = [ { label: 'Request-reply', lang: 'tsx', - caption: 'workflows/webhooks-workflow.ts', + caption: 'workflows/webhooks-request-reply-workflow.ts', description: 'Submit a request to an external vendor with your webhook URL as the callback, then race the response against a 30-second deadline.', code: webhooksRequestReplySource, diff --git a/docs/lib/patterns/snippets/webhooks.ts b/docs/lib/patterns/snippets/webhooks.ts index 8caf57f208..578040343c 100644 --- a/docs/lib/patterns/snippets/webhooks.ts +++ b/docs/lib/patterns/snippets/webhooks.ts @@ -258,7 +258,7 @@ async function processCallback( export const webhooksStartRouteSource = `import { start, getRun } from "workflow/api"; import { NextResponse } from "next/server"; -import { paymentWebhook } from "@/app/workflows/webhooks"; +import { paymentWebhook } from "@/app/workflows/webhooks-event-listener-workflow"; // POST /api/webhooks { orderId } // Returns the auto-generated webhook URL — register it with the external service. 
From e58afd988747b1b00b5df0a4ac35013d0b42d804 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 14:50:09 -0700 Subject: [PATCH 18/21] fix: remove invalid createWebhook type parameter in timeouts pattern Co-authored-by: Cursor --- docs/lib/patterns/snippets/timeouts.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/lib/patterns/snippets/timeouts.ts b/docs/lib/patterns/snippets/timeouts.ts index 376fbea58d..75444052b6 100644 --- a/docs/lib/patterns/snippets/timeouts.ts +++ b/docs/lib/patterns/snippets/timeouts.ts @@ -44,11 +44,11 @@ export async function fetchWithFallback(key: string, fallback: string) { export async function waitForApproval(requestId: string) { "use workflow"; - const webhook = createWebhook<{ approved: boolean }>(); + const webhook = createWebhook(); await sendApprovalRequest(requestId, webhook.url); const result = await Promise.race([ - webhook.then((req) => req.json()), + webhook.then((req) => req.json() as Promise<{ approved: boolean }>), sleep("7 days").then(() => ({ timedOut: true } as const)), ]); @@ -154,11 +154,11 @@ export async function fetchWithFallback(key: string, fallback: string) { export async function waitForApproval(requestId: string) { "use workflow"; - const webhook = createWebhook<{ approved: boolean }>(); + const webhook = createWebhook(); await sendApprovalRequest(requestId, webhook.url); const result = await Promise.race([ - webhook.then((req) => req.json()), + webhook.then((req) => req.json() as Promise<{ approved: boolean }>), sleep("7 days").then(() => ({ timedOut: true } as const)), ]); From be7311f7f8d93b76385d50f6151eed01626fcd5c Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 14:51:37 -0700 Subject: [PATCH 19/21] fix: cast sleep() delay argument to any to satisfy StringValue overload constraint Co-authored-by: Cursor --- docs/lib/patterns/snippets/scheduling.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/docs/lib/patterns/snippets/scheduling.ts b/docs/lib/patterns/snippets/scheduling.ts index 1d35190511..f196b6665e 100644 --- a/docs/lib/patterns/snippets/scheduling.ts +++ b/docs/lib/patterns/snippets/scheduling.ts @@ -31,7 +31,7 @@ export async function scheduleAction(action: ScheduledAction) { // wins — no manual flag-checking, no extra database tables. const hook = cancelSchedule.create({ token: \`schedule:\${action.id}\` }); const cancelled = await Promise.race([ - sleep(action.delay).then(() => false as const), + sleep(action.delay as any).then(() => false as const), hook.then(() => true as const), ]); @@ -102,7 +102,7 @@ export async function scheduleAction(action: ScheduledAction) { // No manual flag-checking or extra DB tables — the runtime handles it. const hook = cancelSchedule.create({ token: \`schedule:\${action.id}\` }); const cancelled = await Promise.race([ - sleep(action.delay).then(() => false as const), + sleep(action.delay as any).then(() => false as const), hook.then(() => true as const), ]); From 1f3a583d24d4b662691d59c7f3fbef05557f33a2 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 14:56:31 -0700 Subject: [PATCH 20/21] feat: add search bar to patterns grid above cards Co-authored-by: Cursor --- docs/components/patterns/RegistryGrid.tsx | 52 +++++++++++++++++++---- 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/docs/components/patterns/RegistryGrid.tsx b/docs/components/patterns/RegistryGrid.tsx index 74e191fba2..5349c1738c 100644 --- a/docs/components/patterns/RegistryGrid.tsx +++ b/docs/components/patterns/RegistryGrid.tsx @@ -1,7 +1,9 @@ 'use client'; import { useState } from 'react'; +import { Search } from 'lucide-react'; import { Badge } from '@/components/ui/badge'; +import { Input } from '@/components/ui/input'; import { categoryLabels } from '@/lib/patterns/manifest'; import type { RegistryCategory, RegistryItem } from '@/lib/patterns/types'; import { RegistryCard } from './RegistryCard'; @@ 
-12,33 +14,47 @@ interface RegistryGridProps { items: RegistryItem[]; } +function matchesQuery(item: RegistryItem, query: string): boolean { + const q = query.toLowerCase(); + return ( + item.name.toLowerCase().includes(q) || + item.description.toLowerCase().includes(q) || + (item.longDescription?.toLowerCase().includes(q) ?? false) || + item.tags.some((t) => t.toLowerCase().includes(q)) || + item.categories.some((c) => categoryLabels[c].toLowerCase().includes(q)) + ); +} + export function RegistryGrid({ items }: RegistryGridProps) { const [filter, setFilter] = useState('all'); + const [query, setQuery] = useState(''); - // Build the list of category filters dynamically — only the categories that - // actually have items get a chip. Items can belong to more than one - // category (e.g. AI SDK is both `agent` and `vercel`), so they appear under - // every relevant filter and contribute to each chip's count. const presentCategories = Array.from( new Set(items.flatMap((item) => item.categories)) ); + const afterSearch = query.trim() + ? items.filter((item) => matchesQuery(item, query.trim())) + : items; + const filtered = filter === 'all' - ? items - : items.filter((item) => item.categories.includes(filter)); + ? afterSearch + : afterSearch.filter((item) => item.categories.includes(filter)); const filters: { id: Filter; label: string; count: number }[] = [ - { id: 'all', label: 'Show all', count: items.length }, + { id: 'all', label: 'Show all', count: afterSearch.length }, ...presentCategories.map((category) => ({ id: category as Filter, label: categoryLabels[category], - count: items.filter((item) => item.categories.includes(category)).length, + count: afterSearch.filter((item) => item.categories.includes(category)) + .length, })), ]; return ( <> + {/* Category filters */}
{filters.map(({ id, label, count }) => ( @@ -66,9 +82,27 @@ export function RegistryGrid({ items }: RegistryGridProps) {
+ {/* Search */} +
+
+ + { + setQuery(e.target.value); + setFilter('all'); + }} + className="pl-9" + /> +
+
+ {filtered.length === 0 ? (

- No registry items match this filter. + No patterns match + {query.trim() ? ` "${query.trim()}"` : ' this filter'}.

) : (
From 8d7c013b0ae6aa1aaf7ffb68fcc4033464f97c50 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman Date: Sun, 3 May 2026 15:08:56 -0700 Subject: [PATCH 21/21] fix: update Submit your recipe link to vercel/workflow repo Co-authored-by: Cursor --- docs/app/[lang]/patterns/page.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/app/[lang]/patterns/page.tsx b/docs/app/[lang]/patterns/page.tsx index 79b1ff5ddb..e9808af878 100644 --- a/docs/app/[lang]/patterns/page.tsx +++ b/docs/app/[lang]/patterns/page.tsx @@ -52,7 +52,7 @@ export default function PatternsPage() {