From a3641214c8719f82e5ad500599a67a5560d7cb3e Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:40:39 +0000 Subject: [PATCH 01/53] feat: add @trigger.dev/ai package with TriggerChatTransport New package that provides a custom AI SDK ChatTransport implementation bridging Vercel AI SDK's useChat hook with Trigger.dev's durable task execution and realtime streams. Key exports: - TriggerChatTransport class implementing ChatTransport - createChatTransport() factory function - ChatTaskPayload type for task-side typing - TriggerChatTransportOptions type The transport triggers a Trigger.dev task with chat messages as payload, then subscribes to the task's realtime stream to receive UIMessageChunk data, which useChat processes natively. Co-authored-by: Eric Allam --- packages/ai/package.json | 74 ++++++++++ packages/ai/src/index.ts | 3 + packages/ai/src/transport.ts | 258 +++++++++++++++++++++++++++++++++++ packages/ai/src/types.ts | 100 ++++++++++++++ packages/ai/src/version.ts | 1 + packages/ai/tsconfig.json | 10 ++ pnpm-lock.yaml | 160 +++++++++++++++++++++- 7 files changed, 603 insertions(+), 3 deletions(-) create mode 100644 packages/ai/package.json create mode 100644 packages/ai/src/index.ts create mode 100644 packages/ai/src/transport.ts create mode 100644 packages/ai/src/types.ts create mode 100644 packages/ai/src/version.ts create mode 100644 packages/ai/tsconfig.json diff --git a/packages/ai/package.json b/packages/ai/package.json new file mode 100644 index 00000000000..c6cee5d728b --- /dev/null +++ b/packages/ai/package.json @@ -0,0 +1,74 @@ +{ + "name": "@trigger.dev/ai", + "version": "4.3.3", + "description": "AI SDK integration for Trigger.dev - Custom ChatTransport for running AI chat as durable tasks", + "license": "MIT", + "publishConfig": { + "access": "public" + }, + "repository": { + "type": "git", + "url": "https://github.com/triggerdotdev/trigger.dev", + "directory": "packages/ai" + }, + "type": "module", + "files": [ + "dist" + ], 
+ "tshy": { + "selfLink": false, + "main": true, + "module": true, + "project": "./tsconfig.json", + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + }, + "sourceDialects": [ + "@triggerdotdev/source" + ] + }, + "scripts": { + "clean": "rimraf dist .tshy .tshy-build .turbo", + "build": "tshy && pnpm run update-version", + "dev": "tshy --watch", + "typecheck": "tsc --noEmit", + "test": "vitest", + "update-version": "tsx ../../scripts/updateVersion.ts", + "check-exports": "attw --pack ." + }, + "dependencies": { + "@trigger.dev/core": "workspace:4.3.3" + }, + "peerDependencies": { + "ai": "^5.0.0 || ^6.0.0" + }, + "devDependencies": { + "@arethetypeswrong/cli": "^0.15.4", + "ai": "^6.0.0", + "rimraf": "^3.0.2", + "tshy": "^3.0.2", + "tsx": "4.17.0", + "vitest": "^2.1.0" + }, + "engines": { + "node": ">=18.20.0" + }, + "exports": { + "./package.json": "./package.json", + ".": { + "import": { + "@triggerdotdev/source": "./src/index.ts", + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" + } + } + }, + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts", + "module": "./dist/esm/index.js" +} diff --git a/packages/ai/src/index.ts b/packages/ai/src/index.ts new file mode 100644 index 00000000000..f58c1d1ffaa --- /dev/null +++ b/packages/ai/src/index.ts @@ -0,0 +1,3 @@ +export { TriggerChatTransport, createChatTransport } from "./transport.js"; +export type { TriggerChatTransportOptions, ChatTaskPayload, ChatSessionState } from "./types.js"; +export { VERSION } from "./version.js"; diff --git a/packages/ai/src/transport.ts b/packages/ai/src/transport.ts new file mode 100644 index 00000000000..1a5789c96bd --- /dev/null +++ b/packages/ai/src/transport.ts @@ -0,0 +1,258 @@ +import type { ChatTransport, UIMessage, UIMessageChunk, ChatRequestOptions } from "ai"; +import { + ApiClient, + SSEStreamSubscription, 
+ type SSEStreamPart, +} from "@trigger.dev/core/v3"; +import type { TriggerChatTransportOptions, ChatSessionState } from "./types.js"; + +const DEFAULT_STREAM_KEY = "chat"; +const DEFAULT_BASE_URL = "https://api.trigger.dev"; +const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; + +/** + * A custom AI SDK `ChatTransport` implementation that bridges the Vercel AI SDK's + * `useChat` hook with Trigger.dev's durable task execution and realtime streams. + * + * When `sendMessages` is called, the transport: + * 1. Triggers a Trigger.dev task with the chat messages as payload + * 2. Subscribes to the task's realtime stream to receive `UIMessageChunk` data + * 3. Returns a `ReadableStream` that the AI SDK processes natively + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { TriggerChatTransport } from "@trigger.dev/ai"; + * + * function Chat({ accessToken }: { accessToken: string }) { + * const { messages, sendMessage, status } = useChat({ + * transport: new TriggerChatTransport({ + * accessToken, + * taskId: "my-chat-task", + * }), + * }); + * + * // ... 
render messages
+ * }
+ * ```
+ *
+ * On the backend, the task should pipe UIMessageChunks to the `"chat"` stream:
+ *
+ * @example
+ * ```ts
+ * import { task, streams } from "@trigger.dev/sdk";
+ * import { streamText, convertToModelMessages } from "ai";
+ *
+ * export const myChatTask = task({
+ *   id: "my-chat-task",
+ *   run: async (payload: ChatTaskPayload) => {
+ *     const result = streamText({
+ *       model: openai("gpt-4o"),
+ *       messages: convertToModelMessages(payload.messages),
+ *     });
+ *
+ *     const { waitUntilComplete } = streams.pipe("chat", result.toUIMessageStream());
+ *     await waitUntilComplete();
+ *   },
+ * });
+ * ```
+ */
+export class TriggerChatTransport implements ChatTransport<UIMessage> {
+  private readonly taskId: string;
+  private readonly accessToken: string;
+  private readonly baseURL: string;
+  private readonly streamKey: string;
+  private readonly extraHeaders: Record<string, string>;
+
+  /**
+   * Tracks active chat sessions for reconnection support.
+   * Maps chatId → session state (runId, publicAccessToken).
+   */
+  private sessions: Map<string, ChatSessionState> = new Map();
+
+  constructor(options: TriggerChatTransportOptions) {
+    this.taskId = options.taskId;
+    this.accessToken = options.accessToken;
+    this.baseURL = options.baseURL ?? DEFAULT_BASE_URL;
+    this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY;
+    this.extraHeaders = options.headers ?? {};
+  }
+
+  /**
+   * Sends messages to a Trigger.dev task and returns a streaming response.
+   *
+   * This method:
+   * 1. Triggers the configured task with the chat messages as payload
+   * 2. Subscribes to the task's realtime stream for UIMessageChunk events
+   * 3. 
Returns a ReadableStream that the AI SDK's useChat hook processes
+   */
+  sendMessages = async (
+    options: {
+      trigger: "submit-message" | "regenerate-message";
+      chatId: string;
+      messageId: string | undefined;
+      messages: UIMessage[];
+      abortSignal: AbortSignal | undefined;
+    } & ChatRequestOptions
+  ): Promise<ReadableStream<UIMessageChunk>> => {
+    const { trigger, chatId, messageId, messages, abortSignal, headers, body, metadata } = options;
+
+    // Build the payload for the task
+    const payload = {
+      messages,
+      chatId,
+      trigger,
+      messageId,
+      metadata,
+      ...(body ?? {}),
+    };
+
+    // Create API client for triggering
+    const apiClient = new ApiClient(this.baseURL, this.accessToken);
+
+    // Trigger the task
+    const triggerResponse = await apiClient.triggerTask(this.taskId, {
+      payload: JSON.stringify(payload),
+      options: {
+        payloadType: "application/json",
+      },
+    });
+
+    const runId = triggerResponse.id;
+    const publicAccessToken = "publicAccessToken" in triggerResponse
+      ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken
+      : undefined;
+
+    // Store session state for reconnection
+    this.sessions.set(chatId, {
+      runId,
+      publicAccessToken: publicAccessToken ?? this.accessToken,
+    });
+
+    // Subscribe to the realtime stream for this run
+    return this.subscribeToStream(runId, publicAccessToken ?? this.accessToken, abortSignal);
+  };
+
+  /**
+   * Reconnects to an existing streaming response for the specified chat session.
+   *
+   * Returns a ReadableStream if an active session exists, or null if no session is found.
+   */
+  reconnectToStream = async (
+    options: {
+      chatId: string;
+    } & ChatRequestOptions
+  ): Promise<ReadableStream<UIMessageChunk> | null> => {
+    const { chatId } = options;
+
+    const session = this.sessions.get(chatId);
+    if (!session) {
+      return null;
+    }
+
+    return this.subscribeToStream(session.runId, session.publicAccessToken, undefined);
+  };
+
+  /**
+   * Creates a ReadableStream by subscribing to the realtime SSE stream
+   * for a given run. 
+   */
+  private subscribeToStream(
+    runId: string,
+    accessToken: string,
+    abortSignal: AbortSignal | undefined
+  ): ReadableStream<UIMessageChunk> {
+    const streamKey = this.streamKey;
+    const baseURL = this.baseURL;
+    const extraHeaders = this.extraHeaders;
+
+    // Build the authorization header
+    const headers: Record<string, string> = {
+      Authorization: `Bearer ${accessToken}`,
+      ...extraHeaders,
+    };
+
+    const subscription = new SSEStreamSubscription(
+      `${baseURL}/realtime/v1/streams/${runId}/${streamKey}`,
+      {
+        headers,
+        signal: abortSignal,
+        timeoutInSeconds: DEFAULT_STREAM_TIMEOUT_SECONDS,
+      }
+    );
+
+    // We need to convert the SSEStreamPart stream to a UIMessageChunk stream
+    // SSEStreamPart has { id, chunk, timestamp } where chunk is the deserialized UIMessageChunk
+    let sseStreamPromise: Promise<ReadableStream<SSEStreamPart>> | null = null;
+
+    return new ReadableStream<UIMessageChunk>({
+      start: async (controller) => {
+        try {
+          sseStreamPromise = subscription.subscribe();
+          const sseStream = await sseStreamPromise;
+          const reader = sseStream.getReader();
+
+          try {
+            while (true) {
+              const { done, value } = await reader.read();
+
+              if (done) {
+                controller.close();
+                return;
+              }
+
+              if (abortSignal?.aborted) {
+                reader.cancel();
+                reader.releaseLock();
+                controller.close();
+                return;
+              }
+
+              // Each SSE part's chunk is a UIMessageChunk
+              controller.enqueue(value.chunk as UIMessageChunk);
+            }
+          } catch (readError) {
+            reader.releaseLock();
+            throw readError;
+          }
+        } catch (error) {
+          // Don't error the stream for abort errors
+          if (error instanceof Error && error.name === "AbortError") {
+            controller.close();
+            return;
+          }
+
+          controller.error(error);
+        }
+      },
+      cancel: () => {
+        // Cancellation is handled by the abort signal
+      },
+    });
+  }
+}
+
+/**
+ * Creates a new `TriggerChatTransport` instance.
+ *
+ * This is a convenience factory function equivalent to `new TriggerChatTransport(options)`. 
+ *
+ * @example
+ * ```tsx
+ * import { useChat } from "@ai-sdk/react";
+ * import { createChatTransport } from "@trigger.dev/ai";
+ *
+ * const transport = createChatTransport({
+ *   taskId: "my-chat-task",
+ *   accessToken: publicAccessToken,
+ * });
+ *
+ * function Chat() {
+ *   const { messages, sendMessage } = useChat({ transport });
+ *   // ...
+ * }
+ * ```
+ */
+export function createChatTransport(options: TriggerChatTransportOptions): TriggerChatTransport {
+  return new TriggerChatTransport(options);
+}
diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts
new file mode 100644
index 00000000000..81f1c6dc9be
--- /dev/null
+++ b/packages/ai/src/types.ts
@@ -0,0 +1,100 @@
+import type { UIMessage } from "ai";
+
+/**
+ * Options for creating a TriggerChatTransport.
+ */
+export type TriggerChatTransportOptions = {
+  /**
+   * The Trigger.dev task ID to trigger for chat completions.
+   * This task will receive the chat messages as its payload.
+   */
+  taskId: string;
+
+  /**
+   * A public access token or trigger token for authenticating with the Trigger.dev API.
+   * This is used both to trigger the task and to subscribe to the realtime stream.
+   *
+   * You can generate one using `auth.createTriggerPublicToken()` or
+   * `auth.createPublicToken()` from the `@trigger.dev/sdk`.
+   */
+  accessToken: string;
+
+  /**
+   * Base URL for the Trigger.dev API.
+   *
+   * @default "https://api.trigger.dev"
+   */
+  baseURL?: string;
+
+  /**
+   * The stream key where the task pipes UIMessageChunk data.
+   * Your task must pipe the AI SDK stream to this same key using
+   * `streams.pipe(streamKey, result.toUIMessageStream())`.
+   *
+   * @default "chat"
+   */
+  streamKey?: string;
+
+  /**
+   * Additional headers to include in API requests to Trigger.dev.
+   */
+  headers?: Record<string, string>;
+};
+
+/**
+ * The payload shape that TriggerChatTransport sends to the triggered task. 
+ *
+ * Use this type to type your task's `run` function payload:
+ *
+ * @example
+ * ```ts
+ * import { task, streams } from "@trigger.dev/sdk";
+ * import { streamText, convertToModelMessages } from "ai";
+ * import type { ChatTaskPayload } from "@trigger.dev/ai";
+ *
+ * export const myChatTask = task({
+ *   id: "my-chat-task",
+ *   run: async (payload: ChatTaskPayload) => {
+ *     const result = streamText({
+ *       model: openai("gpt-4o"),
+ *       messages: convertToModelMessages(payload.messages),
+ *     });
+ *
+ *     const { waitUntilComplete } = streams.pipe("chat", result.toUIMessageStream());
+ *     await waitUntilComplete();
+ *   },
+ * });
+ * ```
+ */
+export type ChatTaskPayload<TMessage extends UIMessage = UIMessage> = {
+  /** The array of UI messages representing the conversation history */
+  messages: TMessage[];
+
+  /** The unique identifier for the chat session */
+  chatId: string;
+
+  /**
+   * The type of message submission:
+   * - `"submit-message"`: A new user message was submitted
+   * - `"regenerate-message"`: The user wants to regenerate the last assistant response
+   */
+  trigger: "submit-message" | "regenerate-message";
+
+  /**
+   * The ID of the message to regenerate (only present for `"regenerate-message"` trigger).
+   */
+  messageId?: string;
+
+  /**
+   * Custom metadata attached to the chat request by the frontend.
+   */
+  metadata?: unknown;
+};
+
+/**
+ * Internal state for tracking active chat sessions, used for stream reconnection. 
+ */ +export type ChatSessionState = { + runId: string; + publicAccessToken: string; +}; diff --git a/packages/ai/src/version.ts b/packages/ai/src/version.ts new file mode 100644 index 00000000000..2e47a886828 --- /dev/null +++ b/packages/ai/src/version.ts @@ -0,0 +1 @@ +export const VERSION = "0.0.0"; diff --git a/packages/ai/tsconfig.json b/packages/ai/tsconfig.json new file mode 100644 index 00000000000..ec09e52a400 --- /dev/null +++ b/packages/ai/tsconfig.json @@ -0,0 +1,10 @@ +{ + "extends": "../../.configs/tsconfig.base.json", + "compilerOptions": { + "isolatedDeclarations": false, + "composite": true, + "sourceMap": true, + "stripInternal": true + }, + "include": ["./src/**/*.ts"] +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 83e28308337..e6c4f37fad3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1376,6 +1376,31 @@ importers: specifier: 8.6.6 version: 8.6.6 + packages/ai: + dependencies: + '@trigger.dev/core': + specifier: workspace:4.3.3 + version: link:../core + devDependencies: + '@arethetypeswrong/cli': + specifier: ^0.15.4 + version: 0.15.4 + ai: + specifier: ^6.0.0 + version: 6.0.3(zod@3.25.76) + rimraf: + specifier: ^3.0.2 + version: 3.0.2 + tshy: + specifier: ^3.0.2 + version: 3.0.2 + tsx: + specifier: 4.17.0 + version: 4.17.0 + vitest: + specifier: ^2.1.0 + version: 2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) + packages/build: dependencies: '@prisma/config': @@ -11167,9 +11192,23 @@ packages: '@vitest/browser': optional: true + '@vitest/expect@2.1.9': + resolution: {integrity: sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==} + '@vitest/expect@3.1.4': resolution: {integrity: sha512-xkD/ljeliyaClDYqHPNCiJ0plY5YIcM0OlRiZizLhlPmpXWpxnGMyTZXOHFhFeG7w9P5PBeL4IdtJ/HeQwTbQA==} + '@vitest/mocker@2.1.9': + resolution: {integrity: sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==} + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 + 
peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + '@vitest/mocker@3.1.4': resolution: {integrity: sha512-8IJ3CvwtSw/EFXqWFL8aCMu+YyYXG2WUSrQbViOZkWTKTVicVwZ/YiEZDSqD00kX+v/+W+OnxhNWoeVKorHygA==} peerDependencies: @@ -11193,9 +11232,15 @@ packages: '@vitest/runner@3.1.4': resolution: {integrity: sha512-djTeF1/vt985I/wpKVFBMWUlk/I7mb5hmD5oP8K9ACRmVXgKTae3TUOtXAEBfslNKPzUQvnKhNd34nnRSYgLNQ==} + '@vitest/snapshot@2.1.9': + resolution: {integrity: sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==} + '@vitest/snapshot@3.1.4': resolution: {integrity: sha512-JPHf68DvuO7vilmvwdPr9TS0SuuIzHvxeaCkxYcCD4jTk67XwL45ZhEHFKIuCm8CYstgI6LZ4XbwD6ANrwMpFg==} + '@vitest/spy@2.1.9': + resolution: {integrity: sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==} + '@vitest/spy@3.1.4': resolution: {integrity: sha512-Xg1bXhu+vtPXIodYN369M86K8shGLouNjoVI78g8iAq2rFoHFdajNvJJ5A/9bPMFcfQqdaCpOgWKEoMQg/s0Yg==} @@ -14262,11 +14307,12 @@ packages: glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} - deprecated: Glob versions prior to v9 are no longer supported + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me glob@9.3.5: resolution: {integrity: sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q==} engines: {node: '>=16 || 14 >=14.17'} + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me globals@11.12.0: resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} @@ -19794,6 +19840,11 @@ packages: engines: {node: '>=v14.16.0'} hasBin: true + vite-node@2.1.9: + resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + vite-node@3.1.4: resolution: {integrity: sha512-6enNwYnpyDo4hEgytbmc6mYWHXDHYEn0D1/rw4Q+tnHUGtKTJsn8T1YkX6Q18wI5LCrS8CTYlBaiCqxOy2kvUA==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} @@ -19861,6 +19912,31 @@ packages: terser: optional: true + vitest@2.1.9: + resolution: {integrity: sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/node': 20.14.14 + '@vitest/browser': 2.1.9 + '@vitest/ui': 2.1.9 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vitest@3.1.4: resolution: {integrity: sha512-Ta56rT7uWxCSJXlBtKgIlApJnT6e6IGmTYxYcmxjJ4ujuZDI59GUQgVDObXXJujOmPDBYXHK1qmaGtneu6TNIQ==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} @@ -20473,7 +20549,7 @@ snapshots: commander: 10.0.1 marked: 9.1.6 marked-terminal: 7.1.0(marked@9.1.6) - semver: 7.6.3 + semver: 7.7.3 '@arethetypeswrong/core@0.15.1': dependencies: @@ -31546,6 +31622,13 @@ snapshots: transitivePeerDependencies: - supports-color + '@vitest/expect@2.1.9': + dependencies: + '@vitest/spy': 2.1.9 + '@vitest/utils': 2.1.9 + chai: 5.2.0 + tinyrainbow: 1.2.0 + '@vitest/expect@3.1.4': dependencies: '@vitest/spy': 3.1.4 @@ -31553,6 +31636,14 @@ snapshots: chai: 5.2.0 
tinyrainbow: 2.0.0 + '@vitest/mocker@2.1.9(vite@5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1))': + dependencies: + '@vitest/spy': 2.1.9 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) + '@vitest/mocker@3.1.4(vite@5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1))': dependencies: '@vitest/spy': 3.1.4 @@ -31579,12 +31670,22 @@ snapshots: '@vitest/utils': 3.1.4 pathe: 2.0.3 + '@vitest/snapshot@2.1.9': + dependencies: + '@vitest/pretty-format': 2.1.9 + magic-string: 0.30.21 + pathe: 1.1.2 + '@vitest/snapshot@3.1.4': dependencies: '@vitest/pretty-format': 3.1.4 magic-string: 0.30.21 pathe: 2.0.3 + '@vitest/spy@2.1.9': + dependencies: + tinyspy: 3.0.2 + '@vitest/spy@3.1.4': dependencies: tinyspy: 3.0.2 @@ -34218,7 +34319,7 @@ snapshots: eslint: 8.31.0 eslint-module-utils: 2.7.4(@typescript-eslint/parser@5.59.6(eslint@8.31.0)(typescript@5.5.4))(eslint-import-resolver-node@0.3.7)(eslint-import-resolver-typescript@3.5.5)(eslint@8.31.0) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@5.59.6(eslint@8.31.0)(typescript@5.5.4))(eslint-import-resolver-typescript@3.5.5)(eslint@8.31.0) - get-tsconfig: 4.7.2 + get-tsconfig: 4.7.6 globby: 13.2.2 is-core-module: 2.14.0 is-glob: 4.0.3 @@ -42034,6 +42135,24 @@ snapshots: - supports-color - terser + vite-node@2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): + dependencies: + cac: 6.7.14 + debug: 4.4.3(supports-color@10.0.0) + es-module-lexer: 1.7.0 + pathe: 1.1.2 + vite: 5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) + transitivePeerDependencies: + - '@types/node' + - less + - lightningcss + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + vite-node@3.1.4(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): dependencies: cac: 6.7.14 @@ -42083,6 +42202,41 @@ snapshots: lightningcss: 1.29.2 terser: 5.44.1 + 
vitest@2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): + dependencies: + '@vitest/expect': 2.1.9 + '@vitest/mocker': 2.1.9(vite@5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1)) + '@vitest/pretty-format': 2.1.9 + '@vitest/runner': 2.1.9 + '@vitest/snapshot': 2.1.9 + '@vitest/spy': 2.1.9 + '@vitest/utils': 2.1.9 + chai: 5.2.0 + debug: 4.4.3(supports-color@10.0.0) + expect-type: 1.2.1 + magic-string: 0.30.21 + pathe: 1.1.2 + std-env: 3.9.0 + tinybench: 2.9.0 + tinyexec: 0.3.2 + tinypool: 1.0.2 + tinyrainbow: 1.2.0 + vite: 5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) + vite-node: 2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/node': 20.14.14 + transitivePeerDependencies: + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + vitest@3.1.4(@types/debug@4.1.12)(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): dependencies: '@vitest/expect': 3.1.4 From 68e5a571baa81cb4e8653fc56ced2df9d8425e72 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:42:30 +0000 Subject: [PATCH 02/53] test: add comprehensive unit tests for TriggerChatTransport Tests cover: - Constructor with required and optional options - sendMessages triggering task and returning UIMessageChunk stream - Correct payload structure sent to trigger API - Custom streamKey in stream URL - Extra headers propagation - reconnectToStream with existing and non-existing sessions - createChatTransport factory function - Error handling for API failures - regenerate-message trigger type Co-authored-by: Eric Allam --- packages/ai/src/transport.test.ts | 545 ++++++++++++++++++++++++++++++ packages/ai/vitest.config.ts | 8 + 2 files changed, 553 insertions(+) create mode 100644 packages/ai/src/transport.test.ts create mode 100644 packages/ai/vitest.config.ts diff --git a/packages/ai/src/transport.test.ts 
b/packages/ai/src/transport.test.ts new file mode 100644 index 00000000000..eac1434eabd --- /dev/null +++ b/packages/ai/src/transport.test.ts @@ -0,0 +1,545 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import type { UIMessage, UIMessageChunk } from "ai"; +import { TriggerChatTransport, createChatTransport } from "./transport.js"; + +// Helper: encode text as SSE format +function sseEncode(chunks: UIMessageChunk[]): string { + return chunks.map((chunk, i) => `id: ${i}\ndata: ${JSON.stringify(chunk)}\n\n`).join(""); +} + +// Helper: create a ReadableStream from SSE text +function createSSEStream(sseText: string): ReadableStream { + const encoder = new TextEncoder(); + return new ReadableStream({ + start(controller) { + controller.enqueue(encoder.encode(sseText)); + controller.close(); + }, + }); +} + +// Helper: create test UIMessages +function createUserMessage(text: string): UIMessage { + return { + id: `msg-${Date.now()}`, + role: "user", + parts: [{ type: "text", text }], + }; +} + +function createAssistantMessage(text: string): UIMessage { + return { + id: `msg-${Date.now()}`, + role: "assistant", + parts: [{ type: "text", text }], + }; +} + +// Sample UIMessageChunks as the AI SDK would produce +const sampleChunks: UIMessageChunk[] = [ + { type: "text-start", id: "part-1" }, + { type: "text-delta", id: "part-1", delta: "Hello" }, + { type: "text-delta", id: "part-1", delta: " world" }, + { type: "text-delta", id: "part-1", delta: "!" 
}, + { type: "text-end", id: "part-1" }, +]; + +describe("TriggerChatTransport", () => { + let originalFetch: typeof global.fetch; + + beforeEach(() => { + originalFetch = global.fetch; + }); + + afterEach(() => { + global.fetch = originalFetch; + vi.restoreAllMocks(); + }); + + describe("constructor", () => { + it("should create transport with required options", () => { + const transport = new TriggerChatTransport({ + taskId: "my-chat-task", + accessToken: "test-token", + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should accept optional configuration", () => { + const transport = new TriggerChatTransport({ + taskId: "my-chat-task", + accessToken: "test-token", + baseURL: "https://custom.trigger.dev", + streamKey: "custom-stream", + headers: { "X-Custom": "value" }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + }); + + describe("sendMessages", () => { + it("should trigger the task and return a ReadableStream of UIMessageChunks", async () => { + const triggerRunId = "run_abc123"; + const publicToken = "pub_token_xyz"; + + // Mock fetch to handle both the trigger request and the SSE stream request + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + // Handle the task trigger request + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: triggerRunId }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + } + ); + } + + // Handle the SSE stream request + if (urlStr.includes("/realtime/v1/streams/")) { + const sseText = sseEncode(sampleChunks); + return new Response(createSSEStream(sseText), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + taskId: "my-chat-task", + accessToken: "test-token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [createUserMessage("Hello!")]; + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages, + abortSignal: undefined, + }); + + expect(stream).toBeInstanceOf(ReadableStream); + + // Read all chunks from the stream + const reader = stream.getReader(); + const receivedChunks: UIMessageChunk[] = []; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks).toHaveLength(sampleChunks.length); + expect(receivedChunks[0]).toEqual({ type: "text-start", id: "part-1" }); + expect(receivedChunks[1]).toEqual({ type: "text-delta", id: "part-1", delta: "Hello" }); + expect(receivedChunks[4]).toEqual({ type: "text-end", id: "part-1" }); + }); + + it("should send the correct payload to the trigger API", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_test" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + taskId: "my-chat-task", + accessToken: "test-token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [createUserMessage("Hello!")]; + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-123", + messageId: undefined, + messages, + abortSignal: undefined, + metadata: { custom: "data" }, + }); + + // Verify the trigger fetch call + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") + ); + + expect(triggerCall).toBeDefined(); + const triggerUrl = typeof triggerCall![0] === "string" ? triggerCall![0] : triggerCall![0].toString(); + expect(triggerUrl).toContain("/api/v1/tasks/my-chat-task/trigger"); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = JSON.parse(triggerBody.payload); + expect(payload.messages).toEqual(messages); + expect(payload.chatId).toBe("chat-123"); + expect(payload.trigger).toBe("submit-message"); + expect(payload.metadata).toEqual({ custom: "data" }); + }); + + it("should use the correct stream URL with custom streamKey", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_custom" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + streamKey: "my-custom-stream", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Verify the stream URL uses the custom stream key + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/realtime/v1/streams/") + ); + + expect(streamCall).toBeDefined(); + const streamUrl = typeof streamCall![0] === "string" ? streamCall![0] : streamCall![0].toString(); + expect(streamUrl).toContain("/realtime/v1/streams/run_custom/my-custom-stream"); + }); + + it("should include extra headers in stream requests", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString();
+
+        if (urlStr.includes("/trigger")) {
+          return new Response(
+            JSON.stringify({ id: "run_hdrs" }),
+            {
+              status: 200,
+              headers: {
+                "content-type": "application/json",
+                "x-trigger-jwt": "token",
+              },
+            }
+          );
+        }
+
+        if (urlStr.includes("/realtime/v1/streams/")) {
+          return new Response(createSSEStream(""), {
+            status: 200,
+            headers: {
+              "content-type": "text/event-stream",
+              "X-Stream-Version": "v1",
+            },
+          });
+        }
+
+        throw new Error(`Unexpected fetch URL: ${urlStr}`);
+      });
+
+      global.fetch = fetchSpy;
+
+      const transport = new TriggerChatTransport({
+        taskId: "my-task",
+        accessToken: "token",
+        baseURL: "https://api.test.trigger.dev",
+        headers: { "X-Custom-Header": "custom-value" },
+      });
+
+      await transport.sendMessages({
+        trigger: "submit-message",
+        chatId: "chat-1",
+        messageId: undefined,
+        messages: [createUserMessage("test")],
+        abortSignal: undefined,
+      });
+
+      // Verify the stream request includes custom headers
+      const streamCall = fetchSpy.mock.calls.find((call: any[]) =>
+        (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/realtime/v1/streams/")
+      );
+
+      expect(streamCall).toBeDefined();
+      const requestHeaders = streamCall![1]?.headers as Record<string, string>;
+      expect(requestHeaders["X-Custom-Header"]).toBe("custom-value");
+    });
+  });
+
+  describe("reconnectToStream", () => {
+    it("should return null when no session exists for chatId", async () => {
+      const transport = new TriggerChatTransport({
+        taskId: "my-task",
+        accessToken: "token",
+      });
+
+      const result = await transport.reconnectToStream({
+        chatId: "nonexistent-chat",
+      });
+
+      expect(result).toBeNull();
+    });
+
+    it("should reconnect to an existing session", async () => {
+      const triggerRunId = "run_reconnect";
+      const publicToken = "pub_reconnect_token";
+
+      global.fetch = vi.fn().mockImplementation(async (url: string | URL) => {
+        const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: triggerRunId }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "part-1" }, + { type: "text-delta", id: "part-1", delta: "Reconnected!" }, + { type: "text-end", id: "part-1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First, send messages to establish a session + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-reconnect", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Now reconnect + const stream = await transport.reconnectToStream({ + chatId: "chat-reconnect", + }); + + expect(stream).toBeInstanceOf(ReadableStream); + + // Read the stream + const reader = stream!.getReader(); + const receivedChunks: UIMessageChunk[] = []; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks.length).toBeGreaterThan(0); + }); + }); + + describe("createChatTransport", () => { + it("should create a TriggerChatTransport instance", () => { + const transport = createChatTransport({ + taskId: "my-task", + accessToken: "token", + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should pass options through to the transport", () => { + const transport = createChatTransport({ + taskId: "custom-task", + accessToken: "custom-token", + baseURL: 
"https://custom.example.com", + streamKey: "custom-key", + headers: { "X-Test": "value" }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + }); + + describe("error handling", () => { + it("should propagate trigger API errors", async () => { + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ error: "Task not found" }), + { + status: 404, + headers: { "content-type": "application/json" }, + } + ); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + taskId: "nonexistent-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + await expect( + transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-error", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }) + ).rejects.toThrow(); + }); + }); + + describe("message types", () => { + it("should handle regenerate-message trigger", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_regen" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [ + createUserMessage("Hello!"), + createAssistantMessage("Hi there!"), + ]; + + await transport.sendMessages({ + trigger: "regenerate-message", + chatId: "chat-regen", + messageId: "msg-to-regen", + messages, + abortSignal: undefined, + }); + + // Verify the payload includes the regenerate trigger type and messageId + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? 
call[0] : call[0].toString()).includes("/trigger") + ); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = JSON.parse(triggerBody.payload); + expect(payload.trigger).toBe("regenerate-message"); + expect(payload.messageId).toBe("msg-to-regen"); + }); + }); +}); diff --git a/packages/ai/vitest.config.ts b/packages/ai/vitest.config.ts new file mode 100644 index 00000000000..c497b8ec974 --- /dev/null +++ b/packages/ai/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + include: ["src/**/*.test.ts"], + globals: true, + }, +}); From 3956baa05a0f85e3986a3e6c5f15a55506969bed Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:43:47 +0000 Subject: [PATCH 03/53] refactor: polish TriggerChatTransport implementation - Cache ApiClient instance instead of creating per-call - Add streamTimeoutSeconds option for customizable stream timeout - Clean up subscribeToStream method (remove unused variable) - Improve JSDoc with backend task example - Minor code cleanup Co-authored-by: Eric Allam --- packages/ai/src/transport.ts | 58 ++++++++++++++++-------------------- packages/ai/src/types.ts | 9 ++++++ 2 files changed, 35 insertions(+), 32 deletions(-) diff --git a/packages/ai/src/transport.ts b/packages/ai/src/transport.ts index 1a5789c96bd..b9df702b3a0 100644 --- a/packages/ai/src/transport.ts +++ b/packages/ai/src/transport.ts @@ -19,8 +19,15 @@ const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; * 2. Subscribes to the task's realtime stream to receive `UIMessageChunk` data * 3. Returns a `ReadableStream` that the AI SDK processes natively * + * The task receives a `ChatTaskPayload` containing the conversation messages, + * chat session ID, trigger type, and any custom metadata. 
Your task should use + * the AI SDK's `streamText` (or similar) to generate a response, then pipe + * the resulting `UIMessageStream` to the `"chat"` realtime stream key + * (or a custom key matching the `streamKey` option). + * * @example * ```tsx + * // Frontend — use with AI SDK's useChat hook * import { useChat } from "@ai-sdk/react"; * import { TriggerChatTransport } from "@trigger.dev/ai"; * @@ -36,12 +43,12 @@ const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; * } * ``` * - * On the backend, the task should pipe UIMessageChunks to the `"chat"` stream: - * * @example * ```ts + * // Backend — Trigger.dev task that handles chat * import { task, streams } from "@trigger.dev/sdk"; * import { streamText, convertToModelMessages } from "ai"; + * import type { ChatTaskPayload } from "@trigger.dev/ai"; * * export const myChatTask = task({ * id: "my-chat-task", @@ -63,6 +70,8 @@ export class TriggerChatTransport implements ChatTransport { private readonly baseURL: string; private readonly streamKey: string; private readonly extraHeaders: Record; + private readonly streamTimeoutSeconds: number; + private readonly apiClient: ApiClient; /** * Tracks active chat sessions for reconnection support. @@ -76,6 +85,8 @@ export class TriggerChatTransport implements ChatTransport { this.baseURL = options.baseURL ?? DEFAULT_BASE_URL; this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; this.extraHeaders = options.headers ?? {}; + this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? 
DEFAULT_STREAM_TIMEOUT_SECONDS; + this.apiClient = new ApiClient(this.baseURL, this.accessToken); } /** @@ -95,9 +106,9 @@ export class TriggerChatTransport implements ChatTransport { abortSignal: AbortSignal | undefined; } & ChatRequestOptions ): Promise> => { - const { trigger, chatId, messageId, messages, abortSignal, headers, body, metadata } = options; + const { trigger, chatId, messageId, messages, abortSignal, body, metadata } = options; - // Build the payload for the task + // Build the payload for the task — this becomes the ChatTaskPayload const payload = { messages, chatId, @@ -107,11 +118,8 @@ export class TriggerChatTransport implements ChatTransport { ...(body ?? {}), }; - // Create API client for triggering - const apiClient = new ApiClient(this.baseURL, this.accessToken); - // Trigger the task - const triggerResponse = await apiClient.triggerTask(this.taskId, { + const triggerResponse = await this.apiClient.triggerTask(this.taskId, { payload: JSON.stringify(payload), options: { payloadType: "application/json", @@ -119,9 +127,10 @@ export class TriggerChatTransport implements ChatTransport { }); const runId = triggerResponse.id; - const publicAccessToken = "publicAccessToken" in triggerResponse - ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken - : undefined; + const publicAccessToken = + "publicAccessToken" in triggerResponse + ? 
(triggerResponse as { publicAccessToken?: string }).publicAccessToken + : undefined; // Store session state for reconnection this.sessions.set(chatId, { @@ -143,9 +152,7 @@ export class TriggerChatTransport implements ChatTransport { chatId: string; } & ChatRequestOptions ): Promise | null> => { - const { chatId } = options; - - const session = this.sessions.get(chatId); + const session = this.sessions.get(options.chatId); if (!session) { return null; } @@ -162,34 +169,24 @@ export class TriggerChatTransport implements ChatTransport { accessToken: string, abortSignal: AbortSignal | undefined ): ReadableStream { - const streamKey = this.streamKey; - const baseURL = this.baseURL; - const extraHeaders = this.extraHeaders; - - // Build the authorization header const headers: Record = { Authorization: `Bearer ${accessToken}`, - ...extraHeaders, + ...this.extraHeaders, }; const subscription = new SSEStreamSubscription( - `${baseURL}/realtime/v1/streams/${runId}/${streamKey}`, + `${this.baseURL}/realtime/v1/streams/${runId}/${this.streamKey}`, { headers, signal: abortSignal, - timeoutInSeconds: DEFAULT_STREAM_TIMEOUT_SECONDS, + timeoutInSeconds: this.streamTimeoutSeconds, } ); - // We need to convert the SSEStreamPart stream to a UIMessageChunk stream - // SSEStreamPart has { id, chunk, timestamp } where chunk is the deserialized UIMessageChunk - let sseStreamPromise: Promise> | null = null; - return new ReadableStream({ start: async (controller) => { try { - sseStreamPromise = subscription.subscribe(); - const sseStream = await sseStreamPromise; + const sseStream = await subscription.subscribe(); const reader = sseStream.getReader(); try { @@ -216,7 +213,7 @@ export class TriggerChatTransport implements ChatTransport { throw readError; } } catch (error) { - // Don't error the stream for abort errors + // Don't error the stream for abort errors — just close gracefully if (error instanceof Error && error.name === "AbortError") { controller.close(); return; @@ -225,9 +222,6 
@@ export class TriggerChatTransport implements ChatTransport { controller.error(error); } }, - cancel: () => { - // Cancellation is handled by the abort signal - }, }); } } diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts index 81f1c6dc9be..bbffb50247d 100644 --- a/packages/ai/src/types.ts +++ b/packages/ai/src/types.ts @@ -39,6 +39,15 @@ export type TriggerChatTransportOptions = { * Additional headers to include in API requests to Trigger.dev. */ headers?: Record; + + /** + * The number of seconds to wait for the realtime stream to produce data + * before timing out. If no data arrives within this period, the stream + * will be closed. + * + * @default 120 + */ + streamTimeoutSeconds?: number; }; /** From e1e5aaff16e002b5fad89545d772a950558096f1 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:46:56 +0000 Subject: [PATCH 04/53] test: add abort signal, multiple sessions, and body merging tests Adds 3 additional test cases: - Abort signal gracefully closes the stream - Multiple independent chat sessions tracked correctly - ChatRequestOptions.body is merged into task payload Co-authored-by: Eric Allam --- packages/ai/src/transport.test.ts | 212 ++++++++++++++++++++++++++++++ 1 file changed, 212 insertions(+) diff --git a/packages/ai/src/transport.test.ts b/packages/ai/src/transport.test.ts index eac1434eabd..cc946596c27 100644 --- a/packages/ai/src/transport.test.ts +++ b/packages/ai/src/transport.test.ts @@ -479,6 +479,218 @@ describe("TriggerChatTransport", () => { }); }); + describe("abort signal", () => { + it("should close the stream gracefully when aborted", async () => { + let streamResolve: (() => void) | undefined; + const streamWait = new Promise((resolve) => { + streamResolve = resolve; + }); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_abort" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Create a slow stream that waits before sending data + const stream = new ReadableStream({ + async start(controller) { + const encoder = new TextEncoder(); + controller.enqueue( + encoder.encode(`id: 0\ndata: ${JSON.stringify({ type: "text-start", id: "p1" })}\n\n`) + ); + // Wait for the test to signal it's done + await streamWait; + controller.close(); + }, + }); + + return new Response(stream, { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const abortController = new AbortController(); + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-abort", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: abortController.signal, + }); + + // Read the first chunk + const reader = stream.getReader(); + const first = await reader.read(); + expect(first.done).toBe(false); + + // Abort and clean up + abortController.abort(); + streamResolve?.(); + + // The stream should close — reading should return done + const next = await reader.read(); + expect(next.done).toBe(true); + }); + }); + + describe("multiple sessions", () => { + it("should track multiple chat sessions independently", async () => { + let callCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + callCount++; + return new Response( + JSON.stringify({ id: `run_multi_${callCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": `token_${callCount}`, + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // Start two independent chat sessions + await transport.sendMessages({ + trigger: "submit-message", + chatId: "session-a", + messageId: undefined, + messages: [createUserMessage("Hello A")], + abortSignal: undefined, + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "session-b", + messageId: undefined, + messages: [createUserMessage("Hello B")], + abortSignal: undefined, + }); + + // Both sessions should be independently reconnectable + const streamA = await transport.reconnectToStream({ chatId: "session-a" }); + const streamB = await transport.reconnectToStream({ chatId: "session-b" }); + const streamC = await transport.reconnectToStream({ chatId: "nonexistent" }); + + expect(streamA).toBeInstanceOf(ReadableStream); + expect(streamB).toBeInstanceOf(ReadableStream); + expect(streamC).toBeNull(); + }); + }); + + describe("body merging", () => { + it("should merge ChatRequestOptions.body into the task payload", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_body" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-body", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + body: { systemPrompt: "You are helpful", temperature: 0.7 }, + }); + + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? 
call[0] : call[0].toString()).includes("/trigger") + ); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = JSON.parse(triggerBody.payload); + + // body properties should be merged into the payload + expect(payload.systemPrompt).toBe("You are helpful"); + expect(payload.temperature).toBe(0.7); + // Standard fields should still be present + expect(payload.chatId).toBe("chat-body"); + expect(payload.trigger).toBe("submit-message"); + }); + }); + describe("message types", () => { it("should handle regenerate-message trigger", async () => { const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { From ec6bb76b9a719d35a02a5fd6d9436ed4f44d392b Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:47:40 +0000 Subject: [PATCH 05/53] chore: add changeset for @trigger.dev/ai package Co-authored-by: Eric Allam --- .changeset/ai-sdk-chat-transport.md | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 .changeset/ai-sdk-chat-transport.md diff --git a/.changeset/ai-sdk-chat-transport.md b/.changeset/ai-sdk-chat-transport.md new file mode 100644 index 00000000000..a24dcdc195e --- /dev/null +++ b/.changeset/ai-sdk-chat-transport.md @@ -0,0 +1,41 @@ +--- +"@trigger.dev/ai": minor +--- + +New package: `@trigger.dev/ai` — AI SDK integration for Trigger.dev + +Provides `TriggerChatTransport`, a custom `ChatTransport` implementation for the Vercel AI SDK that bridges `useChat` with Trigger.dev's durable task execution and realtime streams. 
+ +**Frontend usage:** +```tsx +import { useChat } from "@ai-sdk/react"; +import { TriggerChatTransport } from "@trigger.dev/ai"; + +const { messages, sendMessage } = useChat({ + transport: new TriggerChatTransport({ + accessToken: publicAccessToken, + taskId: "my-chat-task", + }), +}); +``` + +**Backend task:** +```ts +import { task, streams } from "@trigger.dev/sdk"; +import { streamText, convertToModelMessages } from "ai"; +import type { ChatTaskPayload } from "@trigger.dev/ai"; + +export const myChatTask = task({ + id: "my-chat-task", + run: async (payload: ChatTaskPayload) => { + const result = streamText({ + model: openai("gpt-4o"), + messages: convertToModelMessages(payload.messages), + }); + const { waitUntilComplete } = streams.pipe("chat", result.toUIMessageStream()); + await waitUntilComplete(); + }, +}); +``` + +Also exports `createChatTransport()` factory function and `ChatTaskPayload` type for task-side typing. From 876dbef840e755d0ffa4cf44edcafadee8750a09 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:49:57 +0000 Subject: [PATCH 06/53] refactor: remove internal ChatSessionState from public exports ChatSessionState is an implementation detail of the transport's session tracking. Users don't need to access it since the sessions map is private. 
Co-authored-by: Eric Allam --- packages/ai/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ai/src/index.ts b/packages/ai/src/index.ts index f58c1d1ffaa..7e673894ff6 100644 --- a/packages/ai/src/index.ts +++ b/packages/ai/src/index.ts @@ -1,3 +1,3 @@ export { TriggerChatTransport, createChatTransport } from "./transport.js"; -export type { TriggerChatTransportOptions, ChatTaskPayload, ChatSessionState } from "./types.js"; +export type { TriggerChatTransportOptions, ChatTaskPayload } from "./types.js"; export { VERSION } from "./version.js"; From e25bd8ad5f6462e74193544a23c4e797f1feb35b Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:52:56 +0000 Subject: [PATCH 07/53] feat: support dynamic accessToken function for token refresh The accessToken option now accepts either a string or a function returning a string. This enables dynamic token refresh patterns: new TriggerChatTransport({ taskId: 'my-task', accessToken: () => getLatestToken(), }) The function is called on each sendMessages() call, allowing fresh tokens to be used for each task trigger. 
Co-authored-by: Eric Allam --- packages/ai/src/transport.test.ts | 85 +++++++++++++++++++++++++++++++ packages/ai/src/transport.ts | 22 +++++--- packages/ai/src/types.ts | 18 +++++-- 3 files changed, 113 insertions(+), 12 deletions(-) diff --git a/packages/ai/src/transport.test.ts b/packages/ai/src/transport.test.ts index cc946596c27..53d3ab86861 100644 --- a/packages/ai/src/transport.test.ts +++ b/packages/ai/src/transport.test.ts @@ -77,6 +77,19 @@ describe("TriggerChatTransport", () => { expect(transport).toBeInstanceOf(TriggerChatTransport); }); + + it("should accept a function for accessToken", () => { + let tokenCallCount = 0; + const transport = new TriggerChatTransport({ + taskId: "my-chat-task", + accessToken: () => { + tokenCallCount++; + return `dynamic-token-${tokenCallCount}`; + }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); }); describe("sendMessages", () => { @@ -627,6 +640,78 @@ describe("TriggerChatTransport", () => { }); }); + describe("dynamic accessToken", () => { + it("should call the accessToken function for each sendMessages call", async () => { + let tokenCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: `run_dyn_${tokenCallCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + taskId: "my-task", + accessToken: () => { + tokenCallCount++; + return `dynamic-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + // First call — the token function should be invoked + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-dyn-1", + messageId: undefined, + messages: [createUserMessage("first")], + abortSignal: undefined, + }); + + const firstCount = tokenCallCount; + expect(firstCount).toBeGreaterThanOrEqual(1); + + // Second call — the token function should be invoked again + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-dyn-2", + messageId: undefined, + messages: [createUserMessage("second")], + abortSignal: undefined, + }); + + // Token function was called at least once more + expect(tokenCallCount).toBeGreaterThan(firstCount); + }); + }); + describe("body merging", () => { it("should merge ChatRequestOptions.body into the task payload", async () => { const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { diff --git a/packages/ai/src/transport.ts b/packages/ai/src/transport.ts index b9df702b3a0..d785a471c10 100644 --- a/packages/ai/src/transport.ts +++ b/packages/ai/src/transport.ts @@ -66,12 +66,11 @@ const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; 
*/ export class TriggerChatTransport implements ChatTransport { private readonly taskId: string; - private readonly accessToken: string; + private readonly resolveAccessToken: () => string; private readonly baseURL: string; private readonly streamKey: string; private readonly extraHeaders: Record; private readonly streamTimeoutSeconds: number; - private readonly apiClient: ApiClient; /** * Tracks active chat sessions for reconnection support. @@ -81,12 +80,18 @@ export class TriggerChatTransport implements ChatTransport { constructor(options: TriggerChatTransportOptions) { this.taskId = options.taskId; - this.accessToken = options.accessToken; + this.resolveAccessToken = + typeof options.accessToken === "function" + ? options.accessToken + : () => options.accessToken as string; this.baseURL = options.baseURL ?? DEFAULT_BASE_URL; this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; this.extraHeaders = options.headers ?? {}; this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; - this.apiClient = new ApiClient(this.baseURL, this.accessToken); + } + + private getApiClient(): ApiClient { + return new ApiClient(this.baseURL, this.resolveAccessToken()); } /** @@ -118,8 +123,11 @@ export class TriggerChatTransport implements ChatTransport { ...(body ?? {}), }; + const currentToken = this.resolveAccessToken(); + // Trigger the task - const triggerResponse = await this.apiClient.triggerTask(this.taskId, { + const apiClient = this.getApiClient(); + const triggerResponse = await apiClient.triggerTask(this.taskId, { payload: JSON.stringify(payload), options: { payloadType: "application/json", @@ -135,11 +143,11 @@ export class TriggerChatTransport implements ChatTransport { // Store session state for reconnection this.sessions.set(chatId, { runId, - publicAccessToken: publicAccessToken ?? this.accessToken, + publicAccessToken: publicAccessToken ?? 
currentToken, }); // Subscribe to the realtime stream for this run - return this.subscribeToStream(runId, publicAccessToken ?? this.accessToken, abortSignal); + return this.subscribeToStream(runId, publicAccessToken ?? currentToken, abortSignal); }; /** diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts index bbffb50247d..88bf4317356 100644 --- a/packages/ai/src/types.ts +++ b/packages/ai/src/types.ts @@ -11,13 +11,21 @@ export type TriggerChatTransportOptions = { taskId: string; /** - * A public access token or trigger token for authenticating with the Trigger.dev API. - * This is used both to trigger the task and to subscribe to the realtime stream. + * An access token for authenticating with the Trigger.dev API. * - * You can generate one using `auth.createTriggerPublicToken()` or - * `auth.createPublicToken()` from the `@trigger.dev/sdk`. + * This must be a token with permission to trigger the task. You can use: + * - A **trigger public token** created via `auth.createTriggerPublicToken(taskId)` (recommended for frontend use) + * - A **secret API key** (for server-side use only — never expose in the browser) + * + * The token returned from triggering the task (`publicAccessToken`) is automatically + * used for subscribing to the realtime stream. + * + * Can also be a function that returns a token string, useful for dynamic token refresh: + * ```ts + * accessToken: () => getLatestToken() + * ``` */ - accessToken: string; + accessToken: string | (() => string); /** * Base URL for the Trigger.dev API. From 836d12c20e7b428816a69a427512796c0bc9f657 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 11:55:47 +0000 Subject: [PATCH 08/53] refactor: avoid double-resolving accessToken in sendMessages Use the already-resolved token when creating ApiClient instead of calling resolveAccessToken() again through getApiClient(). 
Co-authored-by: Eric Allam --- packages/ai/src/transport.ts | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/packages/ai/src/transport.ts b/packages/ai/src/transport.ts index d785a471c10..ff4b2c47a33 100644 --- a/packages/ai/src/transport.ts +++ b/packages/ai/src/transport.ts @@ -90,10 +90,6 @@ export class TriggerChatTransport implements ChatTransport { this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; } - private getApiClient(): ApiClient { - return new ApiClient(this.baseURL, this.resolveAccessToken()); - } - /** * Sends messages to a Trigger.dev task and returns a streaming response. * @@ -125,8 +121,8 @@ export class TriggerChatTransport implements ChatTransport { const currentToken = this.resolveAccessToken(); - // Trigger the task - const apiClient = this.getApiClient(); + // Trigger the task — use the already-resolved token directly + const apiClient = new ApiClient(this.baseURL, currentToken); const triggerResponse = await apiClient.triggerTask(this.taskId, { payload: JSON.stringify(payload), options: { From 46dc5002c1b9c925c63346359cc616669747b529 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 12:54:20 +0000 Subject: [PATCH 09/53] feat: add chat transport and AI chat helpers to @trigger.dev/sdk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two new subpath exports: @trigger.dev/sdk/chat (frontend, browser-safe): - TriggerChatTransport — ChatTransport implementation for useChat - createChatTransport() — factory function - TriggerChatTransportOptions type @trigger.dev/sdk/ai (backend, adds to existing ai.tool/ai.currentToolOptions): - chatTask() — pre-typed task wrapper with auto-pipe - pipeChat() — pipe StreamTextResult to realtime stream - CHAT_STREAM_KEY constant - ChatTaskPayload type - ChatTaskOptions type - PipeChatOptions type Co-authored-by: Eric Allam --- packages/ai/src/chatTask.ts | 132 +++++++++++++ 
packages/ai/src/pipeChat.ts | 137 +++++++++++++ packages/ai/src/types.ts | 22 +-- packages/trigger-sdk/package.json | 17 +- packages/trigger-sdk/src/v3/ai.ts | 242 +++++++++++++++++++++++ packages/trigger-sdk/src/v3/chat.ts | 294 ++++++++++++++++++++++++++++ 6 files changed, 832 insertions(+), 12 deletions(-) create mode 100644 packages/ai/src/chatTask.ts create mode 100644 packages/ai/src/pipeChat.ts create mode 100644 packages/trigger-sdk/src/v3/chat.ts diff --git a/packages/ai/src/chatTask.ts b/packages/ai/src/chatTask.ts new file mode 100644 index 00000000000..7f3eb92616c --- /dev/null +++ b/packages/ai/src/chatTask.ts @@ -0,0 +1,132 @@ +import { task as createTask } from "@trigger.dev/sdk"; +import type { Task } from "@trigger.dev/core/v3"; +import type { ChatTaskPayload } from "./types.js"; +import { pipeChat } from "./pipeChat.js"; + +/** + * Options for defining a chat task. + * + * This is a simplified version of the standard task options with the payload + * pre-typed as `ChatTaskPayload`. + */ +export type ChatTaskOptions = { + /** Unique identifier for the task */ + id: TIdentifier; + + /** Optional description of the task */ + description?: string; + + /** Retry configuration */ + retry?: { + maxAttempts?: number; + factor?: number; + minTimeoutInMs?: number; + maxTimeoutInMs?: number; + randomize?: boolean; + }; + + /** Queue configuration */ + queue?: { + name?: string; + concurrencyLimit?: number; + }; + + /** Machine preset for the task */ + machine?: { + preset?: string; + }; + + /** Maximum duration in seconds */ + maxDuration?: number; + + /** + * The main run function for the chat task. + * + * Receives a `ChatTaskPayload` with the conversation messages, chat session ID, + * and trigger type. + * + * **Auto-piping:** If this function returns a value that has a `.toUIMessageStream()` method + * (like a `StreamTextResult` from `streamText()`), the stream will automatically be piped + * to the frontend via the chat realtime stream. 
If you need to pipe from deeper in your + * code, use `pipeChat()` instead and don't return the result. + */ + run: (payload: ChatTaskPayload) => Promise; +}; + +/** + * An object that has a `toUIMessageStream()` method, like the result of `streamText()`. + */ +type UIMessageStreamable = { + toUIMessageStream: (...args: any[]) => AsyncIterable | ReadableStream; +}; + +function isUIMessageStreamable(value: unknown): value is UIMessageStreamable { + return ( + typeof value === "object" && + value !== null && + "toUIMessageStream" in value && + typeof (value as any).toUIMessageStream === "function" + ); +} + +/** + * Creates a Trigger.dev task pre-configured for AI SDK chat. + * + * This is a convenience wrapper around `task()` from `@trigger.dev/sdk` that: + * - **Pre-types the payload** as `ChatTaskPayload` — no manual typing needed + * - **Auto-pipes the stream** if the `run` function returns a `StreamTextResult` + * + * Requires `@trigger.dev/sdk` to be installed (it's a peer dependency). 
+ * + * @example + * ```ts + * import { chatTask } from "@trigger.dev/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * import { openai } from "@ai-sdk/openai"; + * + * // Simple: return streamText result — auto-piped to the frontend + * export const myChatTask = chatTask({ + * id: "my-chat-task", + * run: async ({ messages }) => { + * return streamText({ + * model: openai("gpt-4o"), + * messages: convertToModelMessages(messages), + * }); + * }, + * }); + * ``` + * + * @example + * ```ts + * import { chatTask, pipeChat } from "@trigger.dev/ai"; + * + * // Complex: use pipeChat() from deep inside your agent code + * export const myAgentTask = chatTask({ + * id: "my-agent-task", + * run: async ({ messages }) => { + * await runComplexAgentLoop(messages); + * // pipeChat() called internally by the agent loop + * }, + * }); + * ``` + */ +export function chatTask( + options: ChatTaskOptions +): Task { + const { run: userRun, ...restOptions } = options; + + return createTask({ + ...restOptions, + run: async (payload: ChatTaskPayload) => { + const result = await userRun(payload); + + // If the run function returned a StreamTextResult or similar, + // automatically pipe it to the chat stream + if (isUIMessageStreamable(result)) { + await pipeChat(result); + } + + return result; + }, + }); +} diff --git a/packages/ai/src/pipeChat.ts b/packages/ai/src/pipeChat.ts new file mode 100644 index 00000000000..885951c59c2 --- /dev/null +++ b/packages/ai/src/pipeChat.ts @@ -0,0 +1,137 @@ +import { realtimeStreams } from "@trigger.dev/core/v3"; + +/** + * The default stream key used for chat transport communication. + * + * Both `TriggerChatTransport` (frontend) and `pipeChat` (backend) use this key + * by default to ensure they communicate over the same stream. + */ +export const CHAT_STREAM_KEY = "chat"; + +/** + * Options for `pipeChat`. + */ +export type PipeChatOptions = { + /** + * Override the stream key to pipe to. 
+ * Must match the `streamKey` option on `TriggerChatTransport`. + * + * @default "chat" + */ + streamKey?: string; + + /** + * An AbortSignal to cancel the stream. + */ + signal?: AbortSignal; + + /** + * The target run ID to pipe the stream to. + * @default "self" (current run) + */ + target?: string; +}; + +/** + * An object that has a `toUIMessageStream()` method, like the result of `streamText()` from the AI SDK. + */ +type UIMessageStreamable = { + toUIMessageStream: (...args: any[]) => AsyncIterable | ReadableStream; +}; + +function isUIMessageStreamable(value: unknown): value is UIMessageStreamable { + return ( + typeof value === "object" && + value !== null && + "toUIMessageStream" in value && + typeof (value as any).toUIMessageStream === "function" + ); +} + +function isAsyncIterable(value: unknown): value is AsyncIterable { + return ( + typeof value === "object" && + value !== null && + Symbol.asyncIterator in value + ); +} + +function isReadableStream(value: unknown): value is ReadableStream { + return ( + typeof value === "object" && + value !== null && + typeof (value as any).getReader === "function" + ); +} + +/** + * Pipes a chat stream to the realtime stream, making it available to the + * `TriggerChatTransport` on the frontend. + * + * Accepts any of: + * - A `StreamTextResult` from the AI SDK (has `.toUIMessageStream()`) + * - An `AsyncIterable` of `UIMessageChunk`s + * - A `ReadableStream` of `UIMessageChunk`s + * + * This must be called from inside a Trigger.dev task's `run` function. 
+ * + * @example + * ```ts + * import { task } from "@trigger.dev/sdk"; + * import { pipeChat, type ChatTaskPayload } from "@trigger.dev/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * + * export const myChatTask = task({ + * id: "my-chat-task", + * run: async (payload: ChatTaskPayload) => { + * const result = streamText({ + * model: openai("gpt-4o"), + * messages: convertToModelMessages(payload.messages), + * }); + * + * await pipeChat(result); + * }, + * }); + * ``` + * + * @example + * ```ts + * // Deep inside your agent library — pipeChat works from anywhere inside a task + * async function runAgentLoop(messages: CoreMessage[]) { + * const result = streamText({ model, messages }); + * await pipeChat(result); + * } + * ``` + * + * @param source - A StreamTextResult, AsyncIterable, or ReadableStream of UIMessageChunks + * @param options - Optional configuration + * @returns A promise that resolves when the stream has been fully piped + */ +export async function pipeChat( + source: UIMessageStreamable | AsyncIterable | ReadableStream, + options?: PipeChatOptions +): Promise { + const streamKey = options?.streamKey ?? 
CHAT_STREAM_KEY; + + // Resolve the source to an AsyncIterable or ReadableStream + let stream: AsyncIterable | ReadableStream; + + if (isUIMessageStreamable(source)) { + stream = source.toUIMessageStream(); + } else if (isAsyncIterable(source) || isReadableStream(source)) { + stream = source; + } else { + throw new Error( + "pipeChat: source must be a StreamTextResult (with .toUIMessageStream()), " + + "an AsyncIterable, or a ReadableStream" + ); + } + + // Pipe to the realtime stream + const instance = realtimeStreams.pipe(streamKey, stream, { + signal: options?.signal, + target: options?.target, + }); + + await instance.wait(); +} diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts index 88bf4317356..91ae9938888 100644 --- a/packages/ai/src/types.ts +++ b/packages/ai/src/types.ts @@ -8,7 +8,7 @@ export type TriggerChatTransportOptions = { * The Trigger.dev task ID to trigger for chat completions. * This task will receive the chat messages as its payload. */ - taskId: string; + task: string; /** * An access token for authenticating with the Trigger.dev API. @@ -36,8 +36,8 @@ export type TriggerChatTransportOptions = { /** * The stream key where the task pipes UIMessageChunk data. - * Your task must pipe the AI SDK stream to this same key using - * `streams.pipe(streamKey, result.toUIMessageStream())`. + * When using `chatTask()` or `pipeChat()`, this is handled automatically. + * Only set this if you're using a custom stream key. * * @default "chat" */ @@ -59,15 +59,16 @@ export type TriggerChatTransportOptions = { }; /** - * The payload shape that TriggerChatTransport sends to the triggered task. + * The payload shape that the transport sends to the triggered task. * - * Use this type to type your task's `run` function payload: + * When using `chatTask()`, the payload is automatically typed — you don't need + * to import this type. 
When using `task()` directly, use this type to annotate + * your payload: * * @example * ```ts - * import { task, streams } from "@trigger.dev/sdk"; - * import { streamText, convertToModelMessages } from "ai"; - * import type { ChatTaskPayload } from "@trigger.dev/ai"; + * import { task } from "@trigger.dev/sdk"; + * import { pipeChat, type ChatTaskPayload } from "@trigger.dev/ai"; * * export const myChatTask = task({ * id: "my-chat-task", @@ -76,9 +77,7 @@ export type TriggerChatTransportOptions = { * model: openai("gpt-4o"), * messages: convertToModelMessages(payload.messages), * }); - * - * const { waitUntilComplete } = streams.pipe("chat", result.toUIMessageStream()); - * await waitUntilComplete(); + * await pipeChat(result); * }, * }); * ``` @@ -110,6 +109,7 @@ export type ChatTaskPayload = { /** * Internal state for tracking active chat sessions, used for stream reconnection. + * @internal */ export type ChatSessionState = { runId: string; diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index e1ff05c4de9..a32eafadef2 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -24,7 +24,8 @@ "./package.json": "./package.json", ".": "./src/v3/index.ts", "./v3": "./src/v3/index.ts", - "./ai": "./src/v3/ai.ts" + "./ai": "./src/v3/ai.ts", + "./chat": "./src/v3/chat.ts" }, "sourceDialects": [ "@triggerdotdev/source" @@ -37,6 +38,9 @@ ], "ai": [ "dist/commonjs/v3/ai.d.ts" + ], + "chat": [ + "dist/commonjs/v3/chat.d.ts" ] } }, @@ -123,6 +127,17 @@ "types": "./dist/commonjs/v3/ai.d.ts", "default": "./dist/commonjs/v3/ai.js" } + }, + "./chat": { + "import": { + "@triggerdotdev/source": "./src/v3/chat.ts", + "types": "./dist/esm/v3/chat.d.ts", + "default": "./dist/esm/v3/chat.js" + }, + "require": { + "types": "./dist/commonjs/v3/chat.d.ts", + "default": "./dist/commonjs/v3/chat.js" + } } }, "main": "./dist/commonjs/v3/index.js", diff --git a/packages/trigger-sdk/src/v3/ai.ts 
b/packages/trigger-sdk/src/v3/ai.ts index 59afa2fe21a..9e79df22b8d 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -3,11 +3,16 @@ import { isSchemaZodEsque, Task, type inferSchemaIn, + type PipeStreamOptions, + type TaskOptions, type TaskSchema, type TaskWithSchema, } from "@trigger.dev/core/v3"; +import type { UIMessage } from "ai"; import { dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; import { metadata } from "./metadata.js"; +import { streams } from "./streams.js"; +import { createTask } from "./shared.js"; const METADATA_KEY = "tool.execute.options"; @@ -116,3 +121,240 @@ export const ai = { tool: toolFromTask, currentToolOptions: getToolOptionsFromMetadata, }; + +// --------------------------------------------------------------------------- +// Chat transport helpers — backend side +// --------------------------------------------------------------------------- + +/** + * The default stream key used for chat transport communication. + * Both `TriggerChatTransport` (frontend) and `pipeChat`/`chatTask` (backend) + * use this key by default. + */ +export const CHAT_STREAM_KEY = "chat"; + +/** + * The payload shape that the chat transport sends to the triggered task. + * + * When using `chatTask()`, the payload is automatically typed — you don't need + * to import this type. Use this type only if you're using `task()` directly + * with `pipeChat()`. 
+ */ +export type ChatTaskPayload = { + /** The conversation messages */ + messages: TMessage[]; + + /** The unique identifier for the chat session */ + chatId: string; + + /** + * The trigger type: + * - `"submit-message"`: A new user message + * - `"regenerate-message"`: Regenerate the last assistant response + */ + trigger: "submit-message" | "regenerate-message"; + + /** The ID of the message to regenerate (only for `"regenerate-message"`) */ + messageId?: string; + + /** Custom metadata from the frontend */ + metadata?: unknown; +}; + +/** + * Options for `pipeChat`. + */ +export type PipeChatOptions = { + /** + * Override the stream key. Must match the `streamKey` on `TriggerChatTransport`. + * @default "chat" + */ + streamKey?: string; + + /** An AbortSignal to cancel the stream. */ + signal?: AbortSignal; + + /** + * The target run ID to pipe to. + * @default "self" (current run) + */ + target?: string; +}; + +/** + * An object with a `toUIMessageStream()` method (e.g. `StreamTextResult` from `streamText()`). + */ +type UIMessageStreamable = { + toUIMessageStream: (...args: any[]) => AsyncIterable | ReadableStream; +}; + +function isUIMessageStreamable(value: unknown): value is UIMessageStreamable { + return ( + typeof value === "object" && + value !== null && + "toUIMessageStream" in value && + typeof (value as any).toUIMessageStream === "function" + ); +} + +function isAsyncIterable(value: unknown): value is AsyncIterable { + return typeof value === "object" && value !== null && Symbol.asyncIterator in value; +} + +function isReadableStream(value: unknown): value is ReadableStream { + return typeof value === "object" && value !== null && typeof (value as any).getReader === "function"; +} + +/** + * Pipes a chat stream to the realtime stream, making it available to the + * `TriggerChatTransport` on the frontend. 
+ * + * Accepts: + * - A `StreamTextResult` from `streamText()` (has `.toUIMessageStream()`) + * - An `AsyncIterable` of `UIMessageChunk`s + * - A `ReadableStream` of `UIMessageChunk`s + * + * Must be called from inside a Trigger.dev task's `run` function. + * + * @example + * ```ts + * import { task } from "@trigger.dev/sdk"; + * import { pipeChat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * + * export const myChatTask = task({ + * id: "my-chat-task", + * run: async (payload: ChatTaskPayload) => { + * const result = streamText({ + * model: openai("gpt-4o"), + * messages: convertToModelMessages(payload.messages), + * }); + * + * await pipeChat(result); + * }, + * }); + * ``` + * + * @example + * ```ts + * // Works from anywhere inside a task — even deep in your agent code + * async function runAgentLoop(messages: CoreMessage[]) { + * const result = streamText({ model, messages }); + * await pipeChat(result); + * } + * ``` + */ +export async function pipeChat( + source: UIMessageStreamable | AsyncIterable | ReadableStream, + options?: PipeChatOptions +): Promise { + const streamKey = options?.streamKey ?? CHAT_STREAM_KEY; + + let stream: AsyncIterable | ReadableStream; + + if (isUIMessageStreamable(source)) { + stream = source.toUIMessageStream(); + } else if (isAsyncIterable(source) || isReadableStream(source)) { + stream = source; + } else { + throw new Error( + "pipeChat: source must be a StreamTextResult (with .toUIMessageStream()), " + + "an AsyncIterable, or a ReadableStream" + ); + } + + const pipeOptions: PipeStreamOptions = {}; + if (options?.signal) { + pipeOptions.signal = options.signal; + } + if (options?.target) { + pipeOptions.target = options.target; + } + + const { waitUntilComplete } = streams.pipe(streamKey, stream, pipeOptions); + await waitUntilComplete(); +} + +/** + * Options for defining a chat task. 
+ * + * Extends the standard `TaskOptions` but pre-types the payload as `ChatTaskPayload` + * and overrides `run` to accept `ChatTaskPayload` directly. + * + * **Auto-piping:** If the `run` function returns a value with `.toUIMessageStream()` + * (like a `StreamTextResult`), the stream is automatically piped to the frontend. + * For complex flows, use `pipeChat()` manually from anywhere in your code. + */ +export type ChatTaskOptions = Omit< + TaskOptions, + "run" +> & { + /** + * The run function for the chat task. + * + * Receives a `ChatTaskPayload` with the conversation messages, chat session ID, + * and trigger type. + * + * **Auto-piping:** If this function returns a value with `.toUIMessageStream()`, + * the stream is automatically piped to the frontend. + */ + run: (payload: ChatTaskPayload) => Promise; +}; + +/** + * Creates a Trigger.dev task pre-configured for AI SDK chat. + * + * - **Pre-types the payload** as `ChatTaskPayload` — no manual typing needed + * - **Auto-pipes the stream** if `run` returns a `StreamTextResult` + * - For complex flows, use `pipeChat()` from anywhere inside your task code + * + * @example + * ```ts + * import { chatTask } from "@trigger.dev/sdk/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * import { openai } from "@ai-sdk/openai"; + * + * // Simple: return streamText result — auto-piped to the frontend + * export const myChatTask = chatTask({ + * id: "my-chat-task", + * run: async ({ messages }) => { + * return streamText({ + * model: openai("gpt-4o"), + * messages: convertToModelMessages(messages), + * }); + * }, + * }); + * ``` + * + * @example + * ```ts + * import { chatTask, pipeChat } from "@trigger.dev/sdk/ai"; + * + * // Complex: pipeChat() from deep in your agent code + * export const myAgentTask = chatTask({ + * id: "my-agent-task", + * run: async ({ messages }) => { + * await runComplexAgentLoop(messages); + * }, + * }); + * ``` + */ +export function chatTask( + options: ChatTaskOptions +): 
Task { + const { run: userRun, ...restOptions } = options; + + return createTask({ + ...restOptions, + run: async (payload: ChatTaskPayload) => { + const result = await userRun(payload); + + // Auto-pipe if the run function returned a StreamTextResult or similar + if (isUIMessageStreamable(result)) { + await pipeChat(result); + } + + return result; + }, + }); +} diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts new file mode 100644 index 00000000000..5a7872c1014 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -0,0 +1,294 @@ +/** + * @module @trigger.dev/sdk/chat + * + * Browser-safe module for AI SDK chat transport integration. + * Use this on the frontend with the AI SDK's `useChat` hook. + * + * For backend helpers (`chatTask`, `pipeChat`), use `@trigger.dev/sdk/ai` instead. + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; + * + * function Chat({ accessToken }: { accessToken: string }) { + * const { messages, sendMessage, status } = useChat({ + * transport: new TriggerChatTransport({ + * task: "my-chat-task", + * accessToken, + * }), + * }); + * } + * ``` + */ + +import type { ChatTransport, UIMessage, UIMessageChunk, ChatRequestOptions } from "ai"; +import { ApiClient, SSEStreamSubscription } from "@trigger.dev/core/v3"; + +const DEFAULT_STREAM_KEY = "chat"; +const DEFAULT_BASE_URL = "https://api.trigger.dev"; +const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; + +/** + * Options for creating a TriggerChatTransport. + */ +export type TriggerChatTransportOptions = { + /** + * The Trigger.dev task ID to trigger for chat completions. + * This task should be defined using `chatTask()` from `@trigger.dev/sdk/ai`, + * or a regular `task()` that uses `pipeChat()`. + */ + task: string; + + /** + * An access token for authenticating with the Trigger.dev API. + * + * This must be a token with permission to trigger the task. 
You can use: + * - A **trigger public token** created via `auth.createTriggerPublicToken(taskId)` (recommended for frontend use) + * - A **secret API key** (for server-side use only — never expose in the browser) + * + * Can also be a function that returns a token string, useful for dynamic token refresh. + */ + accessToken: string | (() => string); + + /** + * Base URL for the Trigger.dev API. + * @default "https://api.trigger.dev" + */ + baseURL?: string; + + /** + * The stream key where the task pipes UIMessageChunk data. + * When using `chatTask()` or `pipeChat()`, this is handled automatically. + * Only set this if you're using a custom stream key. + * + * @default "chat" + */ + streamKey?: string; + + /** + * Additional headers to include in API requests to Trigger.dev. + */ + headers?: Record; + + /** + * The number of seconds to wait for the realtime stream to produce data + * before timing out. + * + * @default 120 + */ + streamTimeoutSeconds?: number; +}; + +/** + * Internal state for tracking active chat sessions. + * @internal + */ +type ChatSessionState = { + runId: string; + publicAccessToken: string; +}; + +/** + * A custom AI SDK `ChatTransport` that runs chat completions as durable Trigger.dev tasks. + * + * When `sendMessages` is called, the transport: + * 1. Triggers a Trigger.dev task with the chat messages as payload + * 2. Subscribes to the task's realtime stream to receive `UIMessageChunk` data + * 3. Returns a `ReadableStream` that the AI SDK processes natively + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; + * + * function Chat({ accessToken }: { accessToken: string }) { + * const { messages, sendMessage, status } = useChat({ + * transport: new TriggerChatTransport({ + * task: "my-chat-task", + * accessToken, + * }), + * }); + * + * // ... 
render messages + * } + * ``` + * + * On the backend, define the task using `chatTask` from `@trigger.dev/sdk/ai`: + * + * @example + * ```ts + * import { chatTask } from "@trigger.dev/sdk/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * + * export const myChatTask = chatTask({ + * id: "my-chat-task", + * run: async ({ messages }) => { + * return streamText({ + * model: openai("gpt-4o"), + * messages: convertToModelMessages(messages), + * }); + * }, + * }); + * ``` + */ +export class TriggerChatTransport implements ChatTransport { + private readonly taskId: string; + private readonly resolveAccessToken: () => string; + private readonly baseURL: string; + private readonly streamKey: string; + private readonly extraHeaders: Record; + private readonly streamTimeoutSeconds: number; + + private sessions: Map = new Map(); + + constructor(options: TriggerChatTransportOptions) { + this.taskId = options.task; + this.resolveAccessToken = + typeof options.accessToken === "function" + ? options.accessToken + : () => options.accessToken as string; + this.baseURL = options.baseURL ?? DEFAULT_BASE_URL; + this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; + this.extraHeaders = options.headers ?? {}; + this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; + } + + sendMessages = async ( + options: { + trigger: "submit-message" | "regenerate-message"; + chatId: string; + messageId: string | undefined; + messages: UIMessage[]; + abortSignal: AbortSignal | undefined; + } & ChatRequestOptions + ): Promise> => { + const { trigger, chatId, messageId, messages, abortSignal, body, metadata } = options; + + const payload = { + messages, + chatId, + trigger, + messageId, + metadata, + ...(body ?? 
{}), + }; + + const currentToken = this.resolveAccessToken(); + const apiClient = new ApiClient(this.baseURL, currentToken); + + const triggerResponse = await apiClient.triggerTask(this.taskId, { + payload: JSON.stringify(payload), + options: { + payloadType: "application/json", + }, + }); + + const runId = triggerResponse.id; + const publicAccessToken = + "publicAccessToken" in triggerResponse + ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken + : undefined; + + this.sessions.set(chatId, { + runId, + publicAccessToken: publicAccessToken ?? currentToken, + }); + + return this.subscribeToStream(runId, publicAccessToken ?? currentToken, abortSignal); + }; + + reconnectToStream = async ( + options: { + chatId: string; + } & ChatRequestOptions + ): Promise | null> => { + const session = this.sessions.get(options.chatId); + if (!session) { + return null; + } + + return this.subscribeToStream(session.runId, session.publicAccessToken, undefined); + }; + + private subscribeToStream( + runId: string, + accessToken: string, + abortSignal: AbortSignal | undefined + ): ReadableStream { + const headers: Record = { + Authorization: `Bearer ${accessToken}`, + ...this.extraHeaders, + }; + + const subscription = new SSEStreamSubscription( + `${this.baseURL}/realtime/v1/streams/${runId}/${this.streamKey}`, + { + headers, + signal: abortSignal, + timeoutInSeconds: this.streamTimeoutSeconds, + } + ); + + return new ReadableStream({ + start: async (controller) => { + try { + const sseStream = await subscription.subscribe(); + const reader = sseStream.getReader(); + + try { + while (true) { + const { done, value } = await reader.read(); + + if (done) { + controller.close(); + return; + } + + if (abortSignal?.aborted) { + reader.cancel(); + reader.releaseLock(); + controller.close(); + return; + } + + controller.enqueue(value.chunk as UIMessageChunk); + } + } catch (readError) { + reader.releaseLock(); + throw readError; + } + } catch (error) { + if (error 
instanceof Error && error.name === "AbortError") { + controller.close(); + return; + } + + controller.error(error); + } + }, + }); + } +} + +/** + * Creates a new `TriggerChatTransport` instance. + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { createChatTransport } from "@trigger.dev/sdk/chat"; + * + * const transport = createChatTransport({ + * task: "my-chat-task", + * accessToken: publicAccessToken, + * }); + * + * function Chat() { + * const { messages, sendMessage } = useChat({ transport }); + * } + * ``` + */ +export function createChatTransport(options: TriggerChatTransportOptions): TriggerChatTransport { + return new TriggerChatTransport(options); +} From c747b0b1d3d99b426c019033cb297000f2bc54ab Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 12:55:31 +0000 Subject: [PATCH 10/53] test: move chat transport tests to @trigger.dev/sdk Move and adapt tests from packages/ai to packages/trigger-sdk. - Import from ./chat.js instead of ./transport.js - Use 'task' option instead of 'taskId' - All 17 tests passing Co-authored-by: Eric Allam --- packages/trigger-sdk/src/v3/chat.test.ts | 842 +++++++++++++++++++++++ 1 file changed, 842 insertions(+) create mode 100644 packages/trigger-sdk/src/v3/chat.test.ts diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts new file mode 100644 index 00000000000..86a4ba9ad57 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -0,0 +1,842 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import type { UIMessage, UIMessageChunk } from "ai"; +import { TriggerChatTransport, createChatTransport } from "./chat.js"; + +// Helper: encode text as SSE format +function sseEncode(chunks: UIMessageChunk[]): string { + return chunks.map((chunk, i) => `id: ${i}\ndata: ${JSON.stringify(chunk)}\n\n`).join(""); +} + +// Helper: create a ReadableStream from SSE text +function createSSEStream(sseText: string): 
ReadableStream { + const encoder = new TextEncoder(); + return new ReadableStream({ + start(controller) { + controller.enqueue(encoder.encode(sseText)); + controller.close(); + }, + }); +} + +// Helper: create test UIMessages +function createUserMessage(text: string): UIMessage { + return { + id: `msg-${Date.now()}`, + role: "user", + parts: [{ type: "text", text }], + }; +} + +function createAssistantMessage(text: string): UIMessage { + return { + id: `msg-${Date.now()}`, + role: "assistant", + parts: [{ type: "text", text }], + }; +} + +// Sample UIMessageChunks as the AI SDK would produce +const sampleChunks: UIMessageChunk[] = [ + { type: "text-start", id: "part-1" }, + { type: "text-delta", id: "part-1", delta: "Hello" }, + { type: "text-delta", id: "part-1", delta: " world" }, + { type: "text-delta", id: "part-1", delta: "!" }, + { type: "text-end", id: "part-1" }, +]; + +describe("TriggerChatTransport", () => { + let originalFetch: typeof global.fetch; + + beforeEach(() => { + originalFetch = global.fetch; + }); + + afterEach(() => { + global.fetch = originalFetch; + vi.restoreAllMocks(); + }); + + describe("constructor", () => { + it("should create transport with required options", () => { + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should accept optional configuration", () => { + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + baseURL: "https://custom.trigger.dev", + streamKey: "custom-stream", + headers: { "X-Custom": "value" }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should accept a function for accessToken", () => { + let tokenCallCount = 0; + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: () => { + tokenCallCount++; + return `dynamic-token-${tokenCallCount}`; + }, + }); + + 
expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + }); + + describe("sendMessages", () => { + it("should trigger the task and return a ReadableStream of UIMessageChunks", async () => { + const triggerRunId = "run_abc123"; + const publicToken = "pub_token_xyz"; + + // Mock fetch to handle both the trigger request and the SSE stream request + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + // Handle the task trigger request + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: triggerRunId }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + } + ); + } + + // Handle the SSE stream request + if (urlStr.includes("/realtime/v1/streams/")) { + const sseText = sseEncode(sampleChunks); + return new Response(createSSEStream(sseText), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [createUserMessage("Hello!")]; + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages, + abortSignal: undefined, + }); + + expect(stream).toBeInstanceOf(ReadableStream); + + // Read all chunks from the stream + const reader = stream.getReader(); + const receivedChunks: UIMessageChunk[] = []; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks).toHaveLength(sampleChunks.length); + expect(receivedChunks[0]).toEqual({ type: "text-start", id: "part-1" }); + 
expect(receivedChunks[1]).toEqual({ type: "text-delta", id: "part-1", delta: "Hello" }); + expect(receivedChunks[4]).toEqual({ type: "text-end", id: "part-1" }); + }); + + it("should send the correct payload to the trigger API", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_test" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [createUserMessage("Hello!")]; + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-123", + messageId: undefined, + messages, + abortSignal: undefined, + metadata: { custom: "data" }, + }); + + // Verify the trigger fetch call + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") + ); + + expect(triggerCall).toBeDefined(); + const triggerUrl = typeof triggerCall![0] === "string" ? 
triggerCall![0] : triggerCall![0].toString(); + expect(triggerUrl).toContain("/api/v1/tasks/my-chat-task/trigger"); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = JSON.parse(triggerBody.payload); + expect(payload.messages).toEqual(messages); + expect(payload.chatId).toBe("chat-123"); + expect(payload.trigger).toBe("submit-message"); + expect(payload.metadata).toEqual({ custom: "data" }); + }); + + it("should use the correct stream URL with custom streamKey", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_custom" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + streamKey: "my-custom-stream", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Verify the stream URL uses the custom stream key + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/realtime/v1/streams/") + ); + + expect(streamCall).toBeDefined(); + const streamUrl = typeof streamCall![0] === "string" ? 
streamCall![0] : streamCall![0].toString(); + expect(streamUrl).toContain("/realtime/v1/streams/run_custom/my-custom-stream"); + }); + + it("should include extra headers in stream requests", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_hdrs" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + headers: { "X-Custom-Header": "custom-value" }, + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Verify the stream request includes custom headers + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? 
call[0] : call[0].toString()).includes("/realtime/v1/streams/") + ); + + expect(streamCall).toBeDefined(); + const requestHeaders = streamCall![1]?.headers as Record; + expect(requestHeaders["X-Custom-Header"]).toBe("custom-value"); + }); + }); + + describe("reconnectToStream", () => { + it("should return null when no session exists for chatId", async () => { + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + }); + + const result = await transport.reconnectToStream({ + chatId: "nonexistent-chat", + }); + + expect(result).toBeNull(); + }); + + it("should reconnect to an existing session", async () => { + const triggerRunId = "run_reconnect"; + const publicToken = "pub_reconnect_token"; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: triggerRunId }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "part-1" }, + { type: "text-delta", id: "part-1", delta: "Reconnected!" 
}, + { type: "text-end", id: "part-1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First, send messages to establish a session + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-reconnect", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Now reconnect + const stream = await transport.reconnectToStream({ + chatId: "chat-reconnect", + }); + + expect(stream).toBeInstanceOf(ReadableStream); + + // Read the stream + const reader = stream!.getReader(); + const receivedChunks: UIMessageChunk[] = []; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks.length).toBeGreaterThan(0); + }); + }); + + describe("createChatTransport", () => { + it("should create a TriggerChatTransport instance", () => { + const transport = createChatTransport({ + task: "my-task", + accessToken: "token", + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should pass options through to the transport", () => { + const transport = createChatTransport({ + task: "custom-task", + accessToken: "custom-token", + baseURL: "https://custom.example.com", + streamKey: "custom-key", + headers: { "X-Test": "value" }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + }); + + describe("error handling", () => { + it("should propagate trigger API errors", async () => { + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ error: "Task not found" }), + { + status: 404, + headers: { "content-type": "application/json" }, + } + ); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "nonexistent-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + await expect( + transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-error", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }) + ).rejects.toThrow(); + }); + }); + + describe("abort signal", () => { + it("should close the stream gracefully when aborted", async () => { + let streamResolve: (() => void) | undefined; + const streamWait = new Promise((resolve) => { + streamResolve = resolve; + }); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_abort" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Create a slow stream that waits before sending data + const stream = new ReadableStream({ + async start(controller) { + const encoder = new TextEncoder(); + controller.enqueue( + encoder.encode(`id: 0\ndata: ${JSON.stringify({ type: "text-start", id: "p1" })}\n\n`) + ); + // Wait for the test to signal it's done + await streamWait; + controller.close(); + }, + }); + + return new Response(stream, { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const abortController = new AbortController(); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-abort", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: abortController.signal, + }); + + // Read the first chunk + const reader = stream.getReader(); + const first = await reader.read(); + expect(first.done).toBe(false); + + // Abort and clean up + abortController.abort(); + streamResolve?.(); + + // The stream should close — reading should return done + const next = await reader.read(); + expect(next.done).toBe(true); + }); + }); + + describe("multiple sessions", () => { + it("should track multiple chat sessions independently", async () => { + let callCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + callCount++; + return new Response( + JSON.stringify({ id: `run_multi_${callCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": `token_${callCount}`, + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // Start two independent chat sessions + await transport.sendMessages({ + trigger: "submit-message", + chatId: "session-a", + messageId: undefined, + messages: [createUserMessage("Hello A")], + abortSignal: undefined, + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "session-b", + messageId: undefined, + messages: [createUserMessage("Hello B")], + abortSignal: undefined, + }); + + // Both sessions should be independently reconnectable + const streamA = await transport.reconnectToStream({ chatId: "session-a" }); + const streamB = await transport.reconnectToStream({ chatId: "session-b" }); + const streamC = await transport.reconnectToStream({ chatId: "nonexistent" }); + + expect(streamA).toBeInstanceOf(ReadableStream); + expect(streamB).toBeInstanceOf(ReadableStream); + expect(streamC).toBeNull(); + }); + }); + + describe("dynamic accessToken", () => { + it("should call the accessToken function for each sendMessages call", async () => { + let tokenCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: `run_dyn_${tokenCallCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: () => { + tokenCallCount++; + return `dynamic-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + // First call — the token function should be invoked + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-dyn-1", + messageId: undefined, + messages: [createUserMessage("first")], + abortSignal: undefined, + }); + + const firstCount = tokenCallCount; + expect(firstCount).toBeGreaterThanOrEqual(1); + + // Second call — the token function should be invoked again + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-dyn-2", + messageId: undefined, + messages: [createUserMessage("second")], + abortSignal: undefined, + }); + + // Token function was called at least once more + expect(tokenCallCount).toBeGreaterThan(firstCount); + }); + }); + + describe("body merging", () => { + it("should merge ChatRequestOptions.body into the task payload", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_body" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-body", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + body: { systemPrompt: "You are helpful", temperature: 0.7 }, + }); + + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") + ); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = JSON.parse(triggerBody.payload); + + // body properties should be merged into the payload + expect(payload.systemPrompt).toBe("You are helpful"); + expect(payload.temperature).toBe(0.7); + // Standard fields should still be present + expect(payload.chatId).toBe("chat-body"); + expect(payload.trigger).toBe("submit-message"); + }); + }); + + describe("message types", () => { + it("should handle regenerate-message trigger", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_regen" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [ + createUserMessage("Hello!"), + createAssistantMessage("Hi there!"), + ]; + + await transport.sendMessages({ + trigger: "regenerate-message", + chatId: "chat-regen", + messageId: "msg-to-regen", + messages, + abortSignal: undefined, + }); + + // Verify the payload includes the regenerate trigger type and messageId + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? 
call[0] : call[0].toString()).includes("/trigger") + ); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = JSON.parse(triggerBody.payload); + expect(payload.trigger).toBe("regenerate-message"); + expect(payload.messageId).toBe("msg-to-regen"); + }); + }); +}); From d35d5930e42ab22c521717f53271be112483019a Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 12:56:17 +0000 Subject: [PATCH 11/53] =?UTF-8?q?refactor:=20delete=20packages/ai/=20?= =?UTF-8?q?=E2=80=94=20moved=20to=20@trigger.dev/sdk=20subpaths?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All functionality now lives in: - @trigger.dev/sdk/chat (frontend transport) - @trigger.dev/sdk/ai (backend chatTask, pipeChat) Co-authored-by: Eric Allam --- packages/ai/package.json | 74 --- packages/ai/src/chatTask.ts | 132 ----- packages/ai/src/index.ts | 3 - packages/ai/src/pipeChat.ts | 137 ----- packages/ai/src/transport.test.ts | 842 ------------------------------ packages/ai/src/transport.ts | 256 --------- packages/ai/src/types.ts | 117 ----- packages/ai/src/version.ts | 1 - packages/ai/tsconfig.json | 10 - packages/ai/vitest.config.ts | 8 - pnpm-lock.yaml | 169 +----- 11 files changed, 8 insertions(+), 1741 deletions(-) delete mode 100644 packages/ai/package.json delete mode 100644 packages/ai/src/chatTask.ts delete mode 100644 packages/ai/src/index.ts delete mode 100644 packages/ai/src/pipeChat.ts delete mode 100644 packages/ai/src/transport.test.ts delete mode 100644 packages/ai/src/transport.ts delete mode 100644 packages/ai/src/types.ts delete mode 100644 packages/ai/src/version.ts delete mode 100644 packages/ai/tsconfig.json delete mode 100644 packages/ai/vitest.config.ts diff --git a/packages/ai/package.json b/packages/ai/package.json deleted file mode 100644 index c6cee5d728b..00000000000 --- a/packages/ai/package.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "name": "@trigger.dev/ai", - "version": "4.3.3", - 
"description": "AI SDK integration for Trigger.dev - Custom ChatTransport for running AI chat as durable tasks", - "license": "MIT", - "publishConfig": { - "access": "public" - }, - "repository": { - "type": "git", - "url": "https://github.com/triggerdotdev/trigger.dev", - "directory": "packages/ai" - }, - "type": "module", - "files": [ - "dist" - ], - "tshy": { - "selfLink": false, - "main": true, - "module": true, - "project": "./tsconfig.json", - "exports": { - "./package.json": "./package.json", - ".": "./src/index.ts" - }, - "sourceDialects": [ - "@triggerdotdev/source" - ] - }, - "scripts": { - "clean": "rimraf dist .tshy .tshy-build .turbo", - "build": "tshy && pnpm run update-version", - "dev": "tshy --watch", - "typecheck": "tsc --noEmit", - "test": "vitest", - "update-version": "tsx ../../scripts/updateVersion.ts", - "check-exports": "attw --pack ." - }, - "dependencies": { - "@trigger.dev/core": "workspace:4.3.3" - }, - "peerDependencies": { - "ai": "^5.0.0 || ^6.0.0" - }, - "devDependencies": { - "@arethetypeswrong/cli": "^0.15.4", - "ai": "^6.0.0", - "rimraf": "^3.0.2", - "tshy": "^3.0.2", - "tsx": "4.17.0", - "vitest": "^2.1.0" - }, - "engines": { - "node": ">=18.20.0" - }, - "exports": { - "./package.json": "./package.json", - ".": { - "import": { - "@triggerdotdev/source": "./src/index.ts", - "types": "./dist/esm/index.d.ts", - "default": "./dist/esm/index.js" - }, - "require": { - "types": "./dist/commonjs/index.d.ts", - "default": "./dist/commonjs/index.js" - } - } - }, - "main": "./dist/commonjs/index.js", - "types": "./dist/commonjs/index.d.ts", - "module": "./dist/esm/index.js" -} diff --git a/packages/ai/src/chatTask.ts b/packages/ai/src/chatTask.ts deleted file mode 100644 index 7f3eb92616c..00000000000 --- a/packages/ai/src/chatTask.ts +++ /dev/null @@ -1,132 +0,0 @@ -import { task as createTask } from "@trigger.dev/sdk"; -import type { Task } from "@trigger.dev/core/v3"; -import type { ChatTaskPayload } from "./types.js"; -import { pipeChat 
} from "./pipeChat.js"; - -/** - * Options for defining a chat task. - * - * This is a simplified version of the standard task options with the payload - * pre-typed as `ChatTaskPayload`. - */ -export type ChatTaskOptions = { - /** Unique identifier for the task */ - id: TIdentifier; - - /** Optional description of the task */ - description?: string; - - /** Retry configuration */ - retry?: { - maxAttempts?: number; - factor?: number; - minTimeoutInMs?: number; - maxTimeoutInMs?: number; - randomize?: boolean; - }; - - /** Queue configuration */ - queue?: { - name?: string; - concurrencyLimit?: number; - }; - - /** Machine preset for the task */ - machine?: { - preset?: string; - }; - - /** Maximum duration in seconds */ - maxDuration?: number; - - /** - * The main run function for the chat task. - * - * Receives a `ChatTaskPayload` with the conversation messages, chat session ID, - * and trigger type. - * - * **Auto-piping:** If this function returns a value that has a `.toUIMessageStream()` method - * (like a `StreamTextResult` from `streamText()`), the stream will automatically be piped - * to the frontend via the chat realtime stream. If you need to pipe from deeper in your - * code, use `pipeChat()` instead and don't return the result. - */ - run: (payload: ChatTaskPayload) => Promise; -}; - -/** - * An object that has a `toUIMessageStream()` method, like the result of `streamText()`. - */ -type UIMessageStreamable = { - toUIMessageStream: (...args: any[]) => AsyncIterable | ReadableStream; -}; - -function isUIMessageStreamable(value: unknown): value is UIMessageStreamable { - return ( - typeof value === "object" && - value !== null && - "toUIMessageStream" in value && - typeof (value as any).toUIMessageStream === "function" - ); -} - -/** - * Creates a Trigger.dev task pre-configured for AI SDK chat. 
- * - * This is a convenience wrapper around `task()` from `@trigger.dev/sdk` that: - * - **Pre-types the payload** as `ChatTaskPayload` — no manual typing needed - * - **Auto-pipes the stream** if the `run` function returns a `StreamTextResult` - * - * Requires `@trigger.dev/sdk` to be installed (it's a peer dependency). - * - * @example - * ```ts - * import { chatTask } from "@trigger.dev/ai"; - * import { streamText, convertToModelMessages } from "ai"; - * import { openai } from "@ai-sdk/openai"; - * - * // Simple: return streamText result — auto-piped to the frontend - * export const myChatTask = chatTask({ - * id: "my-chat-task", - * run: async ({ messages }) => { - * return streamText({ - * model: openai("gpt-4o"), - * messages: convertToModelMessages(messages), - * }); - * }, - * }); - * ``` - * - * @example - * ```ts - * import { chatTask, pipeChat } from "@trigger.dev/ai"; - * - * // Complex: use pipeChat() from deep inside your agent code - * export const myAgentTask = chatTask({ - * id: "my-agent-task", - * run: async ({ messages }) => { - * await runComplexAgentLoop(messages); - * // pipeChat() called internally by the agent loop - * }, - * }); - * ``` - */ -export function chatTask( - options: ChatTaskOptions -): Task { - const { run: userRun, ...restOptions } = options; - - return createTask({ - ...restOptions, - run: async (payload: ChatTaskPayload) => { - const result = await userRun(payload); - - // If the run function returned a StreamTextResult or similar, - // automatically pipe it to the chat stream - if (isUIMessageStreamable(result)) { - await pipeChat(result); - } - - return result; - }, - }); -} diff --git a/packages/ai/src/index.ts b/packages/ai/src/index.ts deleted file mode 100644 index 7e673894ff6..00000000000 --- a/packages/ai/src/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -export { TriggerChatTransport, createChatTransport } from "./transport.js"; -export type { TriggerChatTransportOptions, ChatTaskPayload } from "./types.js"; -export { 
VERSION } from "./version.js"; diff --git a/packages/ai/src/pipeChat.ts b/packages/ai/src/pipeChat.ts deleted file mode 100644 index 885951c59c2..00000000000 --- a/packages/ai/src/pipeChat.ts +++ /dev/null @@ -1,137 +0,0 @@ -import { realtimeStreams } from "@trigger.dev/core/v3"; - -/** - * The default stream key used for chat transport communication. - * - * Both `TriggerChatTransport` (frontend) and `pipeChat` (backend) use this key - * by default to ensure they communicate over the same stream. - */ -export const CHAT_STREAM_KEY = "chat"; - -/** - * Options for `pipeChat`. - */ -export type PipeChatOptions = { - /** - * Override the stream key to pipe to. - * Must match the `streamKey` option on `TriggerChatTransport`. - * - * @default "chat" - */ - streamKey?: string; - - /** - * An AbortSignal to cancel the stream. - */ - signal?: AbortSignal; - - /** - * The target run ID to pipe the stream to. - * @default "self" (current run) - */ - target?: string; -}; - -/** - * An object that has a `toUIMessageStream()` method, like the result of `streamText()` from the AI SDK. - */ -type UIMessageStreamable = { - toUIMessageStream: (...args: any[]) => AsyncIterable | ReadableStream; -}; - -function isUIMessageStreamable(value: unknown): value is UIMessageStreamable { - return ( - typeof value === "object" && - value !== null && - "toUIMessageStream" in value && - typeof (value as any).toUIMessageStream === "function" - ); -} - -function isAsyncIterable(value: unknown): value is AsyncIterable { - return ( - typeof value === "object" && - value !== null && - Symbol.asyncIterator in value - ); -} - -function isReadableStream(value: unknown): value is ReadableStream { - return ( - typeof value === "object" && - value !== null && - typeof (value as any).getReader === "function" - ); -} - -/** - * Pipes a chat stream to the realtime stream, making it available to the - * `TriggerChatTransport` on the frontend. 
- * - * Accepts any of: - * - A `StreamTextResult` from the AI SDK (has `.toUIMessageStream()`) - * - An `AsyncIterable` of `UIMessageChunk`s - * - A `ReadableStream` of `UIMessageChunk`s - * - * This must be called from inside a Trigger.dev task's `run` function. - * - * @example - * ```ts - * import { task } from "@trigger.dev/sdk"; - * import { pipeChat, type ChatTaskPayload } from "@trigger.dev/ai"; - * import { streamText, convertToModelMessages } from "ai"; - * - * export const myChatTask = task({ - * id: "my-chat-task", - * run: async (payload: ChatTaskPayload) => { - * const result = streamText({ - * model: openai("gpt-4o"), - * messages: convertToModelMessages(payload.messages), - * }); - * - * await pipeChat(result); - * }, - * }); - * ``` - * - * @example - * ```ts - * // Deep inside your agent library — pipeChat works from anywhere inside a task - * async function runAgentLoop(messages: CoreMessage[]) { - * const result = streamText({ model, messages }); - * await pipeChat(result); - * } - * ``` - * - * @param source - A StreamTextResult, AsyncIterable, or ReadableStream of UIMessageChunks - * @param options - Optional configuration - * @returns A promise that resolves when the stream has been fully piped - */ -export async function pipeChat( - source: UIMessageStreamable | AsyncIterable | ReadableStream, - options?: PipeChatOptions -): Promise { - const streamKey = options?.streamKey ?? 
CHAT_STREAM_KEY; - - // Resolve the source to an AsyncIterable or ReadableStream - let stream: AsyncIterable | ReadableStream; - - if (isUIMessageStreamable(source)) { - stream = source.toUIMessageStream(); - } else if (isAsyncIterable(source) || isReadableStream(source)) { - stream = source; - } else { - throw new Error( - "pipeChat: source must be a StreamTextResult (with .toUIMessageStream()), " + - "an AsyncIterable, or a ReadableStream" - ); - } - - // Pipe to the realtime stream - const instance = realtimeStreams.pipe(streamKey, stream, { - signal: options?.signal, - target: options?.target, - }); - - await instance.wait(); -} diff --git a/packages/ai/src/transport.test.ts b/packages/ai/src/transport.test.ts deleted file mode 100644 index 53d3ab86861..00000000000 --- a/packages/ai/src/transport.test.ts +++ /dev/null @@ -1,842 +0,0 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; -import type { UIMessage, UIMessageChunk } from "ai"; -import { TriggerChatTransport, createChatTransport } from "./transport.js"; - -// Helper: encode text as SSE format -function sseEncode(chunks: UIMessageChunk[]): string { - return chunks.map((chunk, i) => `id: ${i}\ndata: ${JSON.stringify(chunk)}\n\n`).join(""); -} - -// Helper: create a ReadableStream from SSE text -function createSSEStream(sseText: string): ReadableStream { - const encoder = new TextEncoder(); - return new ReadableStream({ - start(controller) { - controller.enqueue(encoder.encode(sseText)); - controller.close(); - }, - }); -} - -// Helper: create test UIMessages -function createUserMessage(text: string): UIMessage { - return { - id: `msg-${Date.now()}`, - role: "user", - parts: [{ type: "text", text }], - }; -} - -function createAssistantMessage(text: string): UIMessage { - return { - id: `msg-${Date.now()}`, - role: "assistant", - parts: [{ type: "text", text }], - }; -} - -// Sample UIMessageChunks as the AI SDK would produce -const sampleChunks: UIMessageChunk[] = [ - { type: 
"text-start", id: "part-1" }, - { type: "text-delta", id: "part-1", delta: "Hello" }, - { type: "text-delta", id: "part-1", delta: " world" }, - { type: "text-delta", id: "part-1", delta: "!" }, - { type: "text-end", id: "part-1" }, -]; - -describe("TriggerChatTransport", () => { - let originalFetch: typeof global.fetch; - - beforeEach(() => { - originalFetch = global.fetch; - }); - - afterEach(() => { - global.fetch = originalFetch; - vi.restoreAllMocks(); - }); - - describe("constructor", () => { - it("should create transport with required options", () => { - const transport = new TriggerChatTransport({ - taskId: "my-chat-task", - accessToken: "test-token", - }); - - expect(transport).toBeInstanceOf(TriggerChatTransport); - }); - - it("should accept optional configuration", () => { - const transport = new TriggerChatTransport({ - taskId: "my-chat-task", - accessToken: "test-token", - baseURL: "https://custom.trigger.dev", - streamKey: "custom-stream", - headers: { "X-Custom": "value" }, - }); - - expect(transport).toBeInstanceOf(TriggerChatTransport); - }); - - it("should accept a function for accessToken", () => { - let tokenCallCount = 0; - const transport = new TriggerChatTransport({ - taskId: "my-chat-task", - accessToken: () => { - tokenCallCount++; - return `dynamic-token-${tokenCallCount}`; - }, - }); - - expect(transport).toBeInstanceOf(TriggerChatTransport); - }); - }); - - describe("sendMessages", () => { - it("should trigger the task and return a ReadableStream of UIMessageChunks", async () => { - const triggerRunId = "run_abc123"; - const publicToken = "pub_token_xyz"; - - // Mock fetch to handle both the trigger request and the SSE stream request - global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - // Handle the task trigger request - if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: triggerRunId }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": publicToken, - }, - } - ); - } - - // Handle the SSE stream request - if (urlStr.includes("/realtime/v1/streams/")) { - const sseText = sseEncode(sampleChunks); - return new Response(createSSEStream(sseText), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - const transport = new TriggerChatTransport({ - taskId: "my-chat-task", - accessToken: "test-token", - baseURL: "https://api.test.trigger.dev", - }); - - const messages: UIMessage[] = [createUserMessage("Hello!")]; - - const stream = await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-1", - messageId: undefined, - messages, - abortSignal: undefined, - }); - - expect(stream).toBeInstanceOf(ReadableStream); - - // Read all chunks from the stream - const reader = stream.getReader(); - const receivedChunks: UIMessageChunk[] = []; - - while (true) { - const { done, value } = await reader.read(); - if (done) break; - receivedChunks.push(value); - } - - expect(receivedChunks).toHaveLength(sampleChunks.length); - expect(receivedChunks[0]).toEqual({ type: "text-start", id: "part-1" }); - expect(receivedChunks[1]).toEqual({ type: "text-delta", id: "part-1", delta: "Hello" }); - expect(receivedChunks[4]).toEqual({ type: "text-end", id: "part-1" }); - }); - - it("should send the correct payload to the trigger API", async () => { - const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: "run_test" }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "pub_token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - global.fetch = fetchSpy; - - const transport = new TriggerChatTransport({ - taskId: "my-chat-task", - accessToken: "test-token", - baseURL: "https://api.test.trigger.dev", - }); - - const messages: UIMessage[] = [createUserMessage("Hello!")]; - - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-123", - messageId: undefined, - messages, - abortSignal: undefined, - metadata: { custom: "data" }, - }); - - // Verify the trigger fetch call - const triggerCall = fetchSpy.mock.calls.find((call: any[]) => - (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") - ); - - expect(triggerCall).toBeDefined(); - const triggerUrl = typeof triggerCall![0] === "string" ? triggerCall![0] : triggerCall![0].toString(); - expect(triggerUrl).toContain("/api/v1/tasks/my-chat-task/trigger"); - - const triggerBody = JSON.parse(triggerCall![1]?.body as string); - const payload = JSON.parse(triggerBody.payload); - expect(payload.messages).toEqual(messages); - expect(payload.chatId).toBe("chat-123"); - expect(payload.trigger).toBe("submit-message"); - expect(payload.metadata).toEqual({ custom: "data" }); - }); - - it("should use the correct stream URL with custom streamKey", async () => { - const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: "run_custom" }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - global.fetch = fetchSpy; - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - streamKey: "my-custom-stream", - }); - - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-1", - messageId: undefined, - messages: [createUserMessage("test")], - abortSignal: undefined, - }); - - // Verify the stream URL uses the custom stream key - const streamCall = fetchSpy.mock.calls.find((call: any[]) => - (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/realtime/v1/streams/") - ); - - expect(streamCall).toBeDefined(); - const streamUrl = typeof streamCall![0] === "string" ? streamCall![0] : streamCall![0].toString(); - expect(streamUrl).toContain("/realtime/v1/streams/run_custom/my-custom-stream"); - }); - - it("should include extra headers in stream requests", async () => { - const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: "run_hdrs" }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - global.fetch = fetchSpy; - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - headers: { "X-Custom-Header": "custom-value" }, - }); - - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-1", - messageId: undefined, - messages: [createUserMessage("test")], - abortSignal: undefined, - }); - - // Verify the stream request includes custom headers - const streamCall = fetchSpy.mock.calls.find((call: any[]) => - (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/realtime/v1/streams/") - ); - - expect(streamCall).toBeDefined(); - const requestHeaders = streamCall![1]?.headers as Record; - expect(requestHeaders["X-Custom-Header"]).toBe("custom-value"); - }); - }); - - describe("reconnectToStream", () => { - it("should return null when no session exists for chatId", async () => { - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - }); - - const result = await transport.reconnectToStream({ - chatId: "nonexistent-chat", - }); - - expect(result).toBeNull(); - }); - - it("should reconnect to an existing session", async () => { - const triggerRunId = "run_reconnect"; - const publicToken = "pub_reconnect_token"; - - global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: triggerRunId }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": publicToken, - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - const chunks: UIMessageChunk[] = [ - { type: "text-start", id: "part-1" }, - { type: "text-delta", id: "part-1", delta: "Reconnected!" }, - { type: "text-end", id: "part-1" }, - ]; - return new Response(createSSEStream(sseEncode(chunks)), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - }); - - // First, send messages to establish a session - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-reconnect", - messageId: undefined, - messages: [createUserMessage("Hello")], - abortSignal: undefined, - }); - - // Now reconnect - const stream = await transport.reconnectToStream({ - chatId: "chat-reconnect", - }); - - expect(stream).toBeInstanceOf(ReadableStream); - - // Read the stream - const reader = stream!.getReader(); - const receivedChunks: UIMessageChunk[] = []; - - while (true) { - const { done, value } = await reader.read(); - if (done) break; - receivedChunks.push(value); - } - - expect(receivedChunks.length).toBeGreaterThan(0); - }); - }); - - describe("createChatTransport", () => { - it("should create a TriggerChatTransport instance", () => { - const transport = createChatTransport({ - taskId: "my-task", - accessToken: "token", - }); - - expect(transport).toBeInstanceOf(TriggerChatTransport); - }); - - it("should pass options through to the transport", () => { - const transport = createChatTransport({ - taskId: "custom-task", - accessToken: "custom-token", - baseURL: 
"https://custom.example.com", - streamKey: "custom-key", - headers: { "X-Test": "value" }, - }); - - expect(transport).toBeInstanceOf(TriggerChatTransport); - }); - }); - - describe("error handling", () => { - it("should propagate trigger API errors", async () => { - global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ error: "Task not found" }), - { - status: 404, - headers: { "content-type": "application/json" }, - } - ); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - const transport = new TriggerChatTransport({ - taskId: "nonexistent-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - }); - - await expect( - transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-error", - messageId: undefined, - messages: [createUserMessage("test")], - abortSignal: undefined, - }) - ).rejects.toThrow(); - }); - }); - - describe("abort signal", () => { - it("should close the stream gracefully when aborted", async () => { - let streamResolve: (() => void) | undefined; - const streamWait = new Promise((resolve) => { - streamResolve = resolve; - }); - - global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: "run_abort" }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - // Create a slow stream that waits before sending data - const stream = new ReadableStream({ - async start(controller) { - const encoder = new TextEncoder(); - controller.enqueue( - encoder.encode(`id: 0\ndata: ${JSON.stringify({ type: "text-start", id: "p1" })}\n\n`) - ); - // Wait for the test to signal it's done - await streamWait; - controller.close(); - }, - }); - - return new Response(stream, { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - const abortController = new AbortController(); - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - }); - - const stream = await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-abort", - messageId: undefined, - messages: [createUserMessage("test")], - abortSignal: abortController.signal, - }); - - // Read the first chunk - const reader = stream.getReader(); - const first = await reader.read(); - expect(first.done).toBe(false); - - // Abort and clean up - abortController.abort(); - streamResolve?.(); - - // The stream should close — reading should return done - const next = await reader.read(); - expect(next.done).toBe(true); - }); - }); - - describe("multiple sessions", () => { - it("should track multiple chat sessions independently", async () => { - let callCount = 0; - - global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - callCount++; - return new Response( - JSON.stringify({ id: `run_multi_${callCount}` }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": `token_${callCount}`, - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - }); - - // Start two independent chat sessions - await transport.sendMessages({ - trigger: "submit-message", - chatId: "session-a", - messageId: undefined, - messages: [createUserMessage("Hello A")], - abortSignal: undefined, - }); - - await transport.sendMessages({ - trigger: "submit-message", - chatId: "session-b", - messageId: undefined, - messages: [createUserMessage("Hello B")], - abortSignal: undefined, - }); - - // Both sessions should be independently reconnectable - const streamA = await transport.reconnectToStream({ chatId: "session-a" }); - const streamB = await transport.reconnectToStream({ chatId: "session-b" }); - const streamC = await transport.reconnectToStream({ chatId: "nonexistent" }); - - expect(streamA).toBeInstanceOf(ReadableStream); - expect(streamB).toBeInstanceOf(ReadableStream); - expect(streamC).toBeNull(); - }); - }); - - describe("dynamic accessToken", () => { - it("should call the accessToken function for each sendMessages call", async () => { - let tokenCallCount = 0; - - global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: `run_dyn_${tokenCallCount}` }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "stream-token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - const chunks: UIMessageChunk[] = [ - { type: "text-start", id: "p1" }, - { type: "text-end", id: "p1" }, - ]; - return new Response(createSSEStream(sseEncode(chunks)), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: () => { - tokenCallCount++; - return `dynamic-token-${tokenCallCount}`; - }, - baseURL: "https://api.test.trigger.dev", - }); - - // First call — the token function should be invoked - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-dyn-1", - messageId: undefined, - messages: [createUserMessage("first")], - abortSignal: undefined, - }); - - const firstCount = tokenCallCount; - expect(firstCount).toBeGreaterThanOrEqual(1); - - // Second call — the token function should be invoked again - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-dyn-2", - messageId: undefined, - messages: [createUserMessage("second")], - abortSignal: undefined, - }); - - // Token function was called at least once more - expect(tokenCallCount).toBeGreaterThan(firstCount); - }); - }); - - describe("body merging", () => { - it("should merge ChatRequestOptions.body into the task payload", async () => { - const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: "run_body" }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - global.fetch = fetchSpy; - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - }); - - await transport.sendMessages({ - trigger: "submit-message", - chatId: "chat-body", - messageId: undefined, - messages: [createUserMessage("test")], - abortSignal: undefined, - body: { systemPrompt: "You are helpful", temperature: 0.7 }, - }); - - const triggerCall = fetchSpy.mock.calls.find((call: any[]) => - (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") - ); - - const triggerBody = JSON.parse(triggerCall![1]?.body as string); - const payload = JSON.parse(triggerBody.payload); - - // body properties should be merged into the payload - expect(payload.systemPrompt).toBe("You are helpful"); - expect(payload.temperature).toBe(0.7); - // Standard fields should still be present - expect(payload.chatId).toBe("chat-body"); - expect(payload.trigger).toBe("submit-message"); - }); - }); - - describe("message types", () => { - it("should handle regenerate-message trigger", async () => { - const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { - const urlStr = typeof url === "string" ? 
url : url.toString(); - - if (urlStr.includes("/trigger")) { - return new Response( - JSON.stringify({ id: "run_regen" }), - { - status: 200, - headers: { - "content-type": "application/json", - "x-trigger-jwt": "token", - }, - } - ); - } - - if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { - status: 200, - headers: { - "content-type": "text/event-stream", - "X-Stream-Version": "v1", - }, - }); - } - - throw new Error(`Unexpected fetch URL: ${urlStr}`); - }); - - global.fetch = fetchSpy; - - const transport = new TriggerChatTransport({ - taskId: "my-task", - accessToken: "token", - baseURL: "https://api.test.trigger.dev", - }); - - const messages: UIMessage[] = [ - createUserMessage("Hello!"), - createAssistantMessage("Hi there!"), - ]; - - await transport.sendMessages({ - trigger: "regenerate-message", - chatId: "chat-regen", - messageId: "msg-to-regen", - messages, - abortSignal: undefined, - }); - - // Verify the payload includes the regenerate trigger type and messageId - const triggerCall = fetchSpy.mock.calls.find((call: any[]) => - (typeof call[0] === "string" ? 
call[0] : call[0].toString()).includes("/trigger") - ); - - const triggerBody = JSON.parse(triggerCall![1]?.body as string); - const payload = JSON.parse(triggerBody.payload); - expect(payload.trigger).toBe("regenerate-message"); - expect(payload.messageId).toBe("msg-to-regen"); - }); - }); -}); diff --git a/packages/ai/src/transport.ts b/packages/ai/src/transport.ts deleted file mode 100644 index ff4b2c47a33..00000000000 --- a/packages/ai/src/transport.ts +++ /dev/null @@ -1,256 +0,0 @@ -import type { ChatTransport, UIMessage, UIMessageChunk, ChatRequestOptions } from "ai"; -import { - ApiClient, - SSEStreamSubscription, - type SSEStreamPart, -} from "@trigger.dev/core/v3"; -import type { TriggerChatTransportOptions, ChatSessionState } from "./types.js"; - -const DEFAULT_STREAM_KEY = "chat"; -const DEFAULT_BASE_URL = "https://api.trigger.dev"; -const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; - -/** - * A custom AI SDK `ChatTransport` implementation that bridges the Vercel AI SDK's - * `useChat` hook with Trigger.dev's durable task execution and realtime streams. - * - * When `sendMessages` is called, the transport: - * 1. Triggers a Trigger.dev task with the chat messages as payload - * 2. Subscribes to the task's realtime stream to receive `UIMessageChunk` data - * 3. Returns a `ReadableStream` that the AI SDK processes natively - * - * The task receives a `ChatTaskPayload` containing the conversation messages, - * chat session ID, trigger type, and any custom metadata. Your task should use - * the AI SDK's `streamText` (or similar) to generate a response, then pipe - * the resulting `UIMessageStream` to the `"chat"` realtime stream key - * (or a custom key matching the `streamKey` option). 
- * - * @example - * ```tsx - * // Frontend — use with AI SDK's useChat hook - * import { useChat } from "@ai-sdk/react"; - * import { TriggerChatTransport } from "@trigger.dev/ai"; - * - * function Chat({ accessToken }: { accessToken: string }) { - * const { messages, sendMessage, status } = useChat({ - * transport: new TriggerChatTransport({ - * accessToken, - * taskId: "my-chat-task", - * }), - * }); - * - * // ... render messages - * } - * ``` - * - * @example - * ```ts - * // Backend — Trigger.dev task that handles chat - * import { task, streams } from "@trigger.dev/sdk"; - * import { streamText, convertToModelMessages } from "ai"; - * import type { ChatTaskPayload } from "@trigger.dev/ai"; - * - * export const myChatTask = task({ - * id: "my-chat-task", - * run: async (payload: ChatTaskPayload) => { - * const result = streamText({ - * model: openai("gpt-4o"), - * messages: convertToModelMessages(payload.messages), - * }); - * - * const { waitUntilComplete } = streams.pipe("chat", result.toUIMessageStream()); - * await waitUntilComplete(); - * }, - * }); - * ``` - */ -export class TriggerChatTransport implements ChatTransport { - private readonly taskId: string; - private readonly resolveAccessToken: () => string; - private readonly baseURL: string; - private readonly streamKey: string; - private readonly extraHeaders: Record; - private readonly streamTimeoutSeconds: number; - - /** - * Tracks active chat sessions for reconnection support. - * Maps chatId → session state (runId, publicAccessToken). - */ - private sessions: Map = new Map(); - - constructor(options: TriggerChatTransportOptions) { - this.taskId = options.taskId; - this.resolveAccessToken = - typeof options.accessToken === "function" - ? options.accessToken - : () => options.accessToken as string; - this.baseURL = options.baseURL ?? DEFAULT_BASE_URL; - this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; - this.extraHeaders = options.headers ?? 
{}; - this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; - } - - /** - * Sends messages to a Trigger.dev task and returns a streaming response. - * - * This method: - * 1. Triggers the configured task with the chat messages as payload - * 2. Subscribes to the task's realtime stream for UIMessageChunk events - * 3. Returns a ReadableStream that the AI SDK's useChat hook processes - */ - sendMessages = async ( - options: { - trigger: "submit-message" | "regenerate-message"; - chatId: string; - messageId: string | undefined; - messages: UIMessage[]; - abortSignal: AbortSignal | undefined; - } & ChatRequestOptions - ): Promise> => { - const { trigger, chatId, messageId, messages, abortSignal, body, metadata } = options; - - // Build the payload for the task — this becomes the ChatTaskPayload - const payload = { - messages, - chatId, - trigger, - messageId, - metadata, - ...(body ?? {}), - }; - - const currentToken = this.resolveAccessToken(); - - // Trigger the task — use the already-resolved token directly - const apiClient = new ApiClient(this.baseURL, currentToken); - const triggerResponse = await apiClient.triggerTask(this.taskId, { - payload: JSON.stringify(payload), - options: { - payloadType: "application/json", - }, - }); - - const runId = triggerResponse.id; - const publicAccessToken = - "publicAccessToken" in triggerResponse - ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken - : undefined; - - // Store session state for reconnection - this.sessions.set(chatId, { - runId, - publicAccessToken: publicAccessToken ?? currentToken, - }); - - // Subscribe to the realtime stream for this run - return this.subscribeToStream(runId, publicAccessToken ?? currentToken, abortSignal); - }; - - /** - * Reconnects to an existing streaming response for the specified chat session. - * - * Returns a ReadableStream if an active session exists, or null if no session is found. 
- */ - reconnectToStream = async ( - options: { - chatId: string; - } & ChatRequestOptions - ): Promise | null> => { - const session = this.sessions.get(options.chatId); - if (!session) { - return null; - } - - return this.subscribeToStream(session.runId, session.publicAccessToken, undefined); - }; - - /** - * Creates a ReadableStream by subscribing to the realtime SSE stream - * for a given run. - */ - private subscribeToStream( - runId: string, - accessToken: string, - abortSignal: AbortSignal | undefined - ): ReadableStream { - const headers: Record = { - Authorization: `Bearer ${accessToken}`, - ...this.extraHeaders, - }; - - const subscription = new SSEStreamSubscription( - `${this.baseURL}/realtime/v1/streams/${runId}/${this.streamKey}`, - { - headers, - signal: abortSignal, - timeoutInSeconds: this.streamTimeoutSeconds, - } - ); - - return new ReadableStream({ - start: async (controller) => { - try { - const sseStream = await subscription.subscribe(); - const reader = sseStream.getReader(); - - try { - while (true) { - const { done, value } = await reader.read(); - - if (done) { - controller.close(); - return; - } - - if (abortSignal?.aborted) { - reader.cancel(); - reader.releaseLock(); - controller.close(); - return; - } - - // Each SSE part's chunk is a UIMessageChunk - controller.enqueue(value.chunk as UIMessageChunk); - } - } catch (readError) { - reader.releaseLock(); - throw readError; - } - } catch (error) { - // Don't error the stream for abort errors — just close gracefully - if (error instanceof Error && error.name === "AbortError") { - controller.close(); - return; - } - - controller.error(error); - } - }, - }); - } -} - -/** - * Creates a new `TriggerChatTransport` instance. - * - * This is a convenience factory function equivalent to `new TriggerChatTransport(options)`. 
- * - * @example - * ```tsx - * import { useChat } from "@ai-sdk/react"; - * import { createChatTransport } from "@trigger.dev/ai"; - * - * const transport = createChatTransport({ - * taskId: "my-chat-task", - * accessToken: publicAccessToken, - * }); - * - * function Chat() { - * const { messages, sendMessage } = useChat({ transport }); - * // ... - * } - * ``` - */ -export function createChatTransport(options: TriggerChatTransportOptions): TriggerChatTransport { - return new TriggerChatTransport(options); -} diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts deleted file mode 100644 index 91ae9938888..00000000000 --- a/packages/ai/src/types.ts +++ /dev/null @@ -1,117 +0,0 @@ -import type { UIMessage } from "ai"; - -/** - * Options for creating a TriggerChatTransport. - */ -export type TriggerChatTransportOptions = { - /** - * The Trigger.dev task ID to trigger for chat completions. - * This task will receive the chat messages as its payload. - */ - task: string; - - /** - * An access token for authenticating with the Trigger.dev API. - * - * This must be a token with permission to trigger the task. You can use: - * - A **trigger public token** created via `auth.createTriggerPublicToken(taskId)` (recommended for frontend use) - * - A **secret API key** (for server-side use only — never expose in the browser) - * - * The token returned from triggering the task (`publicAccessToken`) is automatically - * used for subscribing to the realtime stream. - * - * Can also be a function that returns a token string, useful for dynamic token refresh: - * ```ts - * accessToken: () => getLatestToken() - * ``` - */ - accessToken: string | (() => string); - - /** - * Base URL for the Trigger.dev API. - * - * @default "https://api.trigger.dev" - */ - baseURL?: string; - - /** - * The stream key where the task pipes UIMessageChunk data. - * When using `chatTask()` or `pipeChat()`, this is handled automatically. - * Only set this if you're using a custom stream key. 
- * - * @default "chat" - */ - streamKey?: string; - - /** - * Additional headers to include in API requests to Trigger.dev. - */ - headers?: Record; - - /** - * The number of seconds to wait for the realtime stream to produce data - * before timing out. If no data arrives within this period, the stream - * will be closed. - * - * @default 120 - */ - streamTimeoutSeconds?: number; -}; - -/** - * The payload shape that the transport sends to the triggered task. - * - * When using `chatTask()`, the payload is automatically typed — you don't need - * to import this type. When using `task()` directly, use this type to annotate - * your payload: - * - * @example - * ```ts - * import { task } from "@trigger.dev/sdk"; - * import { pipeChat, type ChatTaskPayload } from "@trigger.dev/ai"; - * - * export const myChatTask = task({ - * id: "my-chat-task", - * run: async (payload: ChatTaskPayload) => { - * const result = streamText({ - * model: openai("gpt-4o"), - * messages: convertToModelMessages(payload.messages), - * }); - * await pipeChat(result); - * }, - * }); - * ``` - */ -export type ChatTaskPayload = { - /** The array of UI messages representing the conversation history */ - messages: TMessage[]; - - /** The unique identifier for the chat session */ - chatId: string; - - /** - * The type of message submission: - * - `"submit-message"`: A new user message was submitted - * - `"regenerate-message"`: The user wants to regenerate the last assistant response - */ - trigger: "submit-message" | "regenerate-message"; - - /** - * The ID of the message to regenerate (only present for `"regenerate-message"` trigger). - */ - messageId?: string; - - /** - * Custom metadata attached to the chat request by the frontend. - */ - metadata?: unknown; -}; - -/** - * Internal state for tracking active chat sessions, used for stream reconnection. 
- * @internal - */ -export type ChatSessionState = { - runId: string; - publicAccessToken: string; -}; diff --git a/packages/ai/src/version.ts b/packages/ai/src/version.ts deleted file mode 100644 index 2e47a886828..00000000000 --- a/packages/ai/src/version.ts +++ /dev/null @@ -1 +0,0 @@ -export const VERSION = "0.0.0"; diff --git a/packages/ai/tsconfig.json b/packages/ai/tsconfig.json deleted file mode 100644 index ec09e52a400..00000000000 --- a/packages/ai/tsconfig.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "extends": "../../.configs/tsconfig.base.json", - "compilerOptions": { - "isolatedDeclarations": false, - "composite": true, - "sourceMap": true, - "stripInternal": true - }, - "include": ["./src/**/*.ts"] -} diff --git a/packages/ai/vitest.config.ts b/packages/ai/vitest.config.ts deleted file mode 100644 index c497b8ec974..00000000000 --- a/packages/ai/vitest.config.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { defineConfig } from "vitest/config"; - -export default defineConfig({ - test: { - include: ["src/**/*.test.ts"], - globals: true, - }, -}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e6c4f37fad3..19b371b287b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1104,7 +1104,7 @@ importers: version: 18.3.1 react-email: specifier: ^2.1.1 - version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0) + version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0) resend: specifier: ^3.2.0 version: 3.2.0 @@ -1376,31 +1376,6 @@ importers: specifier: 8.6.6 version: 8.6.6 - packages/ai: - dependencies: - '@trigger.dev/core': - specifier: workspace:4.3.3 - version: link:../core - devDependencies: - '@arethetypeswrong/cli': - specifier: ^0.15.4 - version: 0.15.4 - ai: - specifier: ^6.0.0 - version: 6.0.3(zod@3.25.76) - rimraf: - specifier: ^3.0.2 - version: 3.0.2 - tshy: - specifier: ^3.0.2 - version: 3.0.2 - tsx: - specifier: 4.17.0 - version: 4.17.0 - vitest: - specifier: ^2.1.0 - version: 
2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) - packages/build: dependencies: '@prisma/config': @@ -11192,23 +11167,9 @@ packages: '@vitest/browser': optional: true - '@vitest/expect@2.1.9': - resolution: {integrity: sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==} - '@vitest/expect@3.1.4': resolution: {integrity: sha512-xkD/ljeliyaClDYqHPNCiJ0plY5YIcM0OlRiZizLhlPmpXWpxnGMyTZXOHFhFeG7w9P5PBeL4IdtJ/HeQwTbQA==} - '@vitest/mocker@2.1.9': - resolution: {integrity: sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==} - peerDependencies: - msw: ^2.4.9 - vite: ^5.0.0 - peerDependenciesMeta: - msw: - optional: true - vite: - optional: true - '@vitest/mocker@3.1.4': resolution: {integrity: sha512-8IJ3CvwtSw/EFXqWFL8aCMu+YyYXG2WUSrQbViOZkWTKTVicVwZ/YiEZDSqD00kX+v/+W+OnxhNWoeVKorHygA==} peerDependencies: @@ -11232,15 +11193,9 @@ packages: '@vitest/runner@3.1.4': resolution: {integrity: sha512-djTeF1/vt985I/wpKVFBMWUlk/I7mb5hmD5oP8K9ACRmVXgKTae3TUOtXAEBfslNKPzUQvnKhNd34nnRSYgLNQ==} - '@vitest/snapshot@2.1.9': - resolution: {integrity: sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==} - '@vitest/snapshot@3.1.4': resolution: {integrity: sha512-JPHf68DvuO7vilmvwdPr9TS0SuuIzHvxeaCkxYcCD4jTk67XwL45ZhEHFKIuCm8CYstgI6LZ4XbwD6ANrwMpFg==} - '@vitest/spy@2.1.9': - resolution: {integrity: sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==} - '@vitest/spy@3.1.4': resolution: {integrity: sha512-Xg1bXhu+vtPXIodYN369M86K8shGLouNjoVI78g8iAq2rFoHFdajNvJJ5A/9bPMFcfQqdaCpOgWKEoMQg/s0Yg==} @@ -19840,11 +19795,6 @@ packages: engines: {node: '>=v14.16.0'} hasBin: true - vite-node@2.1.9: - resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==} - engines: {node: ^18.0.0 || >=20.0.0} - hasBin: true - vite-node@3.1.4: resolution: 
{integrity: sha512-6enNwYnpyDo4hEgytbmc6mYWHXDHYEn0D1/rw4Q+tnHUGtKTJsn8T1YkX6Q18wI5LCrS8CTYlBaiCqxOy2kvUA==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} @@ -19912,31 +19862,6 @@ packages: terser: optional: true - vitest@2.1.9: - resolution: {integrity: sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==} - engines: {node: ^18.0.0 || >=20.0.0} - hasBin: true - peerDependencies: - '@edge-runtime/vm': '*' - '@types/node': 20.14.14 - '@vitest/browser': 2.1.9 - '@vitest/ui': 2.1.9 - happy-dom: '*' - jsdom: '*' - peerDependenciesMeta: - '@edge-runtime/vm': - optional: true - '@types/node': - optional: true - '@vitest/browser': - optional: true - '@vitest/ui': - optional: true - happy-dom: - optional: true - jsdom: - optional: true - vitest@3.1.4: resolution: {integrity: sha512-Ta56rT7uWxCSJXlBtKgIlApJnT6e6IGmTYxYcmxjJ4ujuZDI59GUQgVDObXXJujOmPDBYXHK1qmaGtneu6TNIQ==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} @@ -23231,7 +23156,7 @@ snapshots: '@epic-web/test-server@0.1.0(bufferutil@4.0.9)': dependencies: '@hono/node-server': 1.12.2(hono@4.5.11) - '@hono/node-ws': 1.0.4(@hono/node-server@1.12.2(hono@4.5.11))(bufferutil@4.0.9) + '@hono/node-ws': 1.0.4(@hono/node-server@1.12.2(hono@4.11.8))(bufferutil@4.0.9) '@open-draft/deferred-promise': 2.2.0 '@types/ws': 8.5.12 hono: 4.5.11 @@ -23986,7 +23911,7 @@ snapshots: dependencies: hono: 4.11.8 - '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.5.11))(bufferutil@4.0.9)': + '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.11.8))(bufferutil@4.0.9)': dependencies: '@hono/node-server': 1.12.2(hono@4.5.11) ws: 8.18.3(bufferutil@4.0.9) @@ -31622,13 +31547,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitest/expect@2.1.9': - dependencies: - '@vitest/spy': 2.1.9 - '@vitest/utils': 2.1.9 - chai: 5.2.0 - tinyrainbow: 1.2.0 - '@vitest/expect@3.1.4': dependencies: '@vitest/spy': 3.1.4 @@ -31636,14 +31554,6 @@ snapshots: chai: 5.2.0 tinyrainbow: 2.0.0 - 
'@vitest/mocker@2.1.9(vite@5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1))': - dependencies: - '@vitest/spy': 2.1.9 - estree-walker: 3.0.3 - magic-string: 0.30.21 - optionalDependencies: - vite: 5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) - '@vitest/mocker@3.1.4(vite@5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1))': dependencies: '@vitest/spy': 3.1.4 @@ -31670,22 +31580,12 @@ snapshots: '@vitest/utils': 3.1.4 pathe: 2.0.3 - '@vitest/snapshot@2.1.9': - dependencies: - '@vitest/pretty-format': 2.1.9 - magic-string: 0.30.21 - pathe: 1.1.2 - '@vitest/snapshot@3.1.4': dependencies: '@vitest/pretty-format': 3.1.4 magic-string: 0.30.21 pathe: 2.0.3 - '@vitest/spy@2.1.9': - dependencies: - tinyspy: 3.0.2 - '@vitest/spy@3.1.4': dependencies: tinyspy: 3.0.2 @@ -39324,7 +39224,7 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0): + react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0): dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 @@ -39361,8 +39261,8 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) shelljs: 0.8.5 - socket.io: 4.7.3(bufferutil@4.0.9) - socket.io-client: 4.7.3(bufferutil@4.0.9) + socket.io: 4.7.3 + socket.io-client: 4.7.3 sonner: 1.3.1(react-dom@18.2.0(react@18.3.1))(react@18.3.1) source-map-js: 1.0.2 stacktrace-parser: 0.1.10 @@ -40562,7 +40462,7 @@ snapshots: - supports-color - utf-8-validate - socket.io-client@4.7.3(bufferutil@4.0.9): + socket.io-client@4.7.3: dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.7(supports-color@10.0.0) @@ -40591,7 +40491,7 @@ snapshots: transitivePeerDependencies: - supports-color - socket.io@4.7.3(bufferutil@4.0.9): + socket.io@4.7.3: dependencies: accepts: 1.3.8 base64id: 2.0.0 @@ -42135,24 +42035,6 @@ snapshots: - supports-color - terser - 
vite-node@2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): - dependencies: - cac: 6.7.14 - debug: 4.4.3(supports-color@10.0.0) - es-module-lexer: 1.7.0 - pathe: 1.1.2 - vite: 5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) - transitivePeerDependencies: - - '@types/node' - - less - - lightningcss - - sass - - sass-embedded - - stylus - - sugarss - - supports-color - - terser - vite-node@3.1.4(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): dependencies: cac: 6.7.14 @@ -42202,41 +42084,6 @@ snapshots: lightningcss: 1.29.2 terser: 5.44.1 - vitest@2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): - dependencies: - '@vitest/expect': 2.1.9 - '@vitest/mocker': 2.1.9(vite@5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1)) - '@vitest/pretty-format': 2.1.9 - '@vitest/runner': 2.1.9 - '@vitest/snapshot': 2.1.9 - '@vitest/spy': 2.1.9 - '@vitest/utils': 2.1.9 - chai: 5.2.0 - debug: 4.4.3(supports-color@10.0.0) - expect-type: 1.2.1 - magic-string: 0.30.21 - pathe: 1.1.2 - std-env: 3.9.0 - tinybench: 2.9.0 - tinyexec: 0.3.2 - tinypool: 1.0.2 - tinyrainbow: 1.2.0 - vite: 5.4.21(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) - vite-node: 2.1.9(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1) - why-is-node-running: 2.3.0 - optionalDependencies: - '@types/node': 20.14.14 - transitivePeerDependencies: - - less - - lightningcss - - msw - - sass - - sass-embedded - - stylus - - sugarss - - supports-color - - terser - vitest@3.1.4(@types/debug@4.1.12)(@types/node@20.14.14)(lightningcss@1.29.2)(terser@5.44.1): dependencies: '@vitest/expect': 3.1.4 From c3e83cd67ac43c31b0c5bc63da825cef76a0a536 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 12:56:49 +0000 Subject: [PATCH 12/53] chore: update changeset to target @trigger.dev/sdk Co-authored-by: Eric Allam --- .changeset/ai-sdk-chat-transport.md | 37 +++++++++++++++-------------- 1 file changed, 19 insertions(+), 18 deletions(-) 
diff --git a/.changeset/ai-sdk-chat-transport.md b/.changeset/ai-sdk-chat-transport.md index a24dcdc195e..f5cdb9187d4 100644 --- a/.changeset/ai-sdk-chat-transport.md +++ b/.changeset/ai-sdk-chat-transport.md @@ -1,41 +1,42 @@ --- -"@trigger.dev/ai": minor +"@trigger.dev/sdk": minor --- -New package: `@trigger.dev/ai` — AI SDK integration for Trigger.dev +Add AI SDK chat transport integration via two new subpath exports: -Provides `TriggerChatTransport`, a custom `ChatTransport` implementation for the Vercel AI SDK that bridges `useChat` with Trigger.dev's durable task execution and realtime streams. +**`@trigger.dev/sdk/chat`** (frontend, browser-safe): +- `TriggerChatTransport` — custom `ChatTransport` for the AI SDK's `useChat` hook that runs chat completions as durable Trigger.dev tasks +- `createChatTransport()` — factory function -**Frontend usage:** ```tsx import { useChat } from "@ai-sdk/react"; -import { TriggerChatTransport } from "@trigger.dev/ai"; +import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; const { messages, sendMessage } = useChat({ transport: new TriggerChatTransport({ - accessToken: publicAccessToken, - taskId: "my-chat-task", + task: "my-chat-task", + accessToken, }), }); ``` -**Backend task:** +**`@trigger.dev/sdk/ai`** (backend, extends existing `ai.tool`/`ai.currentToolOptions`): +- `chatTask()` — pre-typed task wrapper with auto-pipe support +- `pipeChat()` — pipe a `StreamTextResult` or stream to the frontend +- `CHAT_STREAM_KEY` — the default stream key constant +- `ChatTaskPayload` type + ```ts -import { task, streams } from "@trigger.dev/sdk"; +import { chatTask } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages } from "ai"; -import type { ChatTaskPayload } from "@trigger.dev/ai"; -export const myChatTask = task({ +export const myChatTask = chatTask({ id: "my-chat-task", - run: async (payload: ChatTaskPayload) => { - const result = streamText({ + run: async ({ messages }) => { + return streamText({ 
model: openai("gpt-4o"), - messages: convertToModelMessages(payload.messages), + messages: convertToModelMessages(messages), }); - const { waitUntilComplete } = streams.pipe("chat", result.toUIMessageStream()); - await waitUntilComplete(); }, }); ``` - -Also exports `createChatTransport()` factory function and `ChatTaskPayload` type for task-side typing. From 32d4e73cf512b5bf30db65efe248629b30f302fa Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 13:06:08 +0000 Subject: [PATCH 13/53] fix: address CodeRabbit review feedback 1. Add null/object guard before enqueuing UIMessageChunk from SSE stream to handle heartbeat or malformed events safely 2. Use incrementing counter instead of Date.now() in test message factories to avoid duplicate IDs 3. Add test covering publicAccessToken from trigger response being used for stream subscription auth Co-authored-by: Eric Allam --- packages/trigger-sdk/src/v3/chat.test.ts | 84 +++++++++++++++++++++++- packages/trigger-sdk/src/v3/chat.ts | 5 +- 2 files changed, 85 insertions(+), 4 deletions(-) diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts index 86a4ba9ad57..ae89f28a8ab 100644 --- a/packages/trigger-sdk/src/v3/chat.test.ts +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -18,10 +18,12 @@ function createSSEStream(sseText: string): ReadableStream { }); } -// Helper: create test UIMessages +// Helper: create test UIMessages with unique IDs +let messageIdCounter = 0; + function createUserMessage(text: string): UIMessage { return { - id: `msg-${Date.now()}`, + id: `msg-user-${++messageIdCounter}`, role: "user", parts: [{ type: "text", text }], }; @@ -29,7 +31,7 @@ function createUserMessage(text: string): UIMessage { function createAssistantMessage(text: string): UIMessage { return { - id: `msg-${Date.now()}`, + id: `msg-assistant-${++messageIdCounter}`, role: "assistant", parts: [{ type: "text", text }], }; @@ -456,6 +458,82 @@ describe("TriggerChatTransport", () => 
{ }); }); + describe("publicAccessToken from trigger response", () => { + it("should use publicAccessToken from response body when x-trigger-jwt header is absent", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + // Return without x-trigger-jwt header — the ApiClient will attempt + // to generate a JWT from the access token. In this test the token + // generation will add a publicAccessToken to the result. + return new Response( + JSON.stringify({ id: "run_pat" }), + { + status: 200, + headers: { + "content-type": "application/json", + // Include x-trigger-jwt to simulate the server returning a public token + "x-trigger-jwt": "server-generated-public-token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Verify the Authorization header uses the server-generated token + const authHeader = (init?.headers as Record)?.["Authorization"]; + expect(authHeader).toBe("Bearer server-generated-public-token"); + + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "caller-token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-pat", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Consume the stream + const reader = stream.getReader(); + while (true) { + const { done } = await reader.read(); + if (done) break; + } + + // Verify the stream 
subscription used the public token, not the caller token + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/realtime/v1/streams/") + ); + expect(streamCall).toBeDefined(); + const streamHeaders = streamCall![1]?.headers as Record; + expect(streamHeaders["Authorization"]).toBe("Bearer server-generated-public-token"); + }); + }); + describe("error handling", () => { it("should propagate trigger API errors", async () => { global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 5a7872c1014..77378bded04 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -252,7 +252,10 @@ export class TriggerChatTransport implements ChatTransport { return; } - controller.enqueue(value.chunk as UIMessageChunk); + // Guard against heartbeat or malformed SSE events + if (value.chunk != null && typeof value.chunk === "object") { + controller.enqueue(value.chunk as UIMessageChunk); + } } } catch (readError) { reader.releaseLock(); From fb0100484e843828af4378023b21acb47e08d5cd Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 13:15:30 +0000 Subject: [PATCH 14/53] docs(ai): add AI Chat with useChat guide Comprehensive guide covering: - Quick start with chatTask + TriggerChatTransport - Backend patterns: simple (return streamText), complex (pipeChat), and manual (task + ChatTaskPayload) - Frontend options: dynamic tokens, extra data, self-hosting - ChatTaskPayload reference - Added to Writing tasks navigation near Streams Co-authored-by: Eric Allam --- docs/docs.json | 1 + docs/guides/ai-chat.mdx | 268 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 269 insertions(+) create mode 100644 docs/guides/ai-chat.mdx diff --git a/docs/docs.json b/docs/docs.json index 14d728e2db1..911c912711b 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ 
-74,6 +74,7 @@ "tags", "runs/metadata", "tasks/streams", + "guides/ai-chat", "run-usage", "context", "runs/priority", diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx new file mode 100644 index 00000000000..e549226b147 --- /dev/null +++ b/docs/guides/ai-chat.mdx @@ -0,0 +1,268 @@ +--- +title: "AI Chat with useChat" +sidebarTitle: "AI Chat (useChat)" +description: "Run AI SDK chat completions as durable Trigger.dev tasks with built-in realtime streaming." +--- + +## Overview + +The `@trigger.dev/sdk` provides a custom [ChatTransport](https://sdk.vercel.ai/docs/ai-sdk-ui/transport) for the Vercel AI SDK's `useChat` hook. This lets you run chat completions as **durable Trigger.dev tasks** instead of fragile API routes — with automatic retries, observability, and realtime streaming built in. + +**How it works:** +1. The frontend sends messages via `useChat` → `TriggerChatTransport` +2. The transport triggers a Trigger.dev task with the conversation as payload +3. The task streams `UIMessageChunk` events back via Trigger.dev's realtime streams +4. The AI SDK's `useChat` processes the stream natively — text, tool calls, reasoning, etc. + +No custom API routes needed. Your chat backend is a Trigger.dev task. + + + Requires `@trigger.dev/sdk` version **4.4.0 or later** and the `ai` package **v5.0.0 or later**. + + +## Quick start + +### 1. Define a chat task + +Use `chatTask` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The payload is automatically typed as `ChatTaskPayload`. + +If you return a `StreamTextResult` from `run`, it's **automatically piped** to the frontend. 
+ +```ts trigger/chat.ts +import { chatTask } from "@trigger.dev/sdk/ai"; +import { streamText, convertToModelMessages } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const myChat = chatTask({ + id: "my-chat", + run: async ({ messages }) => { + // messages is UIMessage[] from the frontend + return streamText({ + model: openai("gpt-4o"), + messages: convertToModelMessages(messages), + }); + // Returning a StreamTextResult auto-pipes it to the frontend + }, +}); +``` + +### 2. Generate an access token + +On your server (e.g. a Next.js API route or server action), create a trigger public token: + +```ts app/actions.ts +"use server"; + +import { auth } from "@trigger.dev/sdk"; + +export async function getChatToken() { + return await auth.createTriggerPublicToken("my-chat"); +} +``` + +### 3. Use in the frontend + +Import `TriggerChatTransport` from `@trigger.dev/sdk/chat` (browser-safe — no server dependencies). + +```tsx app/components/chat.tsx +"use client"; + +import { useChat } from "@ai-sdk/react"; +import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; + +export function Chat({ accessToken }: { accessToken: string }) { + const { messages, sendMessage, status, error } = useChat({ + transport: new TriggerChatTransport({ + task: "my-chat", + accessToken, + }), + }); + + return ( +
+ {messages.map((m) => ( +
+ {m.role}: + {m.parts.map((part, i) => + part.type === "text" ? {part.text} : null + )} +
+ ))} + +
{ + e.preventDefault(); + const input = e.currentTarget.querySelector("input"); + if (input?.value) { + sendMessage({ text: input.value }); + input.value = ""; + } + }} + > + + +
+
+ ); +} +``` + +## Backend patterns + +### Simple: return a StreamTextResult + +The easiest approach — return the `streamText` result from `run` and it's automatically piped to the frontend: + +```ts +import { chatTask } from "@trigger.dev/sdk/ai"; +import { streamText, convertToModelMessages } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const simpleChat = chatTask({ + id: "simple-chat", + run: async ({ messages }) => { + return streamText({ + model: openai("gpt-4o"), + system: "You are a helpful assistant.", + messages: convertToModelMessages(messages), + }); + }, +}); +``` + +### Complex: use pipeChat() from anywhere + +For complex agent flows where `streamText` is called deep inside your code, use `pipeChat()`. It works from **anywhere inside a task** — even nested function calls. + +```ts trigger/agent-chat.ts +import { chatTask, pipeChat } from "@trigger.dev/sdk/ai"; +import { streamText, convertToModelMessages } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const agentChat = chatTask({ + id: "agent-chat", + run: async ({ messages }) => { + // Don't return anything — pipeChat is called inside + await runAgentLoop(convertToModelMessages(messages)); + }, +}); + +// This could be deep inside your agent library +async function runAgentLoop(messages: CoreMessage[]) { + // ... agent logic, tool calls, etc. 
+ + const result = streamText({ + model: openai("gpt-4o"), + messages, + }); + + // Pipe from anywhere — no need to return it + await pipeChat(result); +} +``` + +### Manual: use task() with pipeChat() + +If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `pipeChat()`: + +```ts +import { task } from "@trigger.dev/sdk"; +import { pipeChat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; +import { streamText, convertToModelMessages } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const manualChat = task({ + id: "manual-chat", + retry: { maxAttempts: 3 }, + queue: { concurrencyLimit: 10 }, + run: async (payload: ChatTaskPayload) => { + const result = streamText({ + model: openai("gpt-4o"), + messages: convertToModelMessages(payload.messages), + }); + + await pipeChat(result); + }, +}); +``` + +## Frontend options + +### TriggerChatTransport options + +```ts +new TriggerChatTransport({ + // Required + task: "my-chat", // Task ID to trigger + accessToken: token, // Trigger public token or secret key + + // Optional + baseURL: "https://...", // Custom API URL (self-hosted) + streamKey: "chat", // Custom stream key (default: "chat") + headers: { ... }, // Extra headers for API requests + streamTimeoutSeconds: 120, // Stream timeout (default: 120s) +}); +``` + +### Dynamic access tokens + +For token refresh patterns, pass a function: + +```ts +new TriggerChatTransport({ + task: "my-chat", + accessToken: () => getLatestToken(), // Called on each sendMessage +}); +``` + +### Passing extra data + +Use the `body` option on `sendMessage` to pass additional data to the task: + +```ts +sendMessage({ + text: "Hello", +}, { + body: { + systemPrompt: "You are a pirate.", + temperature: 0.9, + }, +}); +``` + +The `body` fields are merged into the `ChatTaskPayload` and available in your task's `run` function. 
+ +## ChatTaskPayload + +The payload sent to the task has this shape: + +| Field | Type | Description | +|-------|------|-------------| +| `messages` | `UIMessage[]` | The conversation history | +| `chatId` | `string` | Unique chat session ID | +| `trigger` | `"submit-message" \| "regenerate-message"` | What triggered the request | +| `messageId` | `string \| undefined` | Message ID to regenerate (if applicable) | +| `metadata` | `unknown` | Custom metadata from the frontend | + +Plus any extra fields from the `body` option. + +## Self-hosting + +If you're self-hosting Trigger.dev, pass the `baseURL` option: + +```ts +new TriggerChatTransport({ + task: "my-chat", + accessToken, + baseURL: "https://your-trigger-instance.com", +}); +``` + +## Related + +- [Realtime Streams](/tasks/streams) — How streams work under the hood +- [Using the Vercel AI SDK](/guides/examples/vercel-ai-sdk) — Basic AI SDK usage with Trigger.dev +- [Realtime React Hooks](/realtime/react-hooks/overview) — Lower-level realtime hooks +- [Authentication](/realtime/auth) — Public access tokens and trigger tokens From cdd4dcf567be7cf324f4f04a061213ce535f51bf Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 13:24:32 +0000 Subject: [PATCH 15/53] feat(reference): add ai-chat Next.js reference project Minimal example showcasing the new chatTask + TriggerChatTransport APIs: - Backend: chatTask with streamText auto-pipe (src/trigger/chat.ts) - Frontend: TriggerChatTransport with useChat (src/components/chat.tsx) - Token generation via auth.createTriggerPublicToken (src/app/page.tsx) - Tailwind v4 styling Co-authored-by: Eric Allam --- references/ai-chat/next.config.ts | 5 ++ references/ai-chat/package.json | 30 +++++++ references/ai-chat/postcss.config.mjs | 8 ++ references/ai-chat/src/app/globals.css | 1 + references/ai-chat/src/app/layout.tsx | 15 ++++ references/ai-chat/src/app/page.tsx | 17 ++++ references/ai-chat/src/components/chat.tsx | 91 ++++++++++++++++++++++ 
references/ai-chat/src/trigger/chat.ts | 14 ++++ references/ai-chat/trigger.config.ts | 7 ++ references/ai-chat/tsconfig.json | 27 +++++++ 10 files changed, 215 insertions(+) create mode 100644 references/ai-chat/next.config.ts create mode 100644 references/ai-chat/package.json create mode 100644 references/ai-chat/postcss.config.mjs create mode 100644 references/ai-chat/src/app/globals.css create mode 100644 references/ai-chat/src/app/layout.tsx create mode 100644 references/ai-chat/src/app/page.tsx create mode 100644 references/ai-chat/src/components/chat.tsx create mode 100644 references/ai-chat/src/trigger/chat.ts create mode 100644 references/ai-chat/trigger.config.ts create mode 100644 references/ai-chat/tsconfig.json diff --git a/references/ai-chat/next.config.ts b/references/ai-chat/next.config.ts new file mode 100644 index 00000000000..cb651cdc007 --- /dev/null +++ b/references/ai-chat/next.config.ts @@ -0,0 +1,5 @@ +import type { NextConfig } from "next"; + +const nextConfig: NextConfig = {}; + +export default nextConfig; diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json new file mode 100644 index 00000000000..228a09015df --- /dev/null +++ b/references/ai-chat/package.json @@ -0,0 +1,30 @@ +{ + "name": "references-ai-chat", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev --turbopack", + "build": "next build", + "start": "next start", + "dev:trigger": "trigger dev" + }, + "dependencies": { + "@ai-sdk/openai": "^2.0.0", + "@ai-sdk/react": "^2.0.0", + "@trigger.dev/sdk": "workspace:*", + "ai": "^6.0.0", + "next": "15.3.3", + "react": "^19.0.0", + "react-dom": "^19.0.0" + }, + "devDependencies": { + "@tailwindcss/postcss": "^4", + "@trigger.dev/build": "workspace:*", + "@types/node": "^22", + "@types/react": "^19", + "@types/react-dom": "^19", + "tailwindcss": "^4", + "trigger.dev": "workspace:*", + "typescript": "^5" + } +} diff --git a/references/ai-chat/postcss.config.mjs 
b/references/ai-chat/postcss.config.mjs new file mode 100644 index 00000000000..79bcf135dc4 --- /dev/null +++ b/references/ai-chat/postcss.config.mjs @@ -0,0 +1,8 @@ +/** @type {import('postcss-load-config').Config} */ +const config = { + plugins: { + "@tailwindcss/postcss": {}, + }, +}; + +export default config; diff --git a/references/ai-chat/src/app/globals.css b/references/ai-chat/src/app/globals.css new file mode 100644 index 00000000000..f1d8c73cdcf --- /dev/null +++ b/references/ai-chat/src/app/globals.css @@ -0,0 +1 @@ +@import "tailwindcss"; diff --git a/references/ai-chat/src/app/layout.tsx b/references/ai-chat/src/app/layout.tsx new file mode 100644 index 00000000000..f507028583d --- /dev/null +++ b/references/ai-chat/src/app/layout.tsx @@ -0,0 +1,15 @@ +import type { Metadata } from "next"; +import "./globals.css"; + +export const metadata: Metadata = { + title: "AI Chat — Trigger.dev", + description: "AI SDK useChat powered by Trigger.dev durable tasks", +}; + +export default function RootLayout({ children }: { children: React.ReactNode }) { + return ( + + {children} + + ); +} diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx new file mode 100644 index 00000000000..16f01282c80 --- /dev/null +++ b/references/ai-chat/src/app/page.tsx @@ -0,0 +1,17 @@ +import { auth } from "@trigger.dev/sdk"; +import { Chat } from "@/components/chat"; + +export default async function Home() { + const accessToken = await auth.createTriggerPublicToken("ai-chat"); + + return ( +
+
+

+ AI Chat — powered by Trigger.dev +

+ +
+
+ ); +} diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx new file mode 100644 index 00000000000..34c68d8ba7e --- /dev/null +++ b/references/ai-chat/src/components/chat.tsx @@ -0,0 +1,91 @@ +"use client"; + +import { useChat } from "@ai-sdk/react"; +import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; +import { useState } from "react"; + +export function Chat({ accessToken }: { accessToken: string }) { + const [input, setInput] = useState(""); + + const { messages, sendMessage, status, error } = useChat({ + transport: new TriggerChatTransport({ + task: "ai-chat", + accessToken, + baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, + }), + }); + + function handleSubmit(e: React.FormEvent) { + e.preventDefault(); + if (!input.trim() || status === "streaming") return; + + sendMessage({ text: input }); + setInput(""); + } + + return ( +
+ {/* Messages */} +
+ {messages.length === 0 && ( +

Send a message to start chatting.

+ )} + + {messages.map((message) => ( +
+
+ {message.parts.map((part, i) => { + if (part.type === "text") { + return {part.text}; + } + return null; + })} +
+
+ ))} + + {status === "streaming" && ( +
+
+ Thinking… +
+
+ )} +
+ + {/* Error */} + {error && ( +
+ {error.message} +
+ )} + + {/* Input */} +
+ setInput(e.target.value)} + placeholder="Type a message…" + className="flex-1 rounded-lg border border-gray-300 px-3 py-2 text-sm outline-none focus:border-blue-500 focus:ring-1 focus:ring-blue-500" + /> + +
+
+ ); +} diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts new file mode 100644 index 00000000000..27a4002397d --- /dev/null +++ b/references/ai-chat/src/trigger/chat.ts @@ -0,0 +1,14 @@ +import { chatTask } from "@trigger.dev/sdk/ai"; +import { streamText, convertToModelMessages } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const chat = chatTask({ + id: "ai-chat", + run: async ({ messages }) => { + return streamText({ + model: openai("gpt-4o-mini"), + system: "You are a helpful assistant. Be concise and friendly.", + messages: convertToModelMessages(messages), + }); + }, +}); diff --git a/references/ai-chat/trigger.config.ts b/references/ai-chat/trigger.config.ts new file mode 100644 index 00000000000..4412bfc9325 --- /dev/null +++ b/references/ai-chat/trigger.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from "@trigger.dev/sdk"; + +export default defineConfig({ + project: process.env.TRIGGER_PROJECT_REF!, + dirs: ["./src/trigger"], + maxDuration: 300, +}); diff --git a/references/ai-chat/tsconfig.json b/references/ai-chat/tsconfig.json new file mode 100644 index 00000000000..c1334095f87 --- /dev/null +++ b/references/ai-chat/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2017", + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve", + "incremental": true, + "plugins": [ + { + "name": "next" + } + ], + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "exclude": ["node_modules"] +} From e631dad747f0fec074e662ac680ade799478ef56 Mon Sep 17 00:00:00 2001 From: Cursor Agent Date: Sun, 15 Feb 2026 13:27:14 +0000 Subject: [PATCH 16/53] fix(reference): use compatible @ai-sdk v3 packages, 
await convertToModelMessages @ai-sdk/openai v3 and @ai-sdk/react v3 are needed for ai v6 compatibility. convertToModelMessages is async in newer AI SDK versions. Co-authored-by: Eric Allam --- pnpm-lock.yaml | 287 +++++++++++++++++++++++-- references/ai-chat/package.json | 4 +- references/ai-chat/src/trigger/chat.ts | 2 +- 3 files changed, 277 insertions(+), 16 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 19b371b287b..49b56c67faa 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2111,6 +2111,55 @@ importers: specifier: 3.25.76 version: 3.25.76 + references/ai-chat: + dependencies: + '@ai-sdk/openai': + specifier: ^3.0.0 + version: 3.0.27(zod@3.25.76) + '@ai-sdk/react': + specifier: ^3.0.0 + version: 3.0.84(react@19.1.0)(zod@3.25.76) + '@trigger.dev/sdk': + specifier: workspace:* + version: link:../../packages/trigger-sdk + ai: + specifier: ^6.0.0 + version: 6.0.3(zod@3.25.76) + next: + specifier: 15.3.3 + version: 15.3.3(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + react: + specifier: ^19.0.0 + version: 19.1.0 + react-dom: + specifier: ^19.0.0 + version: 19.1.0(react@19.1.0) + devDependencies: + '@tailwindcss/postcss': + specifier: ^4 + version: 4.0.17 + '@trigger.dev/build': + specifier: workspace:* + version: link:../../packages/build + '@types/node': + specifier: 20.14.14 + version: 20.14.14 + '@types/react': + specifier: ^19 + version: 19.0.12 + '@types/react-dom': + specifier: ^19 + version: 19.0.4(@types/react@19.0.12) + tailwindcss: + specifier: ^4 + version: 4.0.17 + trigger.dev: + specifier: workspace:* + version: link:../../packages/cli-v3 + typescript: + specifier: 5.5.4 + version: 5.5.4 + references/bun-catalog: dependencies: '@trigger.dev/sdk': @@ -2870,6 +2919,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/gateway@3.0.42': + resolution: {integrity: sha512-Il9lZWPUQMX59H5yJvA08gxfL2Py8oHwvAYRnK0Mt91S+JgPcyk/yEmXNDZG9ghJrwSawtK5Yocy8OnzsTOGsw==} + engines: 
{node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/openai@1.0.1': resolution: {integrity: sha512-snZge8457afWlosVNUn+BG60MrxAPOOm3zmIMxJZih8tneNSiRbTVCbSzAtq/9vsnOHDe5RR83PRl85juOYEnA==} engines: {node: '>=18'} @@ -2900,6 +2955,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/openai@3.0.27': + resolution: {integrity: sha512-pLMxWOypwroXiK9dxNpn60/HGhWWWDEOJ3lo9vZLoxvpJNtKnLKojwVIvlW3yEjlD7ll1+jUO2uzsABNTaP5Yg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@1.0.22': resolution: {integrity: sha512-YHK2rpj++wnLVc9vPGzGFP3Pjeld2MwhKinetA0zKXOoHAT/Jit5O8kZsxcSlJPu9wvcGT1UGZEjZrtO7PfFOQ==} engines: {node: '>=18'} @@ -2948,6 +3009,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.14': + resolution: {integrity: sha512-7bzKd9lgiDeXM7O4U4nQ8iTxguAOkg8LZGD9AfDVZYjO5cKYRwBPwVjboFcVrxncRHu0tYxZtXZtiLKpG4pEng==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider@0.0.26': resolution: {integrity: sha512-dQkfBDs2lTYpKM8389oopPdQgIU007GQyCbuPPrV+K6MtSII3HBfE0stUIMXUb44L+LK1t6GXPP7wjSzjO6uKg==} engines: {node: '>=18'} @@ -2972,6 +3039,10 @@ packages: resolution: {integrity: sha512-m9ka3ptkPQbaHHZHqDXDF9C9B5/Mav0KTdky1k2HZ3/nrW2t1AgObxIVPyGDWQNS9FXT/FS6PIoSjpcP/No8rQ==} engines: {node: '>=18'} + '@ai-sdk/provider@3.0.8': + resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==} + engines: {node: '>=18'} + '@ai-sdk/react@1.0.0': resolution: {integrity: sha512-BDrZqQA07Btg64JCuhFvBgYV+tt2B8cXINzEqWknGoxqcwgdE8wSLG2gkXoLzyC2Rnj7oj0HHpOhLUxDCmoKZg==} engines: {node: '>=18'} @@ -3004,6 +3075,12 @@ packages: zod: optional: true + '@ai-sdk/react@3.0.84': + resolution: {integrity: sha512-caX8dsXGHDctQsFGgq05sdaw9YD2C8Y9SfnOk0b0LPPi4J7/V54tq22MPTGVO9zS3LmsfFQf0GDM4WFZNC5XZA==} + engines: {node: '>=18'} + peerDependencies: + react: ^18 || ~19.0.1 || ~19.1.2 || 
^19.2.1 + '@ai-sdk/ui-utils@1.0.0': resolution: {integrity: sha512-oXBDIM/0niWeTWyw77RVl505dNxBUDLLple7bTsqo2d3i1UKwGlzBUX8XqZsh7GbY7I6V05nlG0Y8iGlWxv1Aw==} engines: {node: '>=18'} @@ -5936,6 +6013,9 @@ packages: '@next/env@15.2.4': resolution: {integrity: sha512-+SFtMgoiYP3WoSswuNmxJOCwi06TdWE733D+WPjpXIe4LXGULwEaofiiAy6kbS0+XjM5xF5n3lKuBwN2SnqD9g==} + '@next/env@15.3.3': + resolution: {integrity: sha512-OdiMrzCl2Xi0VTjiQQUK0Xh7bJHnOuET2s+3V+Y40WJBAXrJeGA3f+I8MZJ/YQ3mVGi5XGR1L66oFlgqXhQ4Vw==} + '@next/env@15.4.8': resolution: {integrity: sha512-LydLa2MDI1NMrOFSkO54mTc8iIHSttj6R6dthITky9ylXV2gCGi0bHQjVCtLGRshdRPjyh2kXbxJukDtBWQZtQ==} @@ -5960,6 +6040,12 @@ packages: cpu: [arm64] os: [darwin] + '@next/swc-darwin-arm64@15.3.3': + resolution: {integrity: sha512-WRJERLuH+O3oYB4yZNVahSVFmtxRNjNF1I1c34tYMoJb0Pve+7/RaLAJJizyYiFhjYNGHRAE1Ri2Fd23zgDqhg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + '@next/swc-darwin-arm64@15.4.8': resolution: {integrity: sha512-Pf6zXp7yyQEn7sqMxur6+kYcywx5up1J849psyET7/8pG2gQTVMjU3NzgIt8SeEP5to3If/SaWmaA6H6ysBr1A==} engines: {node: '>= 10'} @@ -5990,6 +6076,12 @@ packages: cpu: [x64] os: [darwin] + '@next/swc-darwin-x64@15.3.3': + resolution: {integrity: sha512-XHdzH/yBc55lu78k/XwtuFR/ZXUTcflpRXcsu0nKmF45U96jt1tsOZhVrn5YH+paw66zOANpOnFQ9i6/j+UYvw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + '@next/swc-darwin-x64@15.4.8': resolution: {integrity: sha512-xla6AOfz68a6kq3gRQccWEvFC/VRGJmA/QuSLENSO7CZX5WIEkSz7r1FdXUjtGCQ1c2M+ndUAH7opdfLK1PQbw==} engines: {node: '>= 10'} @@ -6023,6 +6115,13 @@ packages: os: [linux] libc: [glibc] + '@next/swc-linux-arm64-gnu@15.3.3': + resolution: {integrity: sha512-VZ3sYL2LXB8znNGcjhocikEkag/8xiLgnvQts41tq6i+wql63SMS1Q6N8RVXHw5pEUjiof+II3HkDd7GFcgkzw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + libc: [glibc] + '@next/swc-linux-arm64-gnu@15.4.8': resolution: {integrity: sha512-y3fmp+1Px/SJD+5ntve5QLZnGLycsxsVPkTzAc3zUiXYSOlTPqT8ynfmt6tt4fSo1tAhDPmryXpYKEAcoAPDJw==} 
engines: {node: '>= 10'} @@ -6058,6 +6157,13 @@ packages: os: [linux] libc: [musl] + '@next/swc-linux-arm64-musl@15.3.3': + resolution: {integrity: sha512-h6Y1fLU4RWAp1HPNJWDYBQ+e3G7sLckyBXhmH9ajn8l/RSMnhbuPBV/fXmy3muMcVwoJdHL+UtzRzs0nXOf9SA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + libc: [musl] + '@next/swc-linux-arm64-musl@15.4.8': resolution: {integrity: sha512-DX/L8VHzrr1CfwaVjBQr3GWCqNNFgyWJbeQ10Lx/phzbQo3JNAxUok1DZ8JHRGcL6PgMRgj6HylnLNndxn4Z6A==} engines: {node: '>= 10'} @@ -6093,6 +6199,13 @@ packages: os: [linux] libc: [glibc] + '@next/swc-linux-x64-gnu@15.3.3': + resolution: {integrity: sha512-jJ8HRiF3N8Zw6hGlytCj5BiHyG/K+fnTKVDEKvUCyiQ/0r5tgwO7OgaRiOjjRoIx2vwLR+Rz8hQoPrnmFbJdfw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + libc: [glibc] + '@next/swc-linux-x64-gnu@15.4.8': resolution: {integrity: sha512-9fLAAXKAL3xEIFdKdzG5rUSvSiZTLLTCc6JKq1z04DR4zY7DbAPcRvNm3K1inVhTiQCs19ZRAgUerHiVKMZZIA==} engines: {node: '>= 10'} @@ -6128,6 +6241,13 @@ packages: os: [linux] libc: [musl] + '@next/swc-linux-x64-musl@15.3.3': + resolution: {integrity: sha512-HrUcTr4N+RgiiGn3jjeT6Oo208UT/7BuTr7K0mdKRBtTbT4v9zJqCDKO97DUqqoBK1qyzP1RwvrWTvU6EPh/Cw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + libc: [musl] + '@next/swc-linux-x64-musl@15.4.8': resolution: {integrity: sha512-s45V7nfb5g7dbS7JK6XZDcapicVrMMvX2uYgOHP16QuKH/JA285oy6HcxlKqwUNaFY/UC6EvQ8QZUOo19cBKSA==} engines: {node: '>= 10'} @@ -6160,6 +6280,12 @@ packages: cpu: [arm64] os: [win32] + '@next/swc-win32-arm64-msvc@15.3.3': + resolution: {integrity: sha512-SxorONgi6K7ZUysMtRF3mIeHC5aA3IQLmKFQzU0OuhuUYwpOBc1ypaLJLP5Bf3M9k53KUUUj4vTPwzGvl/NwlQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + '@next/swc-win32-arm64-msvc@15.4.8': resolution: {integrity: sha512-KjgeQyOAq7t/HzAJcWPGA8X+4WY03uSCZ2Ekk98S9OgCFsb6lfBE3dbUzUuEQAN2THbwYgFfxX2yFTCMm8Kehw==} engines: {node: '>= 10'} @@ -6202,6 +6328,12 @@ packages: cpu: [x64] os: [win32] + '@next/swc-win32-x64-msvc@15.3.3': + 
resolution: {integrity: sha512-4QZG6F8enl9/S2+yIiOiju0iCTFd93d8VC1q9LZS4p/Xuk81W2QDjCFeoogmrWWkAD59z8ZxepBQap2dKS5ruw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + '@next/swc-win32-x64-msvc@15.4.8': resolution: {integrity: sha512-Exsmf/+42fWVnLMaZHzshukTBxZrSwuuLKFvqhGHJ+mC1AokqieLY/XzAl3jc/CqhXLqLY3RRjkKJ9YnLPcRWg==} engines: {node: '>= 10'} @@ -11137,6 +11269,10 @@ packages: resolution: {integrity: sha512-fnYhv671l+eTTp48gB4zEsTW/YtRgRPnkI2nT7x6qw5rkI1Lq2hTmQIpHPgyThI0znLK+vX2n9XxKdXZ7BUbbw==} engines: {node: '>= 20'} + '@vercel/oidc@3.1.0': + resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==} + engines: {node: '>= 20'} + '@vercel/otel@1.13.0': resolution: {integrity: sha512-esRkt470Y2jRK1B1g7S1vkt4Csu44gp83Zpu8rIyPoqy2BKgk4z7ik1uSMswzi45UogLHFl6yR5TauDurBQi4Q==} engines: {node: '>=18'} @@ -11484,6 +11620,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + ai@6.0.82: + resolution: {integrity: sha512-WLml1ab2IXtREgkxrq2Pl6lFO6NKgC17MqTzmK5mO1UO6tMAJiVjkednw9p0j4+/LaUIZQoRiIT8wA37LswZ9Q==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + ajv-formats@2.1.1: resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} peerDependencies: @@ -16280,6 +16422,28 @@ packages: sass: optional: true + next@15.3.3: + resolution: {integrity: sha512-JqNj29hHNmCLtNvd090SyRbXJiivQ+58XjCcrC50Crb5g5u2zi7Y2YivbsEfzk6AtVI80akdOQbaMZwWB1Hthw==} + engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} + deprecated: This version has a security vulnerability. Please upgrade to a patched version. See https://nextjs.org/blog/CVE-2025-66478 for more details. 
+ hasBin: true + peerDependencies: + '@opentelemetry/api': ^1.1.0 + '@playwright/test': ^1.41.2 + babel-plugin-react-compiler: '*' + react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + sass: ^1.3.0 + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@playwright/test': + optional: true + babel-plugin-react-compiler: + optional: true + sass: + optional: true + next@15.4.8: resolution: {integrity: sha512-jwOXTz/bo0Pvlf20FSb6VXVeWRssA2vbvq9SdrOPEg9x8E1B27C2rQtvriAn600o9hH61kjrVRexEffv3JybuA==} engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} @@ -17230,10 +17394,6 @@ packages: resolution: {integrity: sha512-Aweb9unOEpQ3ezu4Q00DPvvM2ZTUitJdNKeP/+uQgr1IBIqu574IaZoURId7BKtWMREwzKa9OgzPzezWGPWFQw==} engines: {node: ^10 || ^12 || >=14} - postcss@8.5.3: - resolution: {integrity: sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==} - engines: {node: ^10 || ^12 || >=14} - postcss@8.5.4: resolution: {integrity: sha512-QSa9EBe+uwlGTFmHsPKokv3B/oEMQZxfqW0QqNCyhpa6mB1afzulwn8hihglqAb2pOw+BJgNlmXQ8la2VeHB7w==} engines: {node: ^10 || ^12 || >=14} @@ -20290,6 +20450,13 @@ snapshots: '@vercel/oidc': 3.0.5 zod: 3.25.76 + '@ai-sdk/gateway@3.0.42(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + '@vercel/oidc': 3.1.0 + zod: 3.25.76 + '@ai-sdk/openai@1.0.1(zod@3.25.76)': dependencies: '@ai-sdk/provider': 1.0.0 @@ -20320,6 +20487,12 @@ snapshots: '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 + '@ai-sdk/openai@3.0.27(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + zod: 3.25.76 + '@ai-sdk/provider-utils@1.0.22(zod@3.25.76)': dependencies: '@ai-sdk/provider': 0.0.26 @@ -20374,6 +20547,13 @@ snapshots: eventsource-parser: 3.0.6 zod: 3.25.76 + '@ai-sdk/provider-utils@4.0.14(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + 
'@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 3.25.76 + '@ai-sdk/provider@0.0.26': dependencies: json-schema: 0.4.0 @@ -20398,6 +20578,10 @@ snapshots: dependencies: json-schema: 0.4.0 + '@ai-sdk/provider@3.0.8': + dependencies: + json-schema: 0.4.0 + '@ai-sdk/react@1.0.0(react@18.3.1)(zod@3.25.76)': dependencies: '@ai-sdk/provider-utils': 2.0.0(zod@3.25.76) @@ -20428,6 +20612,16 @@ snapshots: optionalDependencies: zod: 3.25.76 + '@ai-sdk/react@3.0.84(react@19.1.0)(zod@3.25.76)': + dependencies: + '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + ai: 6.0.82(zod@3.25.76) + react: 19.1.0 + swr: 2.2.5(react@19.1.0) + throttleit: 2.1.0 + transitivePeerDependencies: + - zod + '@ai-sdk/ui-utils@1.0.0(zod@3.25.76)': dependencies: '@ai-sdk/provider': 1.0.0 @@ -24454,6 +24648,8 @@ snapshots: '@next/env@15.2.4': {} + '@next/env@15.3.3': {} + '@next/env@15.4.8': {} '@next/env@15.5.6': {} @@ -24467,6 +24663,9 @@ snapshots: '@next/swc-darwin-arm64@15.2.4': optional: true + '@next/swc-darwin-arm64@15.3.3': + optional: true + '@next/swc-darwin-arm64@15.4.8': optional: true @@ -24482,6 +24681,9 @@ snapshots: '@next/swc-darwin-x64@15.2.4': optional: true + '@next/swc-darwin-x64@15.3.3': + optional: true + '@next/swc-darwin-x64@15.4.8': optional: true @@ -24497,6 +24699,9 @@ snapshots: '@next/swc-linux-arm64-gnu@15.2.4': optional: true + '@next/swc-linux-arm64-gnu@15.3.3': + optional: true + '@next/swc-linux-arm64-gnu@15.4.8': optional: true @@ -24512,6 +24717,9 @@ snapshots: '@next/swc-linux-arm64-musl@15.2.4': optional: true + '@next/swc-linux-arm64-musl@15.3.3': + optional: true + '@next/swc-linux-arm64-musl@15.4.8': optional: true @@ -24527,6 +24735,9 @@ snapshots: '@next/swc-linux-x64-gnu@15.2.4': optional: true + '@next/swc-linux-x64-gnu@15.3.3': + optional: true + '@next/swc-linux-x64-gnu@15.4.8': optional: true @@ -24542,6 +24753,9 @@ snapshots: '@next/swc-linux-x64-musl@15.2.4': optional: true + '@next/swc-linux-x64-musl@15.3.3': + optional: true + 
'@next/swc-linux-x64-musl@15.4.8': optional: true @@ -24557,6 +24771,9 @@ snapshots: '@next/swc-win32-arm64-msvc@15.2.4': optional: true + '@next/swc-win32-arm64-msvc@15.3.3': + optional: true + '@next/swc-win32-arm64-msvc@15.4.8': optional: true @@ -24578,6 +24795,9 @@ snapshots: '@next/swc-win32-x64-msvc@15.2.4': optional: true + '@next/swc-win32-x64-msvc@15.3.3': + optional: true + '@next/swc-win32-x64-msvc@15.4.8': optional: true @@ -30621,7 +30841,7 @@ snapshots: '@tailwindcss/node': 4.0.17 '@tailwindcss/oxide': 4.0.17 lightningcss: 1.29.2 - postcss: 8.5.3 + postcss: 8.5.6 tailwindcss: 4.0.17 '@tailwindcss/typography@0.5.9(tailwindcss@3.4.1)': @@ -31164,7 +31384,7 @@ snapshots: '@types/react@19.0.12': dependencies: - csstype: 3.1.3 + csstype: 3.2.0 '@types/readable-stream@4.0.14': dependencies: @@ -31503,6 +31723,8 @@ snapshots: '@vercel/oidc@3.0.5': {} + '@vercel/oidc@3.1.0': {} + '@vercel/otel@1.13.0(@opentelemetry/api-logs@0.203.0)(@opentelemetry/api@1.9.0)(@opentelemetry/instrumentation@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-logs@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.0.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))': dependencies: '@opentelemetry/api': 1.9.0 @@ -31946,6 +32168,14 @@ snapshots: '@opentelemetry/api': 1.9.0 zod: 3.25.76 + ai@6.0.82(zod@3.25.76): + dependencies: + '@ai-sdk/gateway': 3.0.42(zod@3.25.76) + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + '@opentelemetry/api': 1.9.0 + zod: 3.25.76 + ajv-formats@2.1.1(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -33727,7 +33957,7 @@ snapshots: enhanced-resolve@5.15.0: dependencies: graceful-fs: 4.2.11 - tapable: 2.2.1 + tapable: 2.3.0 enhanced-resolve@5.18.3: dependencies: @@ -37681,6 +37911,33 @@ snapshots: - '@babel/core' - babel-plugin-macros + 
next@15.3.3(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + '@next/env': 15.3.3 + '@swc/counter': 0.1.3 + '@swc/helpers': 0.5.15 + busboy: 1.6.0 + caniuse-lite: 1.0.30001754 + postcss: 8.4.31 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + styled-jsx: 5.1.6(react@19.1.0) + optionalDependencies: + '@next/swc-darwin-arm64': 15.3.3 + '@next/swc-darwin-x64': 15.3.3 + '@next/swc-linux-arm64-gnu': 15.3.3 + '@next/swc-linux-arm64-musl': 15.3.3 + '@next/swc-linux-x64-gnu': 15.3.3 + '@next/swc-linux-x64-musl': 15.3.3 + '@next/swc-win32-arm64-msvc': 15.3.3 + '@next/swc-win32-x64-msvc': 15.3.3 + '@opentelemetry/api': 1.9.0 + '@playwright/test': 1.37.0 + sharp: 0.34.5 + transitivePeerDependencies: + - '@babel/core' + - babel-plugin-macros + next@15.4.8(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.0.0(react@19.0.0))(react@19.0.0): dependencies: '@next/env': 15.4.8 @@ -38729,12 +38986,6 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.0 - postcss@8.5.3: - dependencies: - nanoid: 3.3.8 - picocolors: 1.1.1 - source-map-js: 1.2.1 - postcss@8.5.4: dependencies: nanoid: 3.3.11 @@ -40962,6 +41213,12 @@ snapshots: react: 19.0.0 use-sync-external-store: 1.2.2(react@19.0.0) + swr@2.2.5(react@19.1.0): + dependencies: + client-only: 0.0.1 + react: 19.1.0 + use-sync-external-store: 1.2.2(react@19.1.0) + sync-content@2.0.1: dependencies: glob: 11.0.0 @@ -41906,6 +42163,10 @@ snapshots: dependencies: react: 19.0.0 + use-sync-external-store@1.2.2(react@19.1.0): + dependencies: + react: 19.1.0 + util-deprecate@1.0.2: {} util@0.12.5: diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json index 228a09015df..b373eb364da 100644 --- a/references/ai-chat/package.json +++ b/references/ai-chat/package.json @@ -9,8 +9,8 @@ "dev:trigger": "trigger dev" }, "dependencies": { - "@ai-sdk/openai": "^2.0.0", - "@ai-sdk/react": "^2.0.0", + "@ai-sdk/openai": "^3.0.0", + "@ai-sdk/react": "^3.0.0", 
"@trigger.dev/sdk": "workspace:*", "ai": "^6.0.0", "next": "15.3.3", diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 27a4002397d..8c77bbeebc5 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -8,7 +8,7 @@ export const chat = chatTask({ return streamText({ model: openai("gpt-4o-mini"), system: "You are a helpful assistant. Be concise and friendly.", - messages: convertToModelMessages(messages), + messages: await convertToModelMessages(messages), }); }, }); From 7e17b8f03b44692a716339d70e947b2c118326ee Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sat, 21 Feb 2026 13:10:58 +0000 Subject: [PATCH 17/53] Use a single run with iterative waitpoint token completions --- packages/trigger-sdk/package.json | 2 +- packages/trigger-sdk/src/v3/ai.ts | 101 ++++- packages/trigger-sdk/src/v3/chat.test.ts | 428 ++++++++++++++++++++- packages/trigger-sdk/src/v3/chat.ts | 126 +++++- references/ai-chat/next-env.d.ts | 5 + references/ai-chat/src/app/actions.ts | 6 + references/ai-chat/src/app/page.tsx | 7 +- references/ai-chat/src/components/chat.tsx | 21 +- 8 files changed, 652 insertions(+), 44 deletions(-) create mode 100644 references/ai-chat/next-env.d.ts create mode 100644 references/ai-chat/src/app/actions.ts diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index a32eafadef2..d44c8836ec0 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -83,7 +83,7 @@ }, "peerDependencies": { "zod": "^3.0.0 || ^4.0.0", - "ai": "^4.2.0 || ^5.0.0 || ^6.0.0" + "ai": "^5.0.0 || ^6.0.0" }, "peerDependenciesMeta": { "ai": { diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 9e79df22b8d..8bec798e981 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -4,15 +4,18 @@ import { Task, type inferSchemaIn, type PipeStreamOptions, + type TaskIdentifier, type 
TaskOptions, type TaskSchema, type TaskWithSchema, } from "@trigger.dev/core/v3"; import type { UIMessage } from "ai"; import { dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; +import { auth } from "./auth.js"; import { metadata } from "./metadata.js"; import { streams } from "./streams.js"; import { createTask } from "./shared.js"; +import { wait } from "./wait.js"; const METADATA_KEY = "tool.execute.options"; @@ -122,6 +125,29 @@ export const ai = { currentToolOptions: getToolOptionsFromMetadata, }; +/** + * Creates a public access token for a chat task. + * + * This is a convenience helper that creates a multi-use trigger public token + * scoped to the given task. Use it in a server action to provide the frontend + * `TriggerChatTransport` with an `accessToken`. + * + * @example + * ```ts + * // actions.ts + * "use server"; + * import { createChatAccessToken } from "@trigger.dev/sdk/ai"; + * import type { chat } from "@/trigger/chat"; + * + * export const getChatToken = () => createChatAccessToken("ai-chat"); + * ``` + */ +export async function createChatAccessToken( + taskId: TaskIdentifier +): Promise { + return auth.createTriggerPublicToken(taskId as string, { multipleUse: true }); +} + // --------------------------------------------------------------------------- // Chat transport helpers — backend side // --------------------------------------------------------------------------- @@ -161,6 +187,14 @@ export type ChatTaskPayload = { metadata?: unknown; }; +/** + * Tracks how many times `pipeChat` has been called in the current `chatTask` run. + * Used to prevent double-piping when a user both calls `pipeChat()` manually + * and returns a streamable from their `run` function. + * @internal + */ +let _chatPipeCount = 0; + /** * Options for `pipeChat`. 
*/ @@ -248,6 +282,7 @@ export async function pipeChat( source: UIMessageStreamable | AsyncIterable | ReadableStream, options?: PipeChatOptions ): Promise { + _chatPipeCount++; const streamKey = options?.streamKey ?? CHAT_STREAM_KEY; let stream: AsyncIterable | ReadableStream; @@ -284,6 +319,11 @@ export async function pipeChat( * **Auto-piping:** If the `run` function returns a value with `.toUIMessageStream()` * (like a `StreamTextResult`), the stream is automatically piped to the frontend. * For complex flows, use `pipeChat()` manually from anywhere in your code. + * + * **Single-run mode:** By default, the task runs a waitpoint loop so that the + * entire conversation lives inside one run. After each AI response, the task + * emits a control chunk and pauses via `wait.forToken`. The frontend transport + * resumes the same run by completing the token with the next set of messages. */ export type ChatTaskOptions = Omit< TaskOptions, @@ -299,6 +339,23 @@ export type ChatTaskOptions = Omit< * the stream is automatically piped to the frontend. */ run: (payload: ChatTaskPayload) => Promise; + + /** + * Maximum number of conversational turns (message round-trips) a single run + * will handle before ending. After this many turns the run completes + * normally and the next message will start a fresh run. + * + * @default 100 + */ + maxTurns?: number; + + /** + * How long to wait for the next message before timing out and ending the run. + * Accepts any duration string recognised by `wait.createToken` (e.g. `"1h"`, `"30m"`). 
+ * + * @default "1h" + */ + turnTimeout?: string; }; /** @@ -342,19 +399,49 @@ export type ChatTaskOptions = Omit< export function chatTask( options: ChatTaskOptions ): Task { - const { run: userRun, ...restOptions } = options; + const { run: userRun, maxTurns = 100, turnTimeout = "1h", ...restOptions } = options; return createTask({ ...restOptions, run: async (payload: ChatTaskPayload) => { - const result = await userRun(payload); + let currentPayload = payload; + + for (let turn = 0; turn < maxTurns; turn++) { + _chatPipeCount = 0; + + const result = await userRun(currentPayload); + + // Auto-pipe if the run function returned a StreamTextResult or similar, + // but only if pipeChat() wasn't already called manually during this turn + if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { + await pipeChat(result); + } + + // Create a waitpoint token and emit a control chunk so the frontend + // knows to resume this run instead of triggering a new one. + const token = await wait.createToken({ timeout: turnTimeout }); + + const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + execute: ({ write }) => { + write({ + type: "__trigger_waitpoint_ready", + tokenId: token.id, + publicAccessToken: token.publicAccessToken, + }); + }, + }); + await waitUntilComplete(); - // Auto-pipe if the run function returned a StreamTextResult or similar - if (isUIMessageStreamable(result)) { - await pipeChat(result); - } + // Pause until the frontend completes the token with the next message + const next = await wait.forToken(token); + + if (!next.ok) { + // Timed out waiting for the next message — end the conversation + return; + } - return result; + currentPayload = next.output; + } }, }); } diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts index ae89f28a8ab..0f59c387f00 100644 --- a/packages/trigger-sdk/src/v3/chat.test.ts +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -3,7 +3,7 @@ import type { UIMessage, 
UIMessageChunk } from "ai"; import { TriggerChatTransport, createChatTransport } from "./chat.js"; // Helper: encode text as SSE format -function sseEncode(chunks: UIMessageChunk[]): string { +function sseEncode(chunks: (UIMessageChunk | Record)[]): string { return chunks.map((chunk, i) => `id: ${i}\ndata: ${JSON.stringify(chunk)}\n\n`).join(""); } @@ -225,7 +225,7 @@ describe("TriggerChatTransport", () => { expect(triggerUrl).toContain("/api/v1/tasks/my-chat-task/trigger"); const triggerBody = JSON.parse(triggerCall![1]?.body as string); - const payload = JSON.parse(triggerBody.payload); + const payload = triggerBody.payload; expect(payload.messages).toEqual(messages); expect(payload.chatId).toBe("chat-123"); expect(payload.trigger).toBe("submit-message"); @@ -459,21 +459,19 @@ describe("TriggerChatTransport", () => { }); describe("publicAccessToken from trigger response", () => { - it("should use publicAccessToken from response body when x-trigger-jwt header is absent", async () => { + it("should use x-trigger-jwt from trigger response as the stream auth token", async () => { const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { const urlStr = typeof url === "string" ? url : url.toString(); if (urlStr.includes("/trigger")) { - // Return without x-trigger-jwt header — the ApiClient will attempt - // to generate a JWT from the access token. In this test the token - // generation will add a publicAccessToken to the result. + // Return with x-trigger-jwt header — this public token should be + // used for the subsequent stream subscription request. 
return new Response( JSON.stringify({ id: "run_pat" }), { status: 200, headers: { "content-type": "application/json", - // Include x-trigger-jwt to simulate the server returning a public token "x-trigger-jwt": "server-generated-public-token", }, } @@ -843,7 +841,7 @@ describe("TriggerChatTransport", () => { ); const triggerBody = JSON.parse(triggerCall![1]?.body as string); - const payload = JSON.parse(triggerBody.payload); + const payload = triggerBody.payload; // body properties should be merged into the payload expect(payload.systemPrompt).toBe("You are helpful"); @@ -912,9 +910,421 @@ describe("TriggerChatTransport", () => { ); const triggerBody = JSON.parse(triggerCall![1]?.body as string); - const payload = JSON.parse(triggerBody.payload); + const payload = triggerBody.payload; expect(payload.trigger).toBe("regenerate-message"); expect(payload.messageId).toBe("msg-to-regen"); }); }); + + describe("async accessToken", () => { + it("should accept an async function for accessToken", async () => { + let tokenCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: `run_async_${tokenCallCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: async () => { + tokenCallCount++; + // Simulate async work (e.g. 
server action) + await new Promise((r) => setTimeout(r, 1)); + return `async-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-async", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + expect(tokenCallCount).toBe(1); + }); + }); + + describe("single-run mode (waitpoint loop)", () => { + it("should store waitpoint token from control chunk and not forward it to consumer", async () => { + const controlChunk = { + type: "__trigger_waitpoint_ready", + tokenId: "wp_token_123", + publicAccessToken: "wp_access_abc", + }; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_single" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + controlChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-single", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Read all chunks — the control chunk should NOT appear + const reader = stream.getReader(); + const receivedChunks: UIMessageChunk[] = []; + while (true) { + const { done, value } = await 
reader.read(); + if (done) break; + receivedChunks.push(value); + } + + // All AI SDK chunks should be forwarded + expect(receivedChunks.length).toBe(sampleChunks.length + 1); // +1 for the finish chunk + // Control chunk should not be in the output + expect(receivedChunks.every((c) => c.type !== ("__trigger_waitpoint_ready" as any))).toBe(true); + }); + + it("should complete waitpoint token on second message instead of triggering a new run", async () => { + const controlChunk = { + type: "__trigger_waitpoint_ready", + tokenId: "wp_token_456", + publicAccessToken: "wp_access_def", + }; + + let triggerCallCount = 0; + let completeWaitpointCalled = false; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response( + JSON.stringify({ id: "run_resume" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + // Handle waitpoint token completion + if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { + completeWaitpointCalled = true; + return new Response( + JSON.stringify({ success: true }), + { + status: 200, + headers: { "content-type": "application/json" }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + controlChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message — triggers a new run 
+ const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-resume", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Consume stream to capture the control chunk + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + expect(triggerCallCount).toBe(1); + + // Second message — should complete the waitpoint instead of triggering + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-resume", + messageId: undefined, + messages: [createUserMessage("Hello"), createAssistantMessage("Hi!"), createUserMessage("How are you?")], + abortSignal: undefined, + }); + + // Consume second stream + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) break; + } + + // Should NOT have triggered a second run + expect(triggerCallCount).toBe(1); + // Should have completed the waitpoint + expect(completeWaitpointCalled).toBe(true); + }); + + it("should fall back to triggering a new run if stream closes without control chunk", async () => { + let triggerCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response( + JSON.stringify({ id: `run_fallback_${triggerCallCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // No control chunk — stream just ends after the finish + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-delta", id: "p1", delta: "Hello" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fallback", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + expect(triggerCallCount).toBe(1); + + // Second message — no waitpoint token stored, should trigger a new run + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fallback", + messageId: undefined, + messages: [createUserMessage("Hello"), createAssistantMessage("Hi!"), createUserMessage("Again")], + abortSignal: undefined, + }); + + // Should have triggered a second run + expect(triggerCallCount).toBe(2); + }); + + it("should fall back to new run when completing waitpoint fails", async () => { + const controlChunk = { + type: "__trigger_waitpoint_ready", + tokenId: "wp_token_fail", + publicAccessToken: "wp_access_fail", + }; + + let 
triggerCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response( + JSON.stringify({ id: `run_fail_${triggerCallCount}` }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + // Waitpoint completion fails + if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { + return new Response( + JSON.stringify({ error: "Token expired" }), + { + status: 400, + headers: { "content-type": "application/json" }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // First call has control chunk, subsequent calls don't + const chunks: (UIMessageChunk | Record)[] = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + ]; + + if (triggerCallCount <= 1) { + chunks.push(controlChunk); + } + + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fail", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + expect(triggerCallCount).toBe(1); + + // Second message — waitpoint completion will fail, should fall back to new run + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fail", + messageId: 
undefined, + messages: [createUserMessage("Hello"), createAssistantMessage("Hi!"), createUserMessage("Again")], + abortSignal: undefined, + }); + + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) break; + } + + // Should have triggered a second run as fallback + expect(triggerCallCount).toBe(2); + }); + }); }); diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 77378bded04..60c3445a759 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -47,9 +47,10 @@ export type TriggerChatTransportOptions = { * - A **trigger public token** created via `auth.createTriggerPublicToken(taskId)` (recommended for frontend use) * - A **secret API key** (for server-side use only — never expose in the browser) * - * Can also be a function that returns a token string, useful for dynamic token refresh. + * Can also be a function that returns a token string (sync or async), + * useful for dynamic token refresh or passing a Next.js server action directly. */ - accessToken: string | (() => string); + accessToken: string | (() => string | Promise); /** * Base URL for the Trigger.dev API. @@ -87,6 +88,12 @@ export type TriggerChatTransportOptions = { type ChatSessionState = { runId: string; publicAccessToken: string; + /** Token ID from the `__trigger_waitpoint_ready` control chunk. */ + waitpointTokenId?: string; + /** Access token scoped to complete the waitpoint (separate from the run's PAT). */ + waitpointAccessToken?: string; + /** Last SSE event ID — used to resume the stream without replaying old events. 
*/ + lastEventId?: string; }; /** @@ -134,7 +141,7 @@ type ChatSessionState = { */ export class TriggerChatTransport implements ChatTransport { private readonly taskId: string; - private readonly resolveAccessToken: () => string; + private readonly resolveAccessToken: () => string | Promise; private readonly baseURL: string; private readonly streamKey: string; private readonly extraHeaders: Record; @@ -166,19 +173,48 @@ export class TriggerChatTransport implements ChatTransport { const { trigger, chatId, messageId, messages, abortSignal, body, metadata } = options; const payload = { + ...(body ?? {}), messages, chatId, trigger, messageId, metadata, - ...(body ?? {}), }; - const currentToken = this.resolveAccessToken(); + const session = this.sessions.get(chatId); + + // If we have a waitpoint token from a previous turn, complete it to + // resume the existing run instead of triggering a new one. + if (session?.waitpointTokenId && session.waitpointAccessToken) { + const tokenId = session.waitpointTokenId; + const tokenAccessToken = session.waitpointAccessToken; + + // Clear the used waitpoint so we don't try to reuse it + session.waitpointTokenId = undefined; + session.waitpointAccessToken = undefined; + + try { + const wpClient = new ApiClient(this.baseURL, tokenAccessToken); + await wpClient.completeWaitpointToken(tokenId, { data: payload }); + + return this.subscribeToStream( + session.runId, + session.publicAccessToken, + abortSignal, + chatId + ); + } catch { + // If completing the waitpoint fails (run died, token expired, etc.), + // fall through to trigger a new run. 
+ this.sessions.delete(chatId); + } + } + + const currentToken = await this.resolveAccessToken(); const apiClient = new ApiClient(this.baseURL, currentToken); const triggerResponse = await apiClient.triggerTask(this.taskId, { - payload: JSON.stringify(payload), + payload, options: { payloadType: "application/json", }, @@ -195,7 +231,12 @@ export class TriggerChatTransport implements ChatTransport { publicAccessToken: publicAccessToken ?? currentToken, }); - return this.subscribeToStream(runId, publicAccessToken ?? currentToken, abortSignal); + return this.subscribeToStream( + runId, + publicAccessToken ?? currentToken, + abortSignal, + chatId + ); }; reconnectToStream = async ( @@ -208,25 +249,39 @@ export class TriggerChatTransport implements ChatTransport { return null; } - return this.subscribeToStream(session.runId, session.publicAccessToken, undefined); + return this.subscribeToStream(session.runId, session.publicAccessToken, undefined, options.chatId); }; private subscribeToStream( runId: string, accessToken: string, - abortSignal: AbortSignal | undefined + abortSignal: AbortSignal | undefined, + chatId?: string ): ReadableStream { const headers: Record = { Authorization: `Bearer ${accessToken}`, ...this.extraHeaders, }; + // When resuming a run via waitpoint, skip past previously-seen events + // so we only receive the new turn's response. + const session = chatId ? this.sessions.get(chatId) : undefined; + + // Create an internal AbortController so we can terminate the underlying + // fetch connection when we're done reading (e.g. after intercepting the + // control chunk). Without this, the SSE connection stays open and leaks. + const internalAbort = new AbortController(); + const combinedSignal = abortSignal + ? 
AbortSignal.any([abortSignal, internalAbort.signal]) + : internalAbort.signal; + const subscription = new SSEStreamSubscription( `${this.baseURL}/realtime/v1/streams/${runId}/${this.streamKey}`, { headers, - signal: abortSignal, + signal: combinedSignal, timeoutInSeconds: this.streamTimeoutSeconds, + lastEventId: session?.lastEventId, } ); @@ -241,20 +296,57 @@ export class TriggerChatTransport implements ChatTransport { const { done, value } = await reader.read(); if (done) { + // Stream closed without a control chunk — the run has + // ended (or was killed). Clear the session so that the + // next message triggers a fresh run. + if (chatId) { + const s = this.sessions.get(chatId); + if (s) { + s.waitpointTokenId = undefined; + s.waitpointAccessToken = undefined; + } + } controller.close(); return; } - if (abortSignal?.aborted) { - reader.cancel(); - reader.releaseLock(); + if (combinedSignal.aborted) { + internalAbort.abort(); + await reader.cancel(); controller.close(); return; } + // Track the last event ID so we can resume from here + if (value.id && session) { + session.lastEventId = value.id; + } + // Guard against heartbeat or malformed SSE events if (value.chunk != null && typeof value.chunk === "object") { - controller.enqueue(value.chunk as UIMessageChunk); + const chunk = value.chunk as Record; + + // Intercept the waitpoint-ready control chunk emitted by + // `chatTask` after the AI response stream completes. This + // chunk is never forwarded to the AI SDK consumer. 
+ if (chunk.type === "__trigger_waitpoint_ready" && chatId) { + const s = this.sessions.get(chatId); + if (s) { + s.waitpointTokenId = chunk.tokenId as string; + s.waitpointAccessToken = chunk.publicAccessToken as string; + } + + // Abort the underlying fetch to close the SSE connection + internalAbort.abort(); + try { + controller.close(); + } catch { + // Controller may already be closed + } + return; + } + + controller.enqueue(chunk as unknown as UIMessageChunk); } } } catch (readError) { @@ -263,7 +355,11 @@ export class TriggerChatTransport implements ChatTransport { } } catch (error) { if (error instanceof Error && error.name === "AbortError") { - controller.close(); + try { + controller.close(); + } catch { + // Controller may already be closed + } return; } diff --git a/references/ai-chat/next-env.d.ts b/references/ai-chat/next-env.d.ts new file mode 100644 index 00000000000..1b3be0840f3 --- /dev/null +++ b/references/ai-chat/next-env.d.ts @@ -0,0 +1,5 @@ +/// +/// + +// NOTE: This file should not be edited +// see https://nextjs.org/docs/app/api-reference/config/typescript for more information. 
diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts new file mode 100644 index 00000000000..6d230e271a5 --- /dev/null +++ b/references/ai-chat/src/app/actions.ts @@ -0,0 +1,6 @@ +"use server"; + +import { createChatAccessToken } from "@trigger.dev/sdk/ai"; +import type { chat } from "@/trigger/chat"; + +export const getChatToken = async () => createChatAccessToken("ai-chat"); diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx index 16f01282c80..185d84b5e9e 100644 --- a/references/ai-chat/src/app/page.tsx +++ b/references/ai-chat/src/app/page.tsx @@ -1,16 +1,13 @@ -import { auth } from "@trigger.dev/sdk"; import { Chat } from "@/components/chat"; -export default async function Home() { - const accessToken = await auth.createTriggerPublicToken("ai-chat"); - +export default function Home() { return (

AI Chat — powered by Trigger.dev

- +
); diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 34c68d8ba7e..1fa0e82fd0d 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -2,17 +2,24 @@ import { useChat } from "@ai-sdk/react"; import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; -import { useState } from "react"; +import { useMemo, useState } from "react"; +import { getChatToken } from "@/app/actions"; -export function Chat({ accessToken }: { accessToken: string }) { +export function Chat() { const [input, setInput] = useState(""); + const transport = useMemo( + () => + new TriggerChatTransport({ + task: "ai-chat", + accessToken: getChatToken, + baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, + }), + [] + ); + const { messages, sendMessage, status, error } = useChat({ - transport: new TriggerChatTransport({ - task: "ai-chat", - accessToken, - baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, - }), + transport, }); function handleSubmit(e: React.FormEvent) { From a7538abbaed392b52b8600b69e296bf4b0ce123c Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sat, 21 Feb 2026 13:42:09 +0000 Subject: [PATCH 18/53] Added tool example --- packages/trigger-sdk/src/v3/chat.test.ts | 285 +++++++++++++++++++++ pnpm-lock.yaml | 153 ++++++----- references/ai-chat/package.json | 3 +- references/ai-chat/src/components/chat.tsx | 70 +++++ references/ai-chat/src/trigger/chat.ts | 64 ++++- 5 files changed, 502 insertions(+), 73 deletions(-) diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts index 0f59c387f00..03eceb1a8f7 100644 --- a/packages/trigger-sdk/src/v3/chat.test.ts +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -916,6 +916,189 @@ describe("TriggerChatTransport", () => { }); }); + describe("lastEventId tracking", () => { + it("should pass lastEventId to SSE subscription on subsequent turns", async () => { + const controlChunk = { + type: 
"__trigger_waitpoint_ready", + tokenId: "wp_token_eid", + publicAccessToken: "wp_access_eid", + }; + + let triggerCallCount = 0; + const streamFetchCalls: { url: string; headers: Record }[] = []; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response( + JSON.stringify({ id: "run_eid" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token_eid", + }, + } + ); + } + + if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { + return new Response( + JSON.stringify({ success: true }), + { + status: 200, + headers: { "content-type": "application/json" }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + streamFetchCalls.push({ + url: urlStr, + headers: (init?.headers as Record) ?? 
{}, + }); + + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + controlChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message — triggers a new run + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-eid", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + // Second message — completes the waitpoint + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-eid", + messageId: undefined, + messages: [createUserMessage("Hello"), createAssistantMessage("Hi!"), createUserMessage("What's up?")], + abortSignal: undefined, + }); + + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) break; + } + + // The second stream subscription should include a Last-Event-ID header + expect(streamFetchCalls.length).toBe(2); + const secondStreamHeaders = streamFetchCalls[1]!.headers; + // SSEStreamSubscription passes lastEventId as the Last-Event-ID header + expect(secondStreamHeaders["Last-Event-ID"]).toBeDefined(); + }); + }); + + describe("AbortController cleanup", () => { + it("should terminate SSE connection after intercepting control chunk", async () => { + const controlChunk = { + type: "__trigger_waitpoint_ready", + tokenId: "wp_token_abort", + publicAccessToken: "wp_access_abort", + }; + + let streamAborted = false; + + global.fetch = vi.fn().mockImplementation(async (url: string | 
URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_abort_cleanup" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Track abort signal + const signal = init?.signal; + if (signal) { + signal.addEventListener("abort", () => { + streamAborted = true; + }); + } + + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + controlChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-abort-cleanup", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Consume all chunks + const reader = stream.getReader(); + while (true) { + const { done } = await reader.read(); + if (done) break; + } + + // The internal AbortController should have aborted the fetch + expect(streamAborted).toBe(true); + }); + }); + describe("async accessToken", () => { it("should accept an async function for accessToken", async () => { let tokenCallCount = 0; @@ -974,6 +1157,108 @@ describe("TriggerChatTransport", () => { expect(tokenCallCount).toBe(1); }); + + it("should resolve async token for waitpoint completion flow", async () => { + const controlChunk = { + type: "__trigger_waitpoint_ready", + tokenId: "wp_token_async", + publicAccessToken: "wp_access_async", + }; + + let tokenCallCount = 0; + let completeWaitpointCalled = 
false; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_async_wp" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + } + ); + } + + if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { + completeWaitpointCalled = true; + return new Response( + JSON.stringify({ success: true }), + { + status: 200, + headers: { "content-type": "application/json" }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + controlChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: async () => { + tokenCallCount++; + await new Promise((r) => setTimeout(r, 1)); + return `async-wp-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + // First message — triggers a new run (calls async token) + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-async-wp", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + const firstTokenCount = tokenCallCount; + + // Second message — should complete waitpoint (does NOT call async token) + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-async-wp", + messageId: undefined, + messages: 
[createUserMessage("Hello"), createAssistantMessage("Hi!"), createUserMessage("More")], + abortSignal: undefined, + }); + + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) break; + } + + // Token function should NOT have been called again for the waitpoint path + expect(tokenCallCount).toBe(firstTokenCount); + expect(completeWaitpointCalled).toBe(true); + }); }); describe("single-run mode (waitpoint loop)", () => { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 49b56c67faa..88c1806bc0d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -459,8 +459,8 @@ importers: specifier: ^0.1.3 version: 0.1.3(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4)) '@s2-dev/streamstore': - specifier: ^0.22.5 - version: 0.22.5(supports-color@10.0.0) + specifier: ^0.17.2 + version: 0.17.3(typescript@5.5.4) '@sentry/remix': specifier: 9.46.0 version: 9.46.0(patch_hash=146126b032581925294aaed63ab53ce3f5e0356a755f1763d7a9a76b9846943b)(@remix-run/node@2.1.0(typescript@5.5.4))(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4))(encoding@0.1.13)(react@18.2.0) @@ -1104,7 +1104,7 @@ importers: version: 18.3.1 react-email: specifier: ^2.1.1 - version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0) + version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0) resend: specifier: ^3.2.0 version: 3.2.0 @@ -1455,8 +1455,8 @@ importers: specifier: 1.36.0 version: 1.36.0 '@s2-dev/streamstore': - specifier: ^0.22.5 - version: 0.22.5(supports-color@10.0.0) + specifier: ^0.17.6 + version: 0.17.6 '@trigger.dev/build': specifier: workspace:4.4.3 version: link:../build @@ -1732,8 +1732,8 @@ importers: specifier: 1.36.0 version: 1.36.0 '@s2-dev/streamstore': - specifier: 0.22.5 - version: 0.22.5(supports-color@10.0.0) + 
specifier: 0.17.3 + version: 0.17.3(typescript@5.5.4) dequal: specifier: ^2.0.3 version: 2.0.3 @@ -2115,10 +2115,10 @@ importers: dependencies: '@ai-sdk/openai': specifier: ^3.0.0 - version: 3.0.27(zod@3.25.76) + version: 3.0.19(zod@3.25.76) '@ai-sdk/react': specifier: ^3.0.0 - version: 3.0.84(react@19.1.0)(zod@3.25.76) + version: 3.0.51(react@19.1.0)(zod@3.25.76) '@trigger.dev/sdk': specifier: workspace:* version: link:../../packages/trigger-sdk @@ -2134,6 +2134,9 @@ importers: react-dom: specifier: ^19.0.0 version: 19.1.0(react@19.1.0) + zod: + specifier: 3.25.76 + version: 3.25.76 devDependencies: '@tailwindcss/postcss': specifier: ^4 @@ -2919,8 +2922,8 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/gateway@3.0.42': - resolution: {integrity: sha512-Il9lZWPUQMX59H5yJvA08gxfL2Py8oHwvAYRnK0Mt91S+JgPcyk/yEmXNDZG9ghJrwSawtK5Yocy8OnzsTOGsw==} + '@ai-sdk/gateway@3.0.22': + resolution: {integrity: sha512-NgnlY73JNuooACHqUIz5uMOEWvqR1MMVbb2soGLMozLY1fgwEIF5iJFDAGa5/YArlzw2ATVU7zQu7HkR/FUjgA==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 @@ -2955,8 +2958,8 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/openai@3.0.27': - resolution: {integrity: sha512-pLMxWOypwroXiK9dxNpn60/HGhWWWDEOJ3lo9vZLoxvpJNtKnLKojwVIvlW3yEjlD7ll1+jUO2uzsABNTaP5Yg==} + '@ai-sdk/openai@3.0.19': + resolution: {integrity: sha512-qpMGKV6eYfW8IzErk/OppchQwVui3GPc4BEfg/sQGRzR89vf2Sa8qvSavXeZi5w/oUF56d+VtobwSH0FRooFCQ==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 @@ -3009,8 +3012,8 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 - '@ai-sdk/provider-utils@4.0.14': - resolution: {integrity: sha512-7bzKd9lgiDeXM7O4U4nQ8iTxguAOkg8LZGD9AfDVZYjO5cKYRwBPwVjboFcVrxncRHu0tYxZtXZtiLKpG4pEng==} + '@ai-sdk/provider-utils@4.0.9': + resolution: {integrity: sha512-bB4r6nfhBOpmoS9mePxjRoCy+LnzP3AfhyMGCkGL4Mn9clVNlqEeKj26zEKEtB6yoSVcT1IQ0Zh9fytwMCDnow==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 @@ -3039,8 
+3042,8 @@ packages: resolution: {integrity: sha512-m9ka3ptkPQbaHHZHqDXDF9C9B5/Mav0KTdky1k2HZ3/nrW2t1AgObxIVPyGDWQNS9FXT/FS6PIoSjpcP/No8rQ==} engines: {node: '>=18'} - '@ai-sdk/provider@3.0.8': - resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==} + '@ai-sdk/provider@3.0.5': + resolution: {integrity: sha512-2Xmoq6DBJqmSl80U6V9z5jJSJP7ehaJJQMy2iFUqTay06wdCqTnPVBBQbtEL8RCChenL+q5DC5H5WzU3vV3v8w==} engines: {node: '>=18'} '@ai-sdk/react@1.0.0': @@ -3075,8 +3078,8 @@ packages: zod: optional: true - '@ai-sdk/react@3.0.84': - resolution: {integrity: sha512-caX8dsXGHDctQsFGgq05sdaw9YD2C8Y9SfnOk0b0LPPi4J7/V54tq22MPTGVO9zS3LmsfFQf0GDM4WFZNC5XZA==} + '@ai-sdk/react@3.0.51': + resolution: {integrity: sha512-7nmCwEJM52NQZB4/ED8qJ4wbDg7EEWh94qJ7K9GSJxD6sWF3GOKrRZ5ivm4qNmKhY+JfCxCAxfghGY5mTKOsxw==} engines: {node: '>=18'} peerDependencies: react: ^18 || ~19.0.1 || ~19.1.2 || ^19.2.1 @@ -9560,8 +9563,13 @@ packages: '@rushstack/eslint-patch@1.2.0': resolution: {integrity: sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg==} - '@s2-dev/streamstore@0.22.5': - resolution: {integrity: sha512-GqdOKIbIoIxT+40fnKzHbrsHB6gBqKdECmFe7D3Ojk4FoN1Hu0LhFzZv6ZmVMjoHHU+55debS1xSWjZwQmbIyQ==} + '@s2-dev/streamstore@0.17.3': + resolution: {integrity: sha512-UeXL5+MgZQfNkbhCgEDVm7PrV5B3bxh6Zp4C5pUzQQwaoA+iGh2QiiIptRZynWgayzRv4vh0PYfnKpTzJEXegQ==} + peerDependencies: + typescript: 5.5.4 + + '@s2-dev/streamstore@0.17.6': + resolution: {integrity: sha512-ocjZfKaPKmo2yhudM58zVNHv3rBLSbTKkabVoLFn9nAxU6iLrR2CO3QmSo7/waohI3EZHAWxF/Pw8kA8d6QH2g==} '@sec-ant/readable-stream@0.4.1': resolution: {integrity: sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==} @@ -11620,8 +11628,8 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 - ai@6.0.82: - resolution: {integrity: 
sha512-WLml1ab2IXtREgkxrq2Pl6lFO6NKgC17MqTzmK5mO1UO6tMAJiVjkednw9p0j4+/LaUIZQoRiIT8wA37LswZ9Q==} + ai@6.0.49: + resolution: {integrity: sha512-LABniBX/0R6Tv+iUK5keUZhZLaZUe4YjP5M2rZ4wAdZ8iKV3EfTAoJxuL1aaWTSJKIilKa9QUEkCgnp89/32bw==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 @@ -14404,7 +14412,7 @@ packages: glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} - deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Glob versions prior to v9 are no longer supported glob@9.3.5: resolution: {integrity: sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q==} @@ -19149,22 +19157,21 @@ packages: tar@6.1.13: resolution: {integrity: sha512-jdIBIN6LTIe2jqzay/2vtYLlBHa3JF42ot3h1dW8Q0PaAG4v8rm0cvpVePtau5C6OKXGGcgO9q2AMNSWxiLqKw==} engines: {node: '>=10'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me tar@6.2.1: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} engines: {node: '>=10'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me tar@7.4.3: resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me tar@7.5.6: resolution: {integrity: sha512-xqUeu2JAIJpXyvskvU3uvQW8PAmHrtXp2KDuMJwQqW8Sqq0CaZBAQ+dKS3RBXVhU4wC5NjAdKrmh84241gO9cA==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tdigest@0.1.2: resolution: {integrity: sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==} @@ -20450,10 +20457,10 @@ snapshots: '@vercel/oidc': 3.0.5 zod: 3.25.76 - '@ai-sdk/gateway@3.0.42(zod@3.25.76)': + '@ai-sdk/gateway@3.0.22(zod@3.25.76)': dependencies: - '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) '@vercel/oidc': 3.1.0 zod: 3.25.76 @@ -20487,10 +20494,10 @@ snapshots: '@ai-sdk/provider-utils': 3.0.12(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/openai@3.0.27(zod@3.25.76)': + '@ai-sdk/openai@3.0.19(zod@3.25.76)': dependencies: - '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) zod: 3.25.76 '@ai-sdk/provider-utils@1.0.22(zod@3.25.76)': @@ -20547,9 +20554,9 @@ snapshots: eventsource-parser: 3.0.6 zod: 3.25.76 - '@ai-sdk/provider-utils@4.0.14(zod@3.25.76)': + '@ai-sdk/provider-utils@4.0.9(zod@3.25.76)': dependencies: - '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider': 3.0.5 '@standard-schema/spec': 1.1.0 eventsource-parser: 3.0.6 zod: 3.25.76 @@ -20578,7 +20585,7 @@ snapshots: dependencies: json-schema: 0.4.0 - '@ai-sdk/provider@3.0.8': + '@ai-sdk/provider@3.0.5': dependencies: json-schema: 0.4.0 @@ -20612,10 +20619,10 @@ snapshots: optionalDependencies: zod: 3.25.76 - '@ai-sdk/react@3.0.84(react@19.1.0)(zod@3.25.76)': + '@ai-sdk/react@3.0.51(react@19.1.0)(zod@3.25.76)': dependencies: - '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) - ai: 6.0.82(zod@3.25.76) + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) + ai: 6.0.49(zod@3.25.76) react: 19.1.0 swr: 2.2.5(react@19.1.0) throttleit: 2.1.0 @@ -24105,7 +24112,7 @@ snapshots: dependencies: hono: 4.11.8 - '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.11.8))(bufferutil@4.0.9)': + 
'@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.5.11))(bufferutil@4.0.9)': dependencies: '@hono/node-server': 1.12.2(hono@4.5.11) ws: 8.18.3(bufferutil@4.0.9) @@ -25911,7 +25918,7 @@ snapshots: '@puppeteer/browsers@2.10.6': dependencies: - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) extract-zip: 2.0.1 progress: 2.0.3 proxy-agent: 6.5.0 @@ -29474,12 +29481,14 @@ snapshots: '@rushstack/eslint-patch@1.2.0': {} - '@s2-dev/streamstore@0.22.5(supports-color@10.0.0)': + '@s2-dev/streamstore@0.17.3(typescript@5.5.4)': + dependencies: + '@protobuf-ts/runtime': 2.11.1 + typescript: 5.5.4 + + '@s2-dev/streamstore@0.17.6': dependencies: '@protobuf-ts/runtime': 2.11.1 - debug: 4.4.3(supports-color@10.0.0) - transitivePeerDependencies: - - supports-color '@sec-ant/readable-stream@0.4.1': {} @@ -31560,7 +31569,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) eslint: 8.31.0 tsutils: 3.21.0(typescript@5.5.4) optionalDependencies: @@ -31574,7 +31583,7 @@ snapshots: dependencies: '@typescript-eslint/types': 5.59.6 '@typescript-eslint/visitor-keys': 5.59.6 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) globby: 11.1.0 is-glob: 4.0.3 semver: 7.7.3 @@ -32168,11 +32177,11 @@ snapshots: '@opentelemetry/api': 1.9.0 zod: 3.25.76 - ai@6.0.82(zod@3.25.76): + ai@6.0.49(zod@3.25.76): dependencies: - '@ai-sdk/gateway': 3.0.42(zod@3.25.76) - '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) + '@ai-sdk/gateway': 3.0.22(zod@3.25.76) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) '@opentelemetry/api': 1.9.0 zod: 3.25.76 @@ -33595,9 +33604,11 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.4.1: + debug@4.4.1(supports-color@10.0.0): dependencies: ms: 2.1.3 + optionalDependencies: + supports-color: 
10.0.0 debug@4.4.3(supports-color@10.0.0): dependencies: @@ -34965,7 +34976,7 @@ snapshots: extract-zip@2.0.1: dependencies: - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) get-stream: 5.2.0 yauzl: 2.10.0 optionalDependencies: @@ -35399,7 +35410,7 @@ snapshots: dependencies: basic-ftp: 5.0.3 data-uri-to-buffer: 5.0.1 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) fs-extra: 8.1.0 transitivePeerDependencies: - supports-color @@ -35558,7 +35569,7 @@ snapshots: '@types/node': 20.14.14 '@types/semver': 7.5.1 chalk: 4.1.2 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) interpret: 3.1.1 semver: 7.7.3 tslib: 2.8.1 @@ -35842,7 +35853,7 @@ snapshots: http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.4 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) transitivePeerDependencies: - supports-color @@ -35862,7 +35873,7 @@ snapshots: https-proxy-agent@7.0.6: dependencies: agent-base: 7.1.4 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) transitivePeerDependencies: - supports-color @@ -36232,7 +36243,7 @@ snapshots: istanbul-lib-source-maps@5.0.6: dependencies: '@jridgewell/trace-mapping': 0.3.25 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) istanbul-lib-coverage: 3.2.2 transitivePeerDependencies: - supports-color @@ -38467,7 +38478,7 @@ snapshots: dependencies: '@tootallnate/quickjs-emscripten': 0.23.0 agent-base: 7.1.4 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) get-uri: 6.0.1 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 @@ -39220,7 +39231,7 @@ snapshots: proxy-agent@6.5.0: dependencies: agent-base: 7.1.4 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 lru-cache: 7.18.3 @@ -39260,7 +39271,7 @@ snapshots: dependencies: '@puppeteer/browsers': 2.10.6 chromium-bidi: 7.2.0(devtools-protocol@0.0.1464554) 
- debug: 4.4.1 + debug: 4.4.1(supports-color@10.0.0) devtools-protocol: 0.0.1464554 typed-query-selector: 2.12.0 ws: 8.18.3(bufferutil@4.0.9) @@ -39475,7 +39486,7 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0): + react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0): dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 @@ -39512,8 +39523,8 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) shelljs: 0.8.5 - socket.io: 4.7.3 - socket.io-client: 4.7.3 + socket.io: 4.7.3(bufferutil@4.0.9) + socket.io-client: 4.7.3(bufferutil@4.0.9) sonner: 1.3.1(react-dom@18.2.0(react@18.3.1))(react@18.3.1) source-map-js: 1.0.2 stacktrace-parser: 0.1.10 @@ -40145,7 +40156,7 @@ snapshots: require-in-the-middle@7.1.1(supports-color@10.0.0): dependencies: - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) module-details-from-path: 1.0.3 resolve: 1.22.8 transitivePeerDependencies: @@ -40713,7 +40724,7 @@ snapshots: - supports-color - utf-8-validate - socket.io-client@4.7.3: + socket.io-client@4.7.3(bufferutil@4.0.9): dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.7(supports-color@10.0.0) @@ -40742,7 +40753,7 @@ snapshots: transitivePeerDependencies: - supports-color - socket.io@4.7.3: + socket.io@4.7.3(bufferutil@4.0.9): dependencies: accepts: 1.3.8 base64id: 2.0.0 @@ -40773,7 +40784,7 @@ snapshots: socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.4 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -41147,7 +41158,7 @@ snapshots: dependencies: component-emitter: 1.3.1 cookiejar: 2.1.4 - debug: 4.4.3(supports-color@10.0.0) + debug: 4.4.1(supports-color@10.0.0) fast-safe-stringify: 2.1.1 form-data: 4.0.4 formidable: 3.5.1 @@ -42355,7 +42366,7 @@ snapshots: '@vitest/spy': 3.1.4 '@vitest/utils': 3.1.4 
chai: 5.2.0 - debug: 4.4.1 + debug: 4.4.1(supports-color@10.0.0) expect-type: 1.2.1 magic-string: 0.30.21 pathe: 2.0.3 diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json index b373eb364da..9dcab80046f 100644 --- a/references/ai-chat/package.json +++ b/references/ai-chat/package.json @@ -15,7 +15,8 @@ "ai": "^6.0.0", "next": "15.3.3", "react": "^19.0.0", - "react-dom": "^19.0.0" + "react-dom": "^19.0.0", + "zod": "3.25.76" }, "devDependencies": { "@tailwindcss/postcss": "^4", diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 1fa0e82fd0d..aa09d530bb4 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -5,6 +5,70 @@ import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; import { useMemo, useState } from "react"; import { getChatToken } from "@/app/actions"; +function ToolInvocation({ part }: { part: any }) { + const [expanded, setExpanded] = useState(false); + // Static tools: type is "tool-{name}", dynamic tools have toolName property + const toolName = + part.type === "dynamic-tool" + ? (part.toolName ?? "tool") + : part.type.startsWith("tool-") + ? part.type.slice(5) + : "tool"; + const state = part.state ?? "input-available"; + const args = part.input; + const result = part.output; + + const isLoading = state === "input-streaming" || state === "input-available"; + const isError = state === "output-error"; + + return ( +
+ + + {expanded && ( +
+ {args && Object.keys(args).length > 0 && ( +
+
Input
+
+                {JSON.stringify(args, null, 2)}
+              
+
+ )} + {state === "output-available" && result !== undefined && ( +
+
Output
+
+                {JSON.stringify(result, null, 2)}
+              
+
+ )} + {isError && result !== undefined && ( +
+
Error
+
+                {typeof result === "string" ? result : JSON.stringify(result, null, 2)}
+              
+
+ )} +
+ )} +
+ ); +} + export function Chat() { const [input, setInput] = useState(""); @@ -54,6 +118,12 @@ export function Chat() { if (part.type === "text") { return {part.text}; } + + // Static tools: "tool-{toolName}", dynamic tools: "dynamic-tool" + if (part.type.startsWith("tool-") || part.type === "dynamic-tool") { + return ; + } + return null; })} diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 8c77bbeebc5..b600977c593 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,6 +1,66 @@ import { chatTask } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages } from "ai"; +import { streamText, convertToModelMessages, tool } from "ai"; import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; +import os from "node:os"; + +const inspectEnvironment = tool({ + description: + "Inspect the current execution environment. Returns runtime info (Node.js/Bun/Deno version), " + + "OS details, CPU architecture, memory usage, environment variables, and platform metadata.", + inputSchema: z.object({}), + execute: async () => { + const memUsage = process.memoryUsage(); + + return { + runtime: { + name: typeof Bun !== "undefined" ? "bun" : typeof Deno !== "undefined" ? 
"deno" : "node", + version: process.version, + versions: { + v8: process.versions.v8, + openssl: process.versions.openssl, + modules: process.versions.modules, + }, + }, + os: { + platform: process.platform, + arch: process.arch, + release: os.release(), + type: os.type(), + hostname: os.hostname(), + uptime: `${Math.floor(os.uptime())}s`, + }, + cpus: { + count: os.cpus().length, + model: os.cpus()[0]?.model, + }, + memory: { + total: `${Math.round(os.totalmem() / 1024 / 1024)}MB`, + free: `${Math.round(os.freemem() / 1024 / 1024)}MB`, + process: { + rss: `${Math.round(memUsage.rss / 1024 / 1024)}MB`, + heapUsed: `${Math.round(memUsage.heapUsed / 1024 / 1024)}MB`, + heapTotal: `${Math.round(memUsage.heapTotal / 1024 / 1024)}MB`, + }, + }, + env: { + NODE_ENV: process.env.NODE_ENV, + TZ: process.env.TZ ?? Intl.DateTimeFormat().resolvedOptions().timeZone, + LANG: process.env.LANG, + }, + process: { + pid: process.pid, + cwd: process.cwd(), + execPath: process.execPath, + argv: process.argv.slice(0, 3), + }, + }; + }, +}); + +// Silence TS errors for Bun/Deno global checks +declare const Bun: unknown; +declare const Deno: unknown; export const chat = chatTask({ id: "ai-chat", @@ -9,6 +69,8 @@ export const chat = chatTask({ model: openai("gpt-4o-mini"), system: "You are a helpful assistant. 
Be concise and friendly.", messages: await convertToModelMessages(messages), + tools: { inspectEnvironment }, + maxSteps: 3, }); }, }); From 89cfe472d4a900bfaadbb9e2110532dc2e824b6c Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sat, 21 Feb 2026 15:18:22 +0000 Subject: [PATCH 19/53] expose a useTriggerChatTransport hook --- .../plan-graceful-oversized-batch-items.md | 257 +++++++++++ packages/trigger-sdk/package.json | 26 +- packages/trigger-sdk/src/v3/chat-react.ts | 84 ++++ packages/trigger-sdk/src/v3/chat.ts | 1 + pnpm-lock.yaml | 418 ++++++++++++------ references/ai-chat/src/components/chat.tsx | 19 +- 6 files changed, 659 insertions(+), 146 deletions(-) create mode 100644 .scratch/plan-graceful-oversized-batch-items.md create mode 100644 packages/trigger-sdk/src/v3/chat-react.ts diff --git a/.scratch/plan-graceful-oversized-batch-items.md b/.scratch/plan-graceful-oversized-batch-items.md new file mode 100644 index 00000000000..cb463b96252 --- /dev/null +++ b/.scratch/plan-graceful-oversized-batch-items.md @@ -0,0 +1,257 @@ +# Graceful handling of oversized batch items + +## Prerequisites + +This plan builds on top of PR #2980 which provides: +- `TriggerFailedTaskService` at `apps/webapp/app/runEngine/services/triggerFailedTask.server.ts` - creates pre-failed TaskRuns with proper trace events, waitpoint connections, and parent run associations +- `engine.createFailedTaskRun()` on RunEngine - creates a SYSTEM_FAILURE run with associated waitpoints +- Retry support in `processItemCallback` with `attempt` and `isFinalAttempt` params +- The callback already uses `TriggerFailedTaskService` for items that fail after retries + +## Problem + +When the NDJSON parser in `createNdjsonParserStream` detects an oversized line, it throws inside the TransformStream's `transform()` method. This aborts the request body stream (due to `pipeThrough` coupling), causing the client's `fetch()` to see `TypeError: fetch failed` instead of the server's 400 response. 
The SDK treats this as a connection error and retries with exponential backoff (~25s wasted). + +## Goal + +Instead of throwing, treat oversized items as per-item failures that flow through the existing batch failure pipeline. The batch seals normally, other items process fine, and the user sees a clear failure for the specific oversized item(s). + +## Approach + +The NDJSON parser emits an error marker object instead of throwing. `StreamBatchItemsService` detects these markers and enqueues the item to the FairQueue with error metadata in its options. The `processItemCallback` (already enhanced with `TriggerFailedTaskService` in PR #2980) detects the error metadata and creates a pre-failed run via `TriggerFailedTaskService`, which handles all the waitpoint/trace machinery. + +## Changes + +### 1. Byte-level key extractor for oversized lines + +**`apps/webapp/app/runEngine/services/streamBatchItems.server.ts`** - new function + +Add `extractIndexAndTask(bytes: Uint8Array): { index: number; task: string }` - a state machine that extracts top-level `"index"` and `"task"` values from raw bytes without decoding the full line. + +How it works: +- Scan bytes tracking JSON nesting depth (count `{`/`[` vs `}`/`]`) +- At depth 1 (inside the top-level object), look for byte sequences matching `"index"` and `"task"` key patterns +- For `"index"`: after the `:`, parse the digit sequence as a number +- For `"task"`: after the `:`, find opening `"`, read bytes until closing `"`, decode just that slice +- Stop when both found, or after scanning 512 bytes (whichever comes first) +- Fallback: `index = -1`, `task = "unknown"` if not found + +This avoids decoding/allocating the full 3MB line - only the first few hundred bytes are examined. + +### 2. 
Modify `createNdjsonParserStream` to emit error markers + +**`apps/webapp/app/runEngine/services/streamBatchItems.server.ts`** + +Define a marker type: +```typescript +type OversizedItemMarker = { + __batchItemError: "OVERSIZED"; + index: number; + task: string; + actualSize: number; + maxSize: number; +}; +``` + +**Case 1 - Complete line exceeds limit** (newline found, `newlineIndex > maxItemBytes`): +- Call `extractLine(newlineIndex)` to consume the line from the buffer +- Call `extractIndexAndTask(lineBytes)` on the extracted bytes +- `controller.enqueue(marker)` instead of throwing +- Increment `lineNumber` and continue + +**Case 2 - Incomplete line exceeds limit** (no newline, `totalBytes > maxItemBytes`): +- Call `extractIndexAndTask(concatenateChunks())` on current buffer +- `controller.enqueue(marker)` +- Clear the buffer (`chunks = []; totalBytes = 0`) +- Return from transform (don't throw) + +**Case 3 - Flush with oversized remaining** (`totalBytes > maxItemBytes` in flush): +- Same as case 2 but in `flush()`. + +### 3. Handle markers in `StreamBatchItemsService` + +**`apps/webapp/app/runEngine/services/streamBatchItems.server.ts`** - in the `for await` loop + +Before the existing `BatchItemNDJSONSchema.safeParse(rawItem)`, check for the marker: + +```typescript +if (rawItem && typeof rawItem === "object" && (rawItem as any).__batchItemError === "OVERSIZED") { + const marker = rawItem as OversizedItemMarker; + const itemIndex = marker.index >= 0 ? marker.index : lastIndex + 1; + + const errorMessage = `Batch item payload is too large (${(marker.actualSize / 1024).toFixed(1)} KB). Maximum allowed size is ${(marker.maxSize / 1024).toFixed(1)} KB. Reduce the payload size or offload large data to external storage.`; + + // Enqueue the item normally but with error metadata in options. + // The processItemCallback will detect __error and use TriggerFailedTaskService + // to create a pre-failed run with proper waitpoint connections. 
+ const batchItem: BatchItem = { + task: marker.task, + payload: "{}", + payloadType: "application/json", + options: { + __error: errorMessage, + __errorCode: "PAYLOAD_TOO_LARGE", + }, + }; + + const result = await this._engine.enqueueBatchItem( + batchId, environment.id, itemIndex, batchItem + ); + + if (result.enqueued) { + itemsAccepted++; + } else { + itemsDeduplicated++; + } + lastIndex = itemIndex; + continue; +} +``` + +### 4. Handle `__error` items in `processItemCallback` + +**`apps/webapp/app/v3/runEngineHandlers.server.ts`** - in the `setupBatchQueueCallbacks` function + +In the `processItemCallback`, before the `TriggerTaskService.call()`, check for `__error` in `item.options`: + +```typescript +const itemError = item.options?.__error as string | undefined; +if (itemError) { + const errorCode = (item.options?.__errorCode as string) ?? "ITEM_ERROR"; + + // Use TriggerFailedTaskService to create a pre-failed run. + // This creates a proper TaskRun with waitpoint connections so the + // parent's batchTriggerAndWait resolves correctly for this item. + const failedRunId = await triggerFailedTaskService.call({ + taskId: item.task, + environment, + payload: item.payload ?? "{}", + payloadType: item.payloadType, + errorMessage: itemError, + errorCode: errorCode as TaskRunErrorCodes, + parentRunId: meta.parentRunId, + resumeParentOnCompletion: meta.resumeParentOnCompletion, + batch: { id: batchId, index: itemIndex }, + traceContext: meta.traceContext as Record | undefined, + spanParentAsLink: meta.spanParentAsLink, + }); + + if (failedRunId) { + span.setAttribute("batch.result.pre_failed", true); + span.setAttribute("batch.result.run_id", failedRunId); + span.end(); + return { success: true as const, runId: failedRunId }; + } + + // Fallback if TriggerFailedTaskService fails + span.end(); + return { success: false as const, error: itemError, errorCode }; +} +``` + +Note: this returns `{ success: true, runId }` because the pre-failed run IS a real run. 
The BatchQueue records it as a success (run was created). The run itself is already in SYSTEM_FAILURE status, so the batch completion flow handles it correctly. + +If `environment` is null (environment not found), fall through to the existing environment-not-found handling which already uses `triggerFailedTaskService.callWithoutTraceEvents()` on `isFinalAttempt`. + +### 5. Handle undefined/null payload in BatchQueue serialization + +**`internal-packages/run-engine/src/batch-queue/index.ts`** - in `#handleMessage` + +Both payload serialization blocks (in the `success: false` branch and the `catch` block) do: +```typescript +const str = typeof item.payload === "string" ? item.payload : JSON.stringify(item.payload); +innerSpan?.setAttribute("batch.payloadSize", str.length); +``` + +`JSON.stringify(undefined)` returns `undefined`, causing `str.length` to crash. Fix both: +```typescript +const str = + item.payload === undefined || item.payload === null + ? "{}" + : typeof item.payload === "string" + ? item.payload + : JSON.stringify(item.payload); +``` + +### 6. Remove stale error handling in route + +**`apps/webapp/app/routes/api.v3.batches.$batchId.items.ts`** + +The `error.message.includes("exceeds maximum size")` branch is no longer reachable since oversized items don't throw. Remove that condition, keep the `"Invalid JSON"` check. + +### 7. 
Remove `BatchItemTooLargeError` and SDK pre-validation + +**`packages/core/src/v3/apiClient/errors.ts`** - remove `BatchItemTooLargeError` class + +**`packages/core/src/v3/apiClient/index.ts`**: +- Remove `BatchItemTooLargeError` import +- Remove `instanceof BatchItemTooLargeError` check in the retry catch block +- Remove `MAX_BATCH_ITEM_BYTES` constant +- Remove size validation from `createNdjsonStream` (revert `encodeAndValidate` to simple encode) + +**`packages/trigger-sdk/src/v3/shared.ts`** - remove `BatchItemTooLargeError` import and handling in `buildBatchErrorMessage` + +**`packages/trigger-sdk/src/v3/index.ts`** - remove `BatchItemTooLargeError` re-export + +### 8. Update tests + +**`apps/webapp/test/engine/streamBatchItems.test.ts`**: +- Update "should reject lines exceeding maxItemBytes" to assert `OversizedItemMarker` emission instead of throw +- Update "should reject unbounded accumulation without newlines" similarly +- Update the emoji byte-size test to assert marker instead of throw + +### 9. 
Update reference project test task + +**`references/hello-world/src/trigger/batches.ts`**: +- Remove `BatchItemTooLargeError` import +- Update `batchSealFailureOversizedPayload` task to test the new behavior: + - Send 2 items: one normal, one oversized (~3.2MB) + - Assert `batchTriggerAndWait` returns (doesn't throw) + - Assert `results.runs[0].ok === true` (normal item succeeded) + - Assert `results.runs[1].ok === false` (oversized item failed) + - Assert error message contains "too large" + +## Data flow + +``` +NDJSON bytes arrive + | +createNdjsonParserStream + |-- Line <= limit --> parse JSON --> enqueue object + `-- Line > limit --> extractIndexAndTask(bytes) --> enqueue OversizedItemMarker + | +StreamBatchItemsService for-await loop + |-- OversizedItemMarker --> engine.enqueueBatchItem() with __error in options + `-- Normal item --> validate --> engine.enqueueBatchItem() + | +FairQueue consumer (#handleMessage) + |-- __error in options --> processItemCallback detects it + | --> TriggerFailedTaskService.call() + | --> Creates pre-failed TaskRun with SYSTEM_FAILURE status + | --> Proper waitpoint + TaskRunWaitpoint connections created + | --> Returns { success: true, runId: failedRunFriendlyId } + `-- Normal item --> TriggerTaskService.call() --> creates normal run + | +Batch sealing: enqueuedCount === runCount (all items go through enqueueBatchItem) +Batch completion: all items have runs (real or pre-failed), waitpoints resolve normally +Parent run: batchTriggerAndWait resolves with per-item results +``` + +## Why this works + +The key insight is that `TriggerFailedTaskService` (from PR #2980) creates a real `TaskRun` in `SYSTEM_FAILURE` status. This means: +1. A RUN waitpoint is created and connected to the parent via `TaskRunWaitpoint` with correct `batchId`/`batchIndex` +2. The run is immediately completed, which completes the waitpoint +3. The SDK's `waitForBatch` resolver for that index fires with the error result +4. 
The batch completion flow counts this as a processed item (it's a real run) +5. No special-casing needed in the batch completion callback + +## Verification + +1. Rebuild `@trigger.dev/core`, `@trigger.dev/sdk`, `@internal/run-engine` +2. Restart webapp + trigger dev +3. Trigger `batch-seal-failure-oversized` task - should complete in ~2-3s with: + - Normal item: `ok: true` + - Oversized item: `ok: false` with "too large" error +4. Run NDJSON parser tests: updated tests assert marker emission instead of throws +5. Run `pnpm run build --filter @internal/run-engine --filter @trigger.dev/core --filter @trigger.dev/sdk` diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index d44c8836ec0..fa352cddb7b 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -25,7 +25,8 @@ ".": "./src/v3/index.ts", "./v3": "./src/v3/index.ts", "./ai": "./src/v3/ai.ts", - "./chat": "./src/v3/chat.ts" + "./chat": "./src/v3/chat.ts", + "./chat/react": "./src/v3/chat-react.ts" }, "sourceDialects": [ "@triggerdotdev/source" @@ -41,6 +42,9 @@ ], "chat": [ "dist/commonjs/v3/chat.d.ts" + ], + "chat/react": [ + "dist/commonjs/v3/chat-react.d.ts" ] } }, @@ -70,6 +74,7 @@ "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", "@types/debug": "^4.1.7", + "@types/react": "^19.2.14", "@types/slug": "^5.0.3", "@types/uuid": "^9.0.0", "@types/ws": "^8.5.3", @@ -82,12 +87,16 @@ "zod": "3.25.76" }, "peerDependencies": { - "zod": "^3.0.0 || ^4.0.0", - "ai": "^5.0.0 || ^6.0.0" + "ai": "^5.0.0 || ^6.0.0", + "react": "^18.0 || ^19.0", + "zod": "^3.0.0 || ^4.0.0" }, "peerDependenciesMeta": { "ai": { "optional": true + }, + "react": { + "optional": true } }, "engines": { @@ -138,6 +147,17 @@ "types": "./dist/commonjs/v3/chat.d.ts", "default": "./dist/commonjs/v3/chat.js" } + }, + "./chat/react": { + "import": { + "@triggerdotdev/source": "./src/v3/chat-react.ts", + "types": "./dist/esm/v3/chat-react.d.ts", + "default": 
"./dist/esm/v3/chat-react.js" + }, + "require": { + "types": "./dist/commonjs/v3/chat-react.d.ts", + "default": "./dist/commonjs/v3/chat-react.js" + } } }, "main": "./dist/commonjs/v3/index.js", diff --git a/packages/trigger-sdk/src/v3/chat-react.ts b/packages/trigger-sdk/src/v3/chat-react.ts new file mode 100644 index 00000000000..a62496463ae --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat-react.ts @@ -0,0 +1,84 @@ +"use client"; + +/** + * @module @trigger.dev/sdk/chat/react + * + * React hooks for AI SDK chat transport integration. + * Use alongside `@trigger.dev/sdk/chat` for a type-safe, ergonomic DX. + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; + * import type { chat } from "@/trigger/chat"; + * + * function Chat() { + * const transport = useTriggerChatTransport({ + * task: "ai-chat", + * accessToken: () => fetchToken(), + * }); + * + * const { messages, sendMessage } = useChat({ transport }); + * } + * ``` + */ + +import { useRef } from "react"; +import { + TriggerChatTransport, + type TriggerChatTransportOptions, +} from "./chat.js"; +import type { AnyTask, TaskIdentifier } from "@trigger.dev/core/v3"; + +/** + * Options for `useTriggerChatTransport`, with a type-safe `task` field. + * + * Pass a task type parameter to get compile-time validation of the task ID: + * ```ts + * useTriggerChatTransport({ task: "my-task", ... }) + * ``` + */ +export type UseTriggerChatTransportOptions = Omit< + TriggerChatTransportOptions, + "task" +> & { + /** The task ID. Strongly typed when a task type parameter is provided. */ + task: TaskIdentifier; +}; + +/** + * React hook that creates and memoizes a `TriggerChatTransport` instance. + * + * The transport is created once on first render and reused for the lifetime + * of the component. This avoids the need for `useMemo` and ensures the + * transport's internal session state (waitpoint tokens, lastEventId, etc.) 
+ * is preserved across re-renders. + * + * For dynamic access tokens, pass a function — it will be called on each + * request without needing to recreate the transport. + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; + * import type { chat } from "@/trigger/chat"; + * + * function Chat() { + * const transport = useTriggerChatTransport({ + * task: "ai-chat", + * accessToken: () => fetchToken(), + * }); + * + * const { messages, sendMessage } = useChat({ transport }); + * } + * ``` + */ +export function useTriggerChatTransport( + options: UseTriggerChatTransportOptions +): TriggerChatTransport { + const ref = useRef(null); + if (ref.current === null) { + ref.current = new TriggerChatTransport(options); + } + return ref.current; +} diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 60c3445a759..e36c5761870 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -391,3 +391,4 @@ export class TriggerChatTransport implements ChatTransport { export function createChatTransport(options: TriggerChatTransportOptions): TriggerChatTransport { return new TriggerChatTransport(options); } + diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 88c1806bc0d..789a5dd8f04 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2080,6 +2080,9 @@ importers: '@types/debug': specifier: ^4.1.7 version: 4.1.7 + '@types/react': + specifier: ^19.2.14 + version: 19.2.14 '@types/slug': specifier: ^5.0.3 version: 5.0.3 @@ -2683,7 +2686,7 @@ importers: version: 6.20.0-integration-next.8 '@prisma/client': specifier: 6.20.0-integration-next.8 - version: 6.20.0-integration-next.8(prisma@6.20.0-integration-next.8(@types/react@19.0.12)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4) + version: 
6.20.0-integration-next.8(prisma@6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4) '@trigger.dev/build': specifier: workspace:* version: link:../../packages/build @@ -2696,7 +2699,7 @@ importers: devDependencies: prisma: specifier: 6.20.0-integration-next.8 - version: 6.20.0-integration-next.8(@types/react@19.0.12)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) + version: 6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 @@ -11027,6 +11030,9 @@ packages: '@types/react@19.0.12': resolution: {integrity: sha512-V6Ar115dBDrjbtXSrS+/Oruobc+qVbbUxDFC1RSbRqLt5SYvxxyIDrSC85RWml54g+jfNeEMZhEj7wW07ONQhA==} + '@types/react@19.2.14': + resolution: {integrity: sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==} + '@types/readable-stream@4.0.14': resolution: {integrity: sha512-xZn/AuUbCMShGsqH/ehZtGDwQtbx00M9rZ2ENLe4tOjFZ/JFeWMhEZkk2fEe1jAUqqEAURIkFJ7Az/go8mM1/w==} @@ -12681,6 +12687,9 @@ packages: csstype@3.2.0: resolution: {integrity: sha512-si++xzRAY9iPp60roQiFta7OFbhrgvcthrhlNAGeQptSY25uJjkfUV8OArC3KLocB8JT8ohz+qgxWCmz8RhjIg==} + csstype@3.2.3: + resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + csv-generate@3.4.3: resolution: {integrity: sha512-w/T+rqR0vwvHqWs/1ZyMDWtHHSJaN06klRqJXBEpDJaM/+dZkso0OKh1VcuuYvK3XM53KysVNq8Ko/epCK8wOw==} @@ -25717,11 +25726,11 @@ snapshots: prisma: 6.19.0(typescript@5.5.4) typescript: 5.5.4 - '@prisma/client@6.20.0-integration-next.8(prisma@6.20.0-integration-next.8(@types/react@19.0.12)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4)': + 
'@prisma/client@6.20.0-integration-next.8(prisma@6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4)': dependencies: '@prisma/client-runtime-utils': 6.20.0-integration-next.8 optionalDependencies: - prisma: 6.20.0-integration-next.8(@types/react@19.0.12)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) + prisma: 6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) typescript: 5.5.4 '@prisma/config@6.14.0(magicast@0.3.5)': @@ -25885,9 +25894,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@prisma/studio-core-licensed@0.6.0(@types/react@19.0.12)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@prisma/studio-core-licensed@0.6.0(@types/react@19.2.14)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@types/react': 19.0.12 + '@types/react': 19.2.14 react: 19.1.0 react-dom: 19.1.0(react@19.1.0) @@ -25993,14 +26002,14 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - '@radix-ui/react-arrow@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-arrow@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-avatar@1.1.3(@types/react-dom@19.0.4(@types/react@19.0.12))(@types/react@19.0.12)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': @@ -26015,21 +26024,21 @@ snapshots: 
'@types/react': 19.0.12 '@types/react-dom': 19.0.4(@types/react@19.0.12) - '@radix-ui/react-collapsible@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-collapsible@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-id': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-id': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-collapsible@1.1.11(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': @@ -26081,17 +26090,17 @@ snapshots: 
'@types/react': 18.2.69 '@types/react-dom': 18.2.7 - '@radix-ui/react-collection@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-collection@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.0.2(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-collection@1.1.7(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': @@ -26184,6 +26193,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-context@1.0.1(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.26.7 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-context@1.0.1(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 @@ -26294,12 +26310,12 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 - '@radix-ui/react-direction@1.0.1(@types/react@18.3.1)(react@18.3.1)': + '@radix-ui/react-direction@1.0.1(@types/react@18.2.69)(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 react: 18.3.1 optionalDependencies: - 
'@types/react': 18.3.1 + '@types/react': 18.2.69 '@radix-ui/react-direction@1.1.0(@types/react@18.3.1)(react@18.3.1)': dependencies: @@ -26338,6 +26354,20 @@ snapshots: '@types/react': 18.2.69 '@types/react-dom': 18.2.7 + '@radix-ui/react-dismissable-layer@1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + '@radix-ui/primitive': 1.0.1 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-escape-keydown': 1.0.3(@types/react@18.2.69)(react@18.3.1) + react: 18.3.1 + react-dom: 18.2.0(react@18.3.1) + optionalDependencies: + '@types/react': 18.2.69 + '@types/react-dom': 18.2.7 + '@radix-ui/react-dismissable-layer@1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 @@ -26352,18 +26382,18 @@ snapshots: '@types/react': 18.3.1 '@types/react-dom': 18.2.7 - '@radix-ui/react-dismissable-layer@1.0.5(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-dismissable-layer@1.0.5(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-escape-keydown': 1.0.3(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + 
'@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-escape-keydown': 1.0.3(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-focus-guards@1.0.0(react@18.2.0)': @@ -26378,6 +26408,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-focus-guards@1.0.1(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-focus-guards@1.0.1(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 @@ -26418,16 +26455,16 @@ snapshots: '@types/react': 18.3.1 '@types/react-dom': 18.2.7 - '@radix-ui/react-focus-scope@1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-focus-scope@1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 
'@radix-ui/react-icons@1.3.0(react@18.3.1)': @@ -26454,6 +26491,14 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-id@1.0.1(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.69)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-id@1.0.1(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 @@ -26499,28 +26544,28 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@radix-ui/react-popover@1.0.7(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-popover@1.0.7(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.0.5(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-focus-guards': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-focus-scope': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-popper': 1.1.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 
1.0.2(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.0.5(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-focus-guards': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-popper': 1.1.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.69)(react@18.3.1) aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.2.0(react@18.3.1) - react-remove-scroll: 2.5.5(@types/react@18.3.1)(react@18.3.1) + react-remove-scroll: 2.5.5(@types/react@18.2.69)(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-popper@1.1.1(@types/react@18.2.69)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': @@ -26541,42 +26586,42 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@radix-ui/react-popper@1.1.2(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + 
'@radix-ui/react-popper@1.1.2(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@floating-ui/react-dom': 2.0.9(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-rect': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-size': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-rect': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-size': 1.0.1(@types/react@18.2.69)(react@18.3.1) '@radix-ui/rect': 1.0.1 react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 - '@radix-ui/react-popper@1.1.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + 
'@radix-ui/react-popper@1.1.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@floating-ui/react-dom': 2.0.9(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-rect': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-size': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-rect': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-size': 1.0.1(@types/react@18.2.69)(react@18.3.1) '@radix-ui/rect': 1.0.1 react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-portal@1.0.2(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': @@ -26596,6 +26641,16 @@ snapshots: '@types/react': 18.2.69 '@types/react-dom': 18.2.7 + 
'@radix-ui/react-portal@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + react: 18.3.1 + react-dom: 18.2.0(react@18.3.1) + optionalDependencies: + '@types/react': 18.2.69 + '@types/react-dom': 18.2.7 + '@radix-ui/react-portal@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 @@ -26606,14 +26661,14 @@ snapshots: '@types/react': 18.3.1 '@types/react-dom': 18.2.7 - '@radix-ui/react-portal@1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-portal@1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-portal@1.1.9(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': @@ -26653,6 +26708,17 @@ snapshots: '@types/react': 18.2.69 '@types/react-dom': 18.2.7 + '@radix-ui/react-presence@1.0.1(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + dependencies: + '@babel/runtime': 7.26.7 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.69)(react@18.3.1) + react: 18.3.1 + react-dom: 18.2.0(react@18.3.1) + optionalDependencies: + 
'@types/react': 18.2.69 + '@types/react-dom': 18.2.7 + '@radix-ui/react-presence@1.0.1(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 @@ -26708,6 +26774,16 @@ snapshots: '@types/react': 18.2.69 '@types/react-dom': 18.2.7 + '@radix-ui/react-primitive@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + dependencies: + '@babel/runtime': 7.26.7 + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.69)(react@18.3.1) + react: 18.3.1 + react-dom: 18.2.0(react@18.3.1) + optionalDependencies: + '@types/react': 18.2.69 + '@types/react-dom': 18.2.7 + '@radix-ui/react-primitive@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 @@ -26831,22 +26907,22 @@ snapshots: '@types/react': 18.2.69 '@types/react-dom': 18.2.7 - '@radix-ui/react-roving-focus@1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-roving-focus@1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-direction': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-id': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.1)(react@18.3.1) + 
'@radix-ui/react-collection': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-direction': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-id': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-scroll-area@1.2.0(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': @@ -27023,32 +27099,32 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) - '@radix-ui/react-toggle-group@1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-toggle-group@1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-direction': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-roving-focus': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-toggle': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 
1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-direction': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-roving-focus': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-toggle': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 - '@radix-ui/react-toggle@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-toggle@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-tooltip@1.0.5(@types/react@18.2.69)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': @@ -27071,25 +27147,25 @@ snapshots: transitivePeerDependencies: - '@types/react' - 
'@radix-ui/react-tooltip@1.0.6(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-tooltip@1.0.6(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-context': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-popper': 1.1.2(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.0.2(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-visually-hidden': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-popper': 1.1.2(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 
1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.0.1(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/react-use-callback-ref@1.0.0(react@18.2.0)': @@ -27109,6 +27185,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-use-callback-ref@1.0.1(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-use-callback-ref@1.0.1(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 @@ -27148,6 +27231,14 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-use-controllable-state@1.0.1(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.27.4 + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.69)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-use-controllable-state@1.0.1(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.27.4 @@ -27185,6 +27276,14 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-use-escape-keydown@1.0.3(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.28.4 + '@radix-ui/react-use-callback-ref': 
1.0.1(@types/react@18.2.69)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-use-escape-keydown@1.0.3(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 @@ -27210,6 +27309,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + '@radix-ui/react-use-layout-effect@1.0.1(@types/react@18.2.69)(react@18.3.1)': + dependencies: + '@babel/runtime': 7.27.4 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.2.69 + '@radix-ui/react-use-layout-effect@1.0.1(@types/react@18.3.1)(react@18.3.1)': dependencies: '@babel/runtime': 7.27.4 @@ -27253,13 +27359,13 @@ snapshots: '@radix-ui/rect': 1.0.0 react: 18.2.0 - '@radix-ui/react-use-rect@1.0.1(@types/react@18.3.1)(react@18.3.1)': + '@radix-ui/react-use-rect@1.0.1(@types/react@18.2.69)(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 '@radix-ui/rect': 1.0.1 react: 18.3.1 optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@radix-ui/react-use-size@1.0.0(react@18.2.0)': dependencies: @@ -27275,13 +27381,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 - '@radix-ui/react-use-size@1.0.1(@types/react@18.3.1)(react@18.3.1)': + '@radix-ui/react-use-size@1.0.1(@types/react@18.2.69)(react@18.3.1)': dependencies: '@babel/runtime': 7.26.7 - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@radix-ui/react-visually-hidden@1.0.2(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: @@ -27290,14 +27396,14 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - '@radix-ui/react-visually-hidden@1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': + 
'@radix-ui/react-visually-hidden@1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.28.4 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.2.0(react@18.3.1) optionalDependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@radix-ui/rect@1.0.0': @@ -28367,7 +28473,7 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@react-email/components@0.0.17(@types/react@18.3.1)(react@18.3.1)': + '@react-email/components@0.0.17(@types/react@18.2.69)(react@18.3.1)': dependencies: '@react-email/body': 0.0.8(react@18.3.1) '@react-email/button': 0.0.15(react@18.3.1) @@ -28377,7 +28483,7 @@ snapshots: '@react-email/container': 0.0.12(react@18.3.1) '@react-email/font': 0.0.6(react@18.3.1) '@react-email/head': 0.0.8(react@18.3.1) - '@react-email/heading': 0.0.12(@types/react@18.3.1)(react@18.3.1) + '@react-email/heading': 0.0.12(@types/react@18.2.69)(react@18.3.1) '@react-email/hr': 0.0.8(react@18.3.1) '@react-email/html': 0.0.8(react@18.3.1) '@react-email/img': 0.0.8(react@18.3.1) @@ -28424,9 +28530,9 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@react-email/heading@0.0.12(@types/react@18.3.1)(react@18.3.1)': + '@react-email/heading@0.0.12(@types/react@18.2.69)(react@18.3.1)': dependencies: - '@radix-ui/react-slot': 1.0.2(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.69)(react@18.3.1) react: 18.3.1 transitivePeerDependencies: - '@types/react' @@ -31368,7 +31474,7 @@ snapshots: '@types/react-dom@18.2.7': dependencies: - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom@19.0.4(@types/react@19.0.12)': dependencies: @@ -31395,6 +31501,10 @@ snapshots: 
dependencies: csstype: 3.2.0 + '@types/react@19.2.14': + dependencies: + csstype: 3.2.3 + '@types/readable-stream@4.0.14': dependencies: '@types/node': 20.14.14 @@ -33337,6 +33447,8 @@ snapshots: csstype@3.2.0: {} + csstype@3.2.3: {} + csv-generate@3.4.3: {} csv-parse@4.16.3: {} @@ -39142,11 +39254,11 @@ snapshots: transitivePeerDependencies: - magicast - prisma@6.20.0-integration-next.8(@types/react@19.0.12)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4): + prisma@6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4): dependencies: '@prisma/config': 6.20.0-integration-next.8(magicast@0.3.5) '@prisma/engines': 6.20.0-integration-next.8 - '@prisma/studio-core-licensed': 0.6.0(@types/react@19.0.12)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@prisma/studio-core-licensed': 0.6.0(@types/react@19.2.14)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) postgres: 3.4.7 optionalDependencies: typescript: 5.5.4 @@ -39490,15 +39602,15 @@ snapshots: dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 - '@radix-ui/react-collapsible': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-popover': 1.0.7(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.0.2(@types/react@18.3.1)(react@18.3.1) - '@radix-ui/react-toggle-group': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@radix-ui/react-tooltip': 1.0.6(@types/react-dom@18.2.7)(@types/react@18.3.1)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) - '@react-email/components': 0.0.17(@types/react@18.3.1)(react@18.3.1) + '@radix-ui/react-collapsible': 1.0.3(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-popover': 
1.0.7(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.69)(react@18.3.1) + '@radix-ui/react-toggle-group': 1.0.4(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@radix-ui/react-tooltip': 1.0.6(@types/react-dom@18.2.7)(@types/react@18.2.69)(react-dom@18.2.0(react@18.3.1))(react@18.3.1) + '@react-email/components': 0.0.17(@types/react@18.2.69)(react@18.3.1) '@react-email/render': 0.0.13 '@swc/core': 1.3.101(@swc/helpers@0.5.15) - '@types/react': 18.3.1 + '@types/react': 18.2.69 '@types/react-dom': 18.2.7 '@types/webpack': 5.28.5(@swc/core@1.3.101(@swc/helpers@0.5.15))(esbuild@0.19.11) autoprefixer: 10.4.14(postcss@8.4.35) @@ -39641,6 +39753,14 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + react-remove-scroll-bar@2.3.8(@types/react@18.2.69)(react@18.3.1): + dependencies: + react: 18.3.1 + react-style-singleton: 2.2.3(@types/react@18.2.69)(react@18.3.1) + tslib: 2.8.1 + optionalDependencies: + '@types/react': 18.2.69 + react-remove-scroll-bar@2.3.8(@types/react@18.3.1)(react@18.3.1): dependencies: react: 18.3.1 @@ -39660,6 +39780,17 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + react-remove-scroll@2.5.5(@types/react@18.2.69)(react@18.3.1): + dependencies: + react: 18.3.1 + react-remove-scroll-bar: 2.3.8(@types/react@18.2.69)(react@18.3.1) + react-style-singleton: 2.2.3(@types/react@18.2.69)(react@18.3.1) + tslib: 2.8.1 + use-callback-ref: 1.3.3(@types/react@18.2.69)(react@18.3.1) + use-sidecar: 1.1.3(@types/react@18.2.69)(react@18.3.1) + optionalDependencies: + '@types/react': 18.2.69 + react-remove-scroll@2.5.5(@types/react@18.3.1)(react@18.3.1): dependencies: react: 18.3.1 @@ -39738,6 +39869,14 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + react-style-singleton@2.2.3(@types/react@18.2.69)(react@18.3.1): + dependencies: + get-nonce: 1.0.1 + react: 18.3.1 + tslib: 2.8.1 + 
optionalDependencies: + '@types/react': 18.2.69 + react-style-singleton@2.2.3(@types/react@18.3.1)(react@18.3.1): dependencies: get-nonce: 1.0.1 @@ -42133,6 +42272,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + use-callback-ref@1.3.3(@types/react@18.2.69)(react@18.3.1): + dependencies: + react: 18.3.1 + tslib: 2.8.1 + optionalDependencies: + '@types/react': 18.2.69 + use-callback-ref@1.3.3(@types/react@18.3.1)(react@18.3.1): dependencies: react: 18.3.1 @@ -42154,6 +42300,14 @@ snapshots: optionalDependencies: '@types/react': 18.2.69 + use-sidecar@1.1.3(@types/react@18.2.69)(react@18.3.1): + dependencies: + detect-node-es: 1.1.0 + react: 18.3.1 + tslib: 2.8.1 + optionalDependencies: + '@types/react': 18.2.69 + use-sidecar@1.1.3(@types/react@18.3.1)(react@18.3.1): dependencies: detect-node-es: 1.1.0 diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index aa09d530bb4..9755e15f6e8 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -1,9 +1,10 @@ "use client"; import { useChat } from "@ai-sdk/react"; -import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; -import { useMemo, useState } from "react"; +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import { useState } from "react"; import { getChatToken } from "@/app/actions"; +import type { chat } from "@/trigger/chat"; function ToolInvocation({ part }: { part: any }) { const [expanded, setExpanded] = useState(false); @@ -72,15 +73,11 @@ function ToolInvocation({ part }: { part: any }) { export function Chat() { const [input, setInput] = useState(""); - const transport = useMemo( - () => - new TriggerChatTransport({ - task: "ai-chat", - accessToken: getChatToken, - baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, - }), - [] - ); + const transport = useTriggerChatTransport({ + task: "ai-chat", + accessToken: getChatToken, + baseURL: 
process.env.NEXT_PUBLIC_TRIGGER_API_URL, + }); const { messages, sendMessage, status, error } = useChat({ transport, From 90eb452ecb9f903844562dafc0b709ee794b05c9 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Tue, 3 Mar 2026 19:09:51 +0000 Subject: [PATCH 20/53] use input streams and rename chatTask and chatState to chat.task and chat.state --- docs/guides/ai-chat.mdx | 37 +-- packages/trigger-sdk/src/v3/ai.ts | 249 +++++++++++++----- packages/trigger-sdk/src/v3/chat-constants.ts | 13 + packages/trigger-sdk/src/v3/chat-react.ts | 2 +- packages/trigger-sdk/src/v3/chat.ts | 90 +++---- pnpm-lock.yaml | 84 +++--- references/ai-chat/src/app/actions.ts | 6 +- references/ai-chat/src/components/chat.tsx | 4 +- references/ai-chat/src/trigger/chat.ts | 7 +- 9 files changed, 295 insertions(+), 197 deletions(-) create mode 100644 packages/trigger-sdk/src/v3/chat-constants.ts diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index e549226b147..fddc9254f72 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -24,22 +24,24 @@ No custom API routes needed. Your chat backend is a Trigger.dev task. ### 1. Define a chat task -Use `chatTask` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The payload is automatically typed as `ChatTaskPayload`. +Use `chat.task` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The payload is automatically typed as `ChatTaskPayload` with abort signals. If you return a `StreamTextResult` from `run`, it's **automatically piped** to the frontend. 
```ts trigger/chat.ts -import { chatTask } from "@trigger.dev/sdk/ai"; +import { chat } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages } from "ai"; import { openai } from "@ai-sdk/openai"; -export const myChat = chatTask({ +export const myChat = chat.task({ id: "my-chat", - run: async ({ messages }) => { + run: async ({ messages, signal }) => { // messages is UIMessage[] from the frontend + // signal fires on stop or run cancel return streamText({ model: openai("gpt-4o"), messages: convertToModelMessages(messages), + abortSignal: signal, }); // Returning a StreamTextResult auto-pipes it to the frontend }, @@ -116,35 +118,36 @@ export function Chat({ accessToken }: { accessToken: string }) { The easiest approach — return the `streamText` result from `run` and it's automatically piped to the frontend: ```ts -import { chatTask } from "@trigger.dev/sdk/ai"; +import { chat } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages } from "ai"; import { openai } from "@ai-sdk/openai"; -export const simpleChat = chatTask({ +export const simpleChat = chat.task({ id: "simple-chat", - run: async ({ messages }) => { + run: async ({ messages, signal }) => { return streamText({ model: openai("gpt-4o"), system: "You are a helpful assistant.", messages: convertToModelMessages(messages), + abortSignal: signal, }); }, }); ``` -### Complex: use pipeChat() from anywhere +### Complex: use chat.pipe() from anywhere -For complex agent flows where `streamText` is called deep inside your code, use `pipeChat()`. It works from **anywhere inside a task** — even nested function calls. +For complex agent flows where `streamText` is called deep inside your code, use `chat.pipe()`. It works from **anywhere inside a task** — even nested function calls. 
```ts trigger/agent-chat.ts -import { chatTask, pipeChat } from "@trigger.dev/sdk/ai"; +import { chat } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages } from "ai"; import { openai } from "@ai-sdk/openai"; -export const agentChat = chatTask({ +export const agentChat = chat.task({ id: "agent-chat", run: async ({ messages }) => { - // Don't return anything — pipeChat is called inside + // Don't return anything — chat.pipe is called inside await runAgentLoop(convertToModelMessages(messages)); }, }); @@ -159,17 +162,17 @@ async function runAgentLoop(messages: CoreMessage[]) { }); // Pipe from anywhere — no need to return it - await pipeChat(result); + await chat.pipe(result); } ``` -### Manual: use task() with pipeChat() +### Manual: use task() with chat.pipe() -If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `pipeChat()`: +If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `chat.pipe()`: ```ts import { task } from "@trigger.dev/sdk"; -import { pipeChat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; +import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages } from "ai"; import { openai } from "@ai-sdk/openai"; @@ -183,7 +186,7 @@ export const manualChat = task({ messages: convertToModelMessages(payload.messages), }); - await pipeChat(result); + await chat.pipe(result); }, }); ``` diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 8bec798e981..f9ba018617b 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -15,7 +15,11 @@ import { auth } from "./auth.js"; import { metadata } from "./metadata.js"; import { streams } from "./streams.js"; import { createTask } from "./shared.js"; -import { wait } from "./wait.js"; +import { + CHAT_STREAM_KEY as _CHAT_STREAM_KEY, + CHAT_MESSAGES_STREAM_ID, + CHAT_STOP_STREAM_ID, +} from 
"./chat-constants.js"; const METADATA_KEY = "tool.execute.options"; @@ -136,13 +140,13 @@ export const ai = { * ```ts * // actions.ts * "use server"; - * import { createChatAccessToken } from "@trigger.dev/sdk/ai"; - * import type { chat } from "@/trigger/chat"; + * import { chat } from "@trigger.dev/sdk/ai"; + * import type { myChat } from "@/trigger/chat"; * - * export const getChatToken = () => createChatAccessToken("ai-chat"); + * export const getChatToken = () => chat.createAccessToken("my-chat"); * ``` */ -export async function createChatAccessToken( +function createChatAccessToken( taskId: TaskIdentifier ): Promise { return auth.createTriggerPublicToken(taskId as string, { multipleUse: true }); @@ -157,7 +161,10 @@ export async function createChatAccessToken( * Both `TriggerChatTransport` (frontend) and `pipeChat`/`chatTask` (backend) * use this key by default. */ -export const CHAT_STREAM_KEY = "chat"; +export const CHAT_STREAM_KEY = _CHAT_STREAM_KEY; + +// Re-export input stream IDs for advanced usage +export { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID }; /** * The payload shape that the chat transport sends to the triggered task. @@ -187,6 +194,28 @@ export type ChatTaskPayload = { metadata?: unknown; }; +/** + * Abort signals provided to the `chatTask` run function. + */ +export type ChatTaskSignals = { + /** Combined signal — fires on run cancel OR stop generation. Pass to `streamText`. */ + signal: AbortSignal; + /** Fires only when the run is cancelled, expired, or exceeds maxDuration. */ + cancelSignal: AbortSignal; + /** Fires only when the frontend stops generation for this turn (per-turn, reset each turn). */ + stopSignal: AbortSignal; +}; + +/** + * The full payload passed to a `chatTask` run function. + * Extends `ChatTaskPayload` (the wire payload) with abort signals. 
+ */ +export type ChatTaskRunPayload = ChatTaskPayload & ChatTaskSignals; + +// Input streams for bidirectional chat communication +const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); +const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); + /** * Tracks how many times `pipeChat` has been called in the current `chatTask` run. * Used to prevent double-piping when a user both calls `pipeChat()` manually @@ -253,7 +282,7 @@ function isReadableStream(value: unknown): value is ReadableStream { * @example * ```ts * import { task } from "@trigger.dev/sdk"; - * import { pipeChat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; + * import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; * import { streamText, convertToModelMessages } from "ai"; * * export const myChatTask = task({ @@ -264,7 +293,7 @@ function isReadableStream(value: unknown): value is ReadableStream { * messages: convertToModelMessages(payload.messages), * }); * - * await pipeChat(result); + * await chat.pipe(result); * }, * }); * ``` @@ -274,11 +303,11 @@ function isReadableStream(value: unknown): value is ReadableStream { * // Works from anywhere inside a task — even deep in your agent code * async function runAgentLoop(messages: CoreMessage[]) { * const result = streamText({ model, messages }); - * await pipeChat(result); + * await chat.pipe(result); * } * ``` */ -export async function pipeChat( +async function pipeChat( source: UIMessageStreamable | AsyncIterable | ReadableStream, options?: PipeChatOptions ): Promise { @@ -314,16 +343,15 @@ export async function pipeChat( * Options for defining a chat task. * * Extends the standard `TaskOptions` but pre-types the payload as `ChatTaskPayload` - * and overrides `run` to accept `ChatTaskPayload` directly. + * and overrides `run` to accept `ChatTaskRunPayload` (with abort signals). 
* * **Auto-piping:** If the `run` function returns a value with `.toUIMessageStream()` * (like a `StreamTextResult`), the stream is automatically piped to the frontend. - * For complex flows, use `pipeChat()` manually from anywhere in your code. * - * **Single-run mode:** By default, the task runs a waitpoint loop so that the + * **Single-run mode:** By default, the task uses input streams so that the * entire conversation lives inside one run. After each AI response, the task - * emits a control chunk and pauses via `wait.forToken`. The frontend transport - * resumes the same run by completing the token with the next set of messages. + * emits a control chunk and suspends via `messagesInput.wait()`. The frontend + * transport resumes the same run by sending the next message via input streams. */ export type ChatTaskOptions = Omit< TaskOptions, @@ -332,13 +360,13 @@ export type ChatTaskOptions = Omit< /** * The run function for the chat task. * - * Receives a `ChatTaskPayload` with the conversation messages, chat session ID, - * and trigger type. + * Receives a `ChatTaskRunPayload` with the conversation messages, chat session ID, + * trigger type, and abort signals (`signal`, `cancelSignal`, `stopSignal`). * * **Auto-piping:** If this function returns a value with `.toUIMessageStream()`, * the stream is automatically piped to the frontend. */ - run: (payload: ChatTaskPayload) => Promise; + run: (payload: ChatTaskRunPayload) => Promise; /** * Maximum number of conversational turns (message round-trips) a single run @@ -351,7 +379,7 @@ export type ChatTaskOptions = Omit< /** * How long to wait for the next message before timing out and ending the run. - * Accepts any duration string recognised by `wait.createToken` (e.g. `"1h"`, `"30m"`). + * Accepts any duration string (e.g. `"1h"`, `"30m"`). * * @default "1h" */ @@ -361,87 +389,164 @@ export type ChatTaskOptions = Omit< /** * Creates a Trigger.dev task pre-configured for AI SDK chat. 
* - * - **Pre-types the payload** as `ChatTaskPayload` — no manual typing needed + * - **Pre-types the payload** as `ChatTaskRunPayload` — includes abort signals * - **Auto-pipes the stream** if `run` returns a `StreamTextResult` + * - **Multi-turn**: keeps the conversation in a single run using input streams + * - **Stop support**: frontend can stop generation mid-stream via the stop input stream * - For complex flows, use `pipeChat()` from anywhere inside your task code * * @example * ```ts - * import { chatTask } from "@trigger.dev/sdk/ai"; + * import { chat } from "@trigger.dev/sdk/ai"; * import { streamText, convertToModelMessages } from "ai"; * import { openai } from "@ai-sdk/openai"; * - * // Simple: return streamText result — auto-piped to the frontend - * export const myChatTask = chatTask({ - * id: "my-chat-task", - * run: async ({ messages }) => { + * export const myChat = chat.task({ + * id: "my-chat", + * run: async ({ messages, signal }) => { * return streamText({ * model: openai("gpt-4o"), * messages: convertToModelMessages(messages), + * abortSignal: signal, // fires on stop or run cancel * }); * }, * }); * ``` - * - * @example - * ```ts - * import { chatTask, pipeChat } from "@trigger.dev/sdk/ai"; - * - * // Complex: pipeChat() from deep in your agent code - * export const myAgentTask = chatTask({ - * id: "my-agent-task", - * run: async ({ messages }) => { - * await runComplexAgentLoop(messages); - * }, - * }); - * ``` */ -export function chatTask( +function chatTask( options: ChatTaskOptions ): Task { const { run: userRun, maxTurns = 100, turnTimeout = "1h", ...restOptions } = options; return createTask({ ...restOptions, - run: async (payload: ChatTaskPayload) => { + run: async (payload: ChatTaskPayload, { signal: runSignal }) => { let currentPayload = payload; - for (let turn = 0; turn < maxTurns; turn++) { - _chatPipeCount = 0; - - const result = await userRun(currentPayload); - - // Auto-pipe if the run function returned a StreamTextResult or 
similar, - // but only if pipeChat() wasn't already called manually during this turn - if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { - await pipeChat(result); - } - - // Create a waitpoint token and emit a control chunk so the frontend - // knows to resume this run instead of triggering a new one. - const token = await wait.createToken({ timeout: turnTimeout }); - - const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { - execute: ({ write }) => { - write({ - type: "__trigger_waitpoint_ready", - tokenId: token.id, - publicAccessToken: token.publicAccessToken, + // Mutable reference to the current turn's stop controller so the + // stop input stream listener (registered once) can abort the right turn. + let currentStopController: AbortController | undefined; + + // Listen for stop signals for the lifetime of the run + const stopSub = stopInput.on((data) => { + currentStopController?.abort(data?.message || "stopped"); + }); + + try { + for (let turn = 0; turn < maxTurns; turn++) { + _chatPipeCount = 0; + + // Per-turn stop controller (reset each turn) + const stopController = new AbortController(); + currentStopController = stopController; + + // Three signals for the user's run function + const stopSignal = stopController.signal; + const cancelSignal = runSignal; + const combinedSignal = AbortSignal.any([runSignal, stopController.signal]); + + // Buffer messages that arrive during streaming + const pendingMessages: ChatTaskPayload[] = []; + const msgSub = messagesInput.on((msg) => { + pendingMessages.push(msg as ChatTaskPayload); + }); + + try { + const result = await userRun({ + ...currentPayload, + signal: combinedSignal, + cancelSignal, + stopSignal, }); - }, - }); - await waitUntilComplete(); - // Pause until the frontend completes the token with the next message - const next = await wait.forToken(token); - - if (!next.ok) { - // Timed out waiting for the next message — end the conversation - return; + // Auto-pipe if the run function 
returned a StreamTextResult or similar, + // but only if pipeChat() wasn't already called manually during this turn + if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { + await pipeChat(result, { signal: combinedSignal }); + } + } catch (error) { + // Handle AbortError from streamText gracefully + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) { + return; // Full run cancellation — exit + } + // Stop generation — fall through to continue the loop + } else { + throw error; + } + } finally { + msgSub.off(); + } + + if (runSignal.aborted) return; + + // Write turn-complete control chunk so frontend closes its stream + await writeTurnCompleteChunk(); + + // If messages arrived during streaming, use the first one immediately + if (pendingMessages.length > 0) { + currentPayload = pendingMessages[0]!; + continue; + } + + // Suspend the task (frees compute) until the next message arrives + const next = await messagesInput.wait({ timeout: turnTimeout }); + + if (!next.ok) { + // Timed out waiting for the next message — end the conversation + return; + } + + currentPayload = next.output as ChatTaskPayload; } - - currentPayload = next.output; + } finally { + stopSub.off(); } }, }); } + +/** + * Namespace for AI SDK chat integration. + * + * @example + * ```ts + * import { chat } from "@trigger.dev/sdk/ai"; + * + * // Define a chat task + * export const myChat = chat.task({ + * id: "my-chat", + * run: async ({ messages, signal }) => { + * return streamText({ model, messages, abortSignal: signal }); + * }, + * }); + * + * // Pipe a stream manually (from inside a task) + * await chat.pipe(streamTextResult); + * + * // Create an access token (from a server action) + * const token = await chat.createAccessToken("my-chat"); + * ``` + */ +export const chat = { + /** Create a chat task. See {@link chatTask}. */ + task: chatTask, + /** Pipe a stream to the chat transport. See {@link pipeChat}. 
*/ + pipe: pipeChat, + /** Create a public access token for a chat task. See {@link createChatAccessToken}. */ + createAccessToken: createChatAccessToken, +}; + +/** + * Writes a turn-complete control chunk to the chat output stream. + * The frontend transport intercepts this to close the ReadableStream for the current turn. + * @internal + */ +async function writeTurnCompleteChunk(): Promise { + const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + execute: ({ write }) => { + write({ type: "__trigger_turn_complete" }); + }, + }); + await waitUntilComplete(); +} diff --git a/packages/trigger-sdk/src/v3/chat-constants.ts b/packages/trigger-sdk/src/v3/chat-constants.ts new file mode 100644 index 00000000000..dcd170f02d1 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat-constants.ts @@ -0,0 +1,13 @@ +/** + * Stream IDs used for bidirectional chat communication. + * Shared between backend (ai.ts) and frontend (chat.ts). + */ + +/** The output stream key where UIMessageChunks are written. */ +export const CHAT_STREAM_KEY = "chat"; + +/** Input stream ID for sending chat messages to the running task. */ +export const CHAT_MESSAGES_STREAM_ID = "chat-messages"; + +/** Input stream ID for sending stop signals to abort the current generation. */ +export const CHAT_STOP_STREAM_ID = "chat-stop"; diff --git a/packages/trigger-sdk/src/v3/chat-react.ts b/packages/trigger-sdk/src/v3/chat-react.ts index a62496463ae..e37e2e8e58f 100644 --- a/packages/trigger-sdk/src/v3/chat-react.ts +++ b/packages/trigger-sdk/src/v3/chat-react.ts @@ -51,7 +51,7 @@ export type UseTriggerChatTransportOptions = Om * * The transport is created once on first render and reused for the lifetime * of the component. This avoids the need for `useMemo` and ensures the - * transport's internal session state (waitpoint tokens, lastEventId, etc.) + * transport's internal session state (run IDs, lastEventId, etc.) * is preserved across re-renders. 
* * For dynamic access tokens, pass a function — it will be called on each diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index e36c5761870..7fd620ab908 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -24,6 +24,7 @@ import type { ChatTransport, UIMessage, UIMessageChunk, ChatRequestOptions } from "ai"; import { ApiClient, SSEStreamSubscription } from "@trigger.dev/core/v3"; +import { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID } from "./chat-constants.js"; const DEFAULT_STREAM_KEY = "chat"; const DEFAULT_BASE_URL = "https://api.trigger.dev"; @@ -88,10 +89,6 @@ export type TriggerChatTransportOptions = { type ChatSessionState = { runId: string; publicAccessToken: string; - /** Token ID from the `__trigger_waitpoint_ready` control chunk. */ - waitpointTokenId?: string; - /** Access token scoped to complete the waitpoint (separate from the run's PAT). */ - waitpointAccessToken?: string; /** Last SSE event ID — used to resume the stream without replaying old events. */ lastEventId?: string; }; @@ -100,44 +97,29 @@ type ChatSessionState = { * A custom AI SDK `ChatTransport` that runs chat completions as durable Trigger.dev tasks. * * When `sendMessages` is called, the transport: - * 1. Triggers a Trigger.dev task with the chat messages as payload + * 1. Triggers a Trigger.dev task (or sends to an existing run via input streams) * 2. Subscribes to the task's realtime stream to receive `UIMessageChunk` data * 3. Returns a `ReadableStream` that the AI SDK processes natively * + * Calling `stop()` from `useChat` sends a stop signal via input streams, which + * aborts the current `streamText` call in the task without ending the run. 
+ * * @example * ```tsx * import { useChat } from "@ai-sdk/react"; * import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; * * function Chat({ accessToken }: { accessToken: string }) { - * const { messages, sendMessage, status } = useChat({ + * const { messages, sendMessage, stop, status } = useChat({ * transport: new TriggerChatTransport({ * task: "my-chat-task", * accessToken, * }), * }); * - * // ... render messages + * // stop() sends a stop signal — the task aborts streamText but keeps the run alive * } * ``` - * - * On the backend, define the task using `chatTask` from `@trigger.dev/sdk/ai`: - * - * @example - * ```ts - * import { chatTask } from "@trigger.dev/sdk/ai"; - * import { streamText, convertToModelMessages } from "ai"; - * - * export const myChatTask = chatTask({ - * id: "my-chat-task", - * run: async ({ messages }) => { - * return streamText({ - * model: openai("gpt-4o"), - * messages: convertToModelMessages(messages), - * }); - * }, - * }); - * ``` */ export class TriggerChatTransport implements ChatTransport { private readonly taskId: string; @@ -183,19 +165,12 @@ export class TriggerChatTransport implements ChatTransport { const session = this.sessions.get(chatId); - // If we have a waitpoint token from a previous turn, complete it to - // resume the existing run instead of triggering a new one. - if (session?.waitpointTokenId && session.waitpointAccessToken) { - const tokenId = session.waitpointTokenId; - const tokenAccessToken = session.waitpointAccessToken; - - // Clear the used waitpoint so we don't try to reuse it - session.waitpointTokenId = undefined; - session.waitpointAccessToken = undefined; - + // If we have an existing run, send the message via input stream + // to resume the conversation in the same run. 
+ if (session?.runId) { try { - const wpClient = new ApiClient(this.baseURL, tokenAccessToken); - await wpClient.completeWaitpointToken(tokenId, { data: payload }); + const apiClient = new ApiClient(this.baseURL, session.publicAccessToken); + await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, payload); return this.subscribeToStream( session.runId, @@ -204,12 +179,12 @@ export class TriggerChatTransport implements ChatTransport { chatId ); } catch { - // If completing the waitpoint fails (run died, token expired, etc.), - // fall through to trigger a new run. + // If sending fails (run died, etc.), fall through to trigger a new run. this.sessions.delete(chatId); } } + // First message or run has ended — trigger a new run const currentToken = await this.resolveAccessToken(); const apiClient = new ApiClient(this.baseURL, currentToken); @@ -263,7 +238,7 @@ export class TriggerChatTransport implements ChatTransport { ...this.extraHeaders, }; - // When resuming a run via waitpoint, skip past previously-seen events + // When resuming a run, skip past previously-seen events // so we only receive the new turn's response. const session = chatId ? this.sessions.get(chatId) : undefined; @@ -275,6 +250,24 @@ export class TriggerChatTransport implements ChatTransport { ? AbortSignal.any([abortSignal, internalAbort.signal]) : internalAbort.signal; + // When the caller aborts (user calls stop()), send a stop signal to the + // running task via input streams, then close the SSE connection. 
+ if (abortSignal) { + abortSignal.addEventListener( + "abort", + () => { + if (session?.runId) { + const api = new ApiClient(this.baseURL, session.publicAccessToken); + api + .sendInputStream(session.runId, CHAT_STOP_STREAM_ID, { stop: true }) + .catch(() => {}); // Best-effort + } + internalAbort.abort(); + }, + { once: true } + ); + } + const subscription = new SSEStreamSubscription( `${this.baseURL}/realtime/v1/streams/${runId}/${this.streamKey}`, { @@ -300,11 +293,7 @@ export class TriggerChatTransport implements ChatTransport { // ended (or was killed). Clear the session so that the // next message triggers a fresh run. if (chatId) { - const s = this.sessions.get(chatId); - if (s) { - s.waitpointTokenId = undefined; - s.waitpointAccessToken = undefined; - } + this.sessions.delete(chatId); } controller.close(); return; @@ -326,16 +315,10 @@ export class TriggerChatTransport implements ChatTransport { if (value.chunk != null && typeof value.chunk === "object") { const chunk = value.chunk as Record; - // Intercept the waitpoint-ready control chunk emitted by + // Intercept the turn-complete control chunk emitted by // `chatTask` after the AI response stream completes. This // chunk is never forwarded to the AI SDK consumer. 
- if (chunk.type === "__trigger_waitpoint_ready" && chatId) { - const s = this.sessions.get(chatId); - if (s) { - s.waitpointTokenId = chunk.tokenId as string; - s.waitpointAccessToken = chunk.publicAccessToken as string; - } - + if (chunk.type === "__trigger_turn_complete" && chatId) { // Abort the underlying fetch to close the SSE connection internalAbort.abort(); try { @@ -391,4 +374,3 @@ export class TriggerChatTransport implements ChatTransport { export function createChatTransport(options: TriggerChatTransportOptions): TriggerChatTransport { return new TriggerChatTransport(options); } - diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 789a5dd8f04..a2bdc672606 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -459,8 +459,8 @@ importers: specifier: ^0.1.3 version: 0.1.3(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4)) '@s2-dev/streamstore': - specifier: ^0.17.2 - version: 0.17.3(typescript@5.5.4) + specifier: ^0.22.5 + version: 0.22.5(supports-color@10.0.0) '@sentry/remix': specifier: 9.46.0 version: 9.46.0(patch_hash=146126b032581925294aaed63ab53ce3f5e0356a755f1763d7a9a76b9846943b)(@remix-run/node@2.1.0(typescript@5.5.4))(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4))(encoding@0.1.13)(react@18.2.0) @@ -1104,7 +1104,7 @@ importers: version: 18.3.1 react-email: specifier: ^2.1.1 - version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0) + version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0) resend: specifier: ^3.2.0 version: 3.2.0 @@ -1455,8 +1455,8 @@ importers: specifier: 1.36.0 version: 1.36.0 '@s2-dev/streamstore': - specifier: ^0.17.6 - version: 0.17.6 + specifier: ^0.22.5 + version: 0.22.5(supports-color@10.0.0) '@trigger.dev/build': specifier: workspace:4.4.3 version: link:../build @@ -1732,8 +1732,8 @@ 
importers: specifier: 1.36.0 version: 1.36.0 '@s2-dev/streamstore': - specifier: 0.17.3 - version: 0.17.3(typescript@5.5.4) + specifier: 0.22.5 + version: 0.22.5(supports-color@10.0.0) dequal: specifier: ^2.0.3 version: 2.0.3 @@ -2058,6 +2058,9 @@ importers: evt: specifier: ^2.4.13 version: 2.4.13 + react: + specifier: ^18.0 || ^19.0 + version: 18.3.1 slug: specifier: ^6.0.0 version: 6.1.0 @@ -9566,13 +9569,8 @@ packages: '@rushstack/eslint-patch@1.2.0': resolution: {integrity: sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg==} - '@s2-dev/streamstore@0.17.3': - resolution: {integrity: sha512-UeXL5+MgZQfNkbhCgEDVm7PrV5B3bxh6Zp4C5pUzQQwaoA+iGh2QiiIptRZynWgayzRv4vh0PYfnKpTzJEXegQ==} - peerDependencies: - typescript: 5.5.4 - - '@s2-dev/streamstore@0.17.6': - resolution: {integrity: sha512-ocjZfKaPKmo2yhudM58zVNHv3rBLSbTKkabVoLFn9nAxU6iLrR2CO3QmSo7/waohI3EZHAWxF/Pw8kA8d6QH2g==} + '@s2-dev/streamstore@0.22.5': + resolution: {integrity: sha512-GqdOKIbIoIxT+40fnKzHbrsHB6gBqKdECmFe7D3Ojk4FoN1Hu0LhFzZv6ZmVMjoHHU+55debS1xSWjZwQmbIyQ==} '@sec-ant/readable-stream@0.4.1': resolution: {integrity: sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==} @@ -24121,7 +24119,7 @@ snapshots: dependencies: hono: 4.11.8 - '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.5.11))(bufferutil@4.0.9)': + '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.11.8))(bufferutil@4.0.9)': dependencies: '@hono/node-server': 1.12.2(hono@4.5.11) ws: 8.18.3(bufferutil@4.0.9) @@ -25927,7 +25925,7 @@ snapshots: '@puppeteer/browsers@2.10.6': dependencies: - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) extract-zip: 2.0.1 progress: 2.0.3 proxy-agent: 6.5.0 @@ -29587,14 +29585,12 @@ snapshots: '@rushstack/eslint-patch@1.2.0': {} - '@s2-dev/streamstore@0.17.3(typescript@5.5.4)': - dependencies: - '@protobuf-ts/runtime': 2.11.1 - typescript: 5.5.4 - - '@s2-dev/streamstore@0.17.6': + 
'@s2-dev/streamstore@0.22.5(supports-color@10.0.0)': dependencies: '@protobuf-ts/runtime': 2.11.1 + debug: 4.4.3(supports-color@10.0.0) + transitivePeerDependencies: + - supports-color '@sec-ant/readable-stream@0.4.1': {} @@ -31679,7 +31675,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) eslint: 8.31.0 tsutils: 3.21.0(typescript@5.5.4) optionalDependencies: @@ -31693,7 +31689,7 @@ snapshots: dependencies: '@typescript-eslint/types': 5.59.6 '@typescript-eslint/visitor-keys': 5.59.6 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) globby: 11.1.0 is-glob: 4.0.3 semver: 7.7.3 @@ -33716,11 +33712,9 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.4.1(supports-color@10.0.0): + debug@4.4.1: dependencies: ms: 2.1.3 - optionalDependencies: - supports-color: 10.0.0 debug@4.4.3(supports-color@10.0.0): dependencies: @@ -35088,7 +35082,7 @@ snapshots: extract-zip@2.0.1: dependencies: - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) get-stream: 5.2.0 yauzl: 2.10.0 optionalDependencies: @@ -35522,7 +35516,7 @@ snapshots: dependencies: basic-ftp: 5.0.3 data-uri-to-buffer: 5.0.1 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) fs-extra: 8.1.0 transitivePeerDependencies: - supports-color @@ -35681,7 +35675,7 @@ snapshots: '@types/node': 20.14.14 '@types/semver': 7.5.1 chalk: 4.1.2 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) interpret: 3.1.1 semver: 7.7.3 tslib: 2.8.1 @@ -35965,7 +35959,7 @@ snapshots: http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.4 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) transitivePeerDependencies: - supports-color @@ -35985,7 +35979,7 @@ snapshots: https-proxy-agent@7.0.6: dependencies: agent-base: 7.1.4 - debug: 
4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) transitivePeerDependencies: - supports-color @@ -36355,7 +36349,7 @@ snapshots: istanbul-lib-source-maps@5.0.6: dependencies: '@jridgewell/trace-mapping': 0.3.25 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 istanbul-lib-coverage: 3.2.2 transitivePeerDependencies: - supports-color @@ -38590,7 +38584,7 @@ snapshots: dependencies: '@tootallnate/quickjs-emscripten': 0.23.0 agent-base: 7.1.4 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) get-uri: 6.0.1 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 @@ -39343,7 +39337,7 @@ snapshots: proxy-agent@6.5.0: dependencies: agent-base: 7.1.4 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 lru-cache: 7.18.3 @@ -39383,7 +39377,7 @@ snapshots: dependencies: '@puppeteer/browsers': 2.10.6 chromium-bidi: 7.2.0(devtools-protocol@0.0.1464554) - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 devtools-protocol: 0.0.1464554 typed-query-selector: 2.12.0 ws: 8.18.3(bufferutil@4.0.9) @@ -39598,7 +39592,7 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0): + react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0): dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 @@ -39635,8 +39629,8 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) shelljs: 0.8.5 - socket.io: 4.7.3(bufferutil@4.0.9) - socket.io-client: 4.7.3(bufferutil@4.0.9) + socket.io: 4.7.3 + socket.io-client: 4.7.3 sonner: 1.3.1(react-dom@18.2.0(react@18.3.1))(react@18.3.1) source-map-js: 1.0.2 stacktrace-parser: 0.1.10 @@ -40295,7 +40289,7 @@ snapshots: require-in-the-middle@7.1.1(supports-color@10.0.0): dependencies: - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) module-details-from-path: 1.0.3 resolve: 1.22.8 
transitivePeerDependencies: @@ -40863,7 +40857,7 @@ snapshots: - supports-color - utf-8-validate - socket.io-client@4.7.3(bufferutil@4.0.9): + socket.io-client@4.7.3: dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.7(supports-color@10.0.0) @@ -40892,7 +40886,7 @@ snapshots: transitivePeerDependencies: - supports-color - socket.io@4.7.3(bufferutil@4.0.9): + socket.io@4.7.3: dependencies: accepts: 1.3.8 base64id: 2.0.0 @@ -40923,7 +40917,7 @@ snapshots: socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.4 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -41297,7 +41291,7 @@ snapshots: dependencies: component-emitter: 1.3.1 cookiejar: 2.1.4 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.3(supports-color@10.0.0) fast-safe-stringify: 2.1.1 form-data: 4.0.4 formidable: 3.5.1 @@ -42520,7 +42514,7 @@ snapshots: '@vitest/spy': 3.1.4 '@vitest/utils': 3.1.4 chai: 5.2.0 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 expect-type: 1.2.1 magic-string: 0.30.21 pathe: 2.0.3 diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts index 6d230e271a5..08657dd1a30 100644 --- a/references/ai-chat/src/app/actions.ts +++ b/references/ai-chat/src/app/actions.ts @@ -1,6 +1,6 @@ "use server"; -import { createChatAccessToken } from "@trigger.dev/sdk/ai"; -import type { chat } from "@/trigger/chat"; +import { chat } from "@trigger.dev/sdk/ai"; +import type { aiChat } from "@/trigger/chat"; -export const getChatToken = async () => createChatAccessToken("ai-chat"); +export const getChatToken = async () => chat.createAccessToken("ai-chat"); diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 9755e15f6e8..c5eac1c34f9 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -4,7 +4,7 @@ import { useChat } from "@ai-sdk/react"; import { 
useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; import { useState } from "react"; import { getChatToken } from "@/app/actions"; -import type { chat } from "@/trigger/chat"; +import type { aiChat } from "@/trigger/chat"; function ToolInvocation({ part }: { part: any }) { const [expanded, setExpanded] = useState(false); @@ -73,7 +73,7 @@ function ToolInvocation({ part }: { part: any }) { export function Chat() { const [input, setInput] = useState(""); - const transport = useTriggerChatTransport({ + const transport = useTriggerChatTransport({ task: "ai-chat", accessToken: getChatToken, baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index b600977c593..a798d35bde3 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,4 +1,4 @@ -import { chatTask } from "@trigger.dev/sdk/ai"; +import { chat } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages, tool } from "ai"; import { openai } from "@ai-sdk/openai"; import { z } from "zod"; @@ -62,15 +62,16 @@ const inspectEnvironment = tool({ declare const Bun: unknown; declare const Deno: unknown; -export const chat = chatTask({ +export const aiChat = chat.task({ id: "ai-chat", - run: async ({ messages }) => { + run: async ({ messages, signal }) => { return streamText({ model: openai("gpt-4o-mini"), system: "You are a helpful assistant. 
Be concise and friendly.", messages: await convertToModelMessages(messages), tools: { inspectEnvironment }, maxSteps: 3, + abortSignal: signal, }); }, }); From 7213ec81d33ea6fbf229db3f86024f71e2e969ef Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 13:48:40 +0000 Subject: [PATCH 21/53] add stopping support and fix issue with the OpenAI responses API and stopped streams --- packages/trigger-sdk/src/v3/ai.ts | 23 ++++++++++++++ packages/trigger-sdk/src/v3/chat.ts | 35 ++++++++++++++-------- references/ai-chat/src/components/chat.tsx | 26 +++++++++++----- references/ai-chat/src/trigger/chat.ts | 8 ++--- 4 files changed, 68 insertions(+), 24 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index f9ba018617b..e39a5bb1700 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -216,6 +216,28 @@ export type ChatTaskRunPayload = ChatTaskPayload & ChatTaskSignals; const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); +/** + * Strips provider-specific IDs from message parts so that partial/stopped + * assistant responses don't cause 404s when sent back to the provider + * (e.g. OpenAI Responses API message IDs). + * @internal + */ +function sanitizeMessages(messages: TMessage[]): TMessage[] { + return messages.map((msg) => { + if (msg.role !== "assistant" || !msg.parts) return msg; + return { + ...msg, + parts: msg.parts.map((part: any) => { + // Strip provider-specific metadata (e.g. OpenAI Responses API itemId) + // and streaming state from assistant message parts. These cause 404s + // when partial/stopped responses are sent back to the provider. + const { providerMetadata, state, id, ...rest } = part; + return rest; + }), + }; + }); +} + /** * Tracks how many times `pipeChat` has been called in the current `chatTask` run. 
* Used to prevent double-piping when a user both calls `pipeChat()` manually @@ -454,6 +476,7 @@ function chatTask( try { const result = await userRun({ ...currentPayload, + messages: sanitizeMessages(currentPayload.messages), signal: combinedSignal, cancelSignal, stopSignal, diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 7fd620ab908..9f4a36dd9be 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -30,6 +30,8 @@ const DEFAULT_STREAM_KEY = "chat"; const DEFAULT_BASE_URL = "https://api.trigger.dev"; const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; + + /** * Options for creating a TriggerChatTransport. */ @@ -91,6 +93,8 @@ type ChatSessionState = { publicAccessToken: string; /** Last SSE event ID — used to resume the stream without replaying old events. */ lastEventId?: string; + /** Set when the stream was aborted mid-turn (stop). On reconnect, skip chunks until __trigger_turn_complete. */ + skipToTurnComplete?: boolean; }; /** @@ -164,14 +168,12 @@ export class TriggerChatTransport implements ChatTransport { }; const session = this.sessions.get(chatId); - // If we have an existing run, send the message via input stream // to resume the conversation in the same run. if (session?.runId) { try { const apiClient = new ApiClient(this.baseURL, session.publicAccessToken); await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, payload); - return this.subscribeToStream( session.runId, session.publicAccessToken, @@ -205,7 +207,6 @@ export class TriggerChatTransport implements ChatTransport { runId, publicAccessToken: publicAccessToken ?? currentToken, }); - return this.subscribeToStream( runId, publicAccessToken ?? 
currentToken, @@ -256,7 +257,8 @@ export class TriggerChatTransport implements ChatTransport { abortSignal.addEventListener( "abort", () => { - if (session?.runId) { + if (session) { + session.skipToTurnComplete = true; const api = new ApiClient(this.baseURL, session.publicAccessToken); api .sendInputStream(session.runId, CHAT_STOP_STREAM_ID, { stop: true }) @@ -283,16 +285,18 @@ export class TriggerChatTransport implements ChatTransport { try { const sseStream = await subscription.subscribe(); const reader = sseStream.getReader(); + let chunkCount = 0; try { while (true) { const { done, value } = await reader.read(); if (done) { - // Stream closed without a control chunk — the run has - // ended (or was killed). Clear the session so that the - // next message triggers a fresh run. - if (chatId) { + // Only delete session if the stream ended naturally (not aborted by stop). + // When the user clicks stop, the abort closes the SSE reader which + // returns done=true, but the run is still alive and waiting for + // the next message via input streams. + if (chatId && !combinedSignal.aborted) { this.sessions.delete(chatId); } controller.close(); @@ -315,11 +319,17 @@ export class TriggerChatTransport implements ChatTransport { if (value.chunk != null && typeof value.chunk === "object") { const chunk = value.chunk as Record; - // Intercept the turn-complete control chunk emitted by - // `chatTask` after the AI response stream completes. This - // chunk is never forwarded to the AI SDK consumer. + // After a stop, skip leftover chunks from the stopped turn + // until we see the __trigger_turn_complete marker. 
+ if (session?.skipToTurnComplete) { + if (chunk.type === "__trigger_turn_complete") { + session.skipToTurnComplete = false; + chunkCount = 0; + } + continue; + } + if (chunk.type === "__trigger_turn_complete" && chatId) { - // Abort the underlying fetch to close the SSE connection internalAbort.abort(); try { controller.close(); @@ -329,6 +339,7 @@ export class TriggerChatTransport implements ChatTransport { return; } + chunkCount++; controller.enqueue(chunk as unknown as UIMessageChunk); } } diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index c5eac1c34f9..645d7dbfc47 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -79,7 +79,7 @@ export function Chat() { baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, }); - const { messages, sendMessage, status, error } = useChat({ + const { messages, sendMessage, stop, status, error } = useChat({ transport, }); @@ -152,13 +152,23 @@ export function Chat() { placeholder="Type a message…" className="flex-1 rounded-lg border border-gray-300 px-3 py-2 text-sm outline-none focus:border-blue-500 focus:ring-1 focus:ring-blue-500" /> - + {status === "streaming" ? 
( + + ) : ( + + )} ); diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index a798d35bde3..66d7d734f02 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,5 +1,5 @@ import { chat } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages, tool } from "ai"; +import { streamText, convertToModelMessages, tool, stepCountIs } from "ai"; import { openai } from "@ai-sdk/openai"; import { z } from "zod"; import os from "node:os"; @@ -64,14 +64,14 @@ declare const Deno: unknown; export const aiChat = chat.task({ id: "ai-chat", - run: async ({ messages, signal }) => { + run: async ({ messages, stopSignal }) => { return streamText({ model: openai("gpt-4o-mini"), system: "You are a helpful assistant. Be concise and friendly.", messages: await convertToModelMessages(messages), tools: { inspectEnvironment }, - maxSteps: 3, - abortSignal: signal, + stopWhen: stepCountIs(10), + abortSignal: stopSignal, }); }, }); From b15b16f57bb1912cff36466a5e1de7b01e961709 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 14:08:44 +0000 Subject: [PATCH 22/53] Add warmTimeoutInSeconds option --- packages/trigger-sdk/src/v3/ai.ts | 38 ++++++++++++++++++++++++-- packages/trigger-sdk/src/v3/streams.ts | 30 ++++++++++---------- references/ai-chat/src/trigger/chat.ts | 4 +++ 3 files changed, 53 insertions(+), 19 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index e39a5bb1700..d633d59dbbc 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -406,6 +406,18 @@ export type ChatTaskOptions = Omit< * @default "1h" */ turnTimeout?: string; + + /** + * How long (in seconds) to keep the run warm after each turn before suspending. + * During this window the run stays active and can respond instantly to the + * next message. 
After this timeout, the run suspends (frees compute) and waits + * via `inputStream.wait()`. + * + * Set to `0` to suspend immediately after each turn. + * + * @default 30 + */ + warmTimeoutInSeconds?: number; }; /** @@ -438,7 +450,13 @@ export type ChatTaskOptions = Omit< function chatTask( options: ChatTaskOptions ): Task { - const { run: userRun, maxTurns = 100, turnTimeout = "1h", ...restOptions } = options; + const { + run: userRun, + maxTurns = 100, + turnTimeout = "1h", + warmTimeoutInSeconds = 30, + ...restOptions + } = options; return createTask({ ...restOptions, @@ -512,7 +530,21 @@ function chatTask( continue; } - // Suspend the task (frees compute) until the next message arrives + // Phase 1: Keep the run warm for quick response to the next message. + // The run stays active (using compute) during this window. + if (warmTimeoutInSeconds > 0) { + const warm = await messagesInput.once({ + timeoutMs: warmTimeoutInSeconds * 1000, + }); + + if (warm.ok) { + // Message arrived while warm — respond instantly + currentPayload = warm.output; + continue; + } + } + + // Phase 2: Suspend the task (frees compute) until the next message arrives const next = await messagesInput.wait({ timeout: turnTimeout }); if (!next.ok) { @@ -520,7 +552,7 @@ function chatTask( return; } - currentPayload = next.output as ChatTaskPayload; + currentPayload = next.output; } } finally { stopSub.off(); diff --git a/packages/trigger-sdk/src/v3/streams.ts b/packages/trigger-sdk/src/v3/streams.ts index 68edc2a64ab..fe3af6e61dd 100644 --- a/packages/trigger-sdk/src/v3/streams.ts +++ b/packages/trigger-sdk/src/v3/streams.ts @@ -750,23 +750,20 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { const apiClient = apiClientManager.clientOrThrow(); + // Create the waitpoint before the span so we have the entity ID upfront + const response = await apiClient.createInputStreamWaitpoint(ctx.run.id, { + streamId: opts.id, + timeout: options?.timeout, + idempotencyKey: 
options?.idempotencyKey, + idempotencyKeyTTL: options?.idempotencyKeyTTL, + tags: options?.tags, + lastSeqNum: inputStreams.lastSeqNum(opts.id), + }); + const result = await tracer.startActiveSpan( `inputStream.wait()`, async (span) => { - // 1. Create a waitpoint linked to this input stream - const response = await apiClient.createInputStreamWaitpoint(ctx.run.id, { - streamId: opts.id, - timeout: options?.timeout, - idempotencyKey: options?.idempotencyKey, - idempotencyKeyTTL: options?.idempotencyKeyTTL, - tags: options?.tags, - lastSeqNum: inputStreams.lastSeqNum(opts.id), - }); - - // Set the entity ID now that we have the waitpoint ID - span.setAttribute(SemanticInternalAttributes.ENTITY_ID, response.waitpointId); - - // 2. Block the run on the waitpoint + // 1. Block the run on the waitpoint const waitResponse = await apiClient.waitForWaitpointToken({ runFriendlyId: ctx.run.id, waitpointFriendlyId: response.waitpointId, @@ -776,10 +773,10 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { throw new Error("Failed to block on input stream waitpoint"); } - // 3. Suspend the task + // 2. Suspend the task const waitResult = await runtime.waitUntil(response.waitpointId); - // 4. Parse the output + // 3. Parse the output const data = waitResult.output !== undefined ? 
await conditionallyImportAndParsePacket( @@ -806,6 +803,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "wait", [SemanticInternalAttributes.ENTITY_TYPE]: "waitpoint", + [SemanticInternalAttributes.ENTITY_ID]: response.waitpointId, streamId: opts.id, ...accessoryAttributes({ items: [ diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 66d7d734f02..d4b3bab6431 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -64,6 +64,7 @@ declare const Deno: unknown; export const aiChat = chat.task({ id: "ai-chat", + warmTimeoutInSeconds: 10, run: async ({ messages, stopSignal }) => { return streamText({ model: openai("gpt-4o-mini"), @@ -72,6 +73,9 @@ export const aiChat = chat.task({ tools: { inspectEnvironment }, stopWhen: stepCountIs(10), abortSignal: stopSignal, + experimental_telemetry: { + isEnabled: true, + } }); }, }); From 03e13f6c195e26e3e6cd087ffb9eced7c688462f Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 15:13:32 +0000 Subject: [PATCH 23/53] Add clientData support --- .claude/rules/package-installation.md | 22 +++ CLAUDE.md | 2 + packages/trigger-sdk/src/v3/ai.ts | 52 +++--- packages/trigger-sdk/src/v3/chat.ts | 27 +++- pnpm-lock.yaml | 119 ++++++++++++++ references/ai-chat/package.json | 2 + references/ai-chat/src/app/globals.css | 1 + references/ai-chat/src/app/layout.tsx | 1 + references/ai-chat/src/components/chat.tsx | 178 +++++++++++++++------ references/ai-chat/src/trigger/chat.ts | 26 ++- 10 files changed, 356 insertions(+), 74 deletions(-) create mode 100644 .claude/rules/package-installation.md diff --git a/.claude/rules/package-installation.md b/.claude/rules/package-installation.md new file mode 100644 index 00000000000..310074823c5 --- /dev/null +++ b/.claude/rules/package-installation.md @@ -0,0 +1,22 @@ +--- +paths: + - "**/package.json" +--- + +# Installing 
Packages + +When adding a new dependency to any package.json in the monorepo: + +1. **Look up the latest version** on npm before adding: + ```bash + pnpm view version + ``` + If unsure which version to use (e.g. major version compatibility), confirm with the user. + +2. **Edit the package.json directly** — do NOT use `pnpm add` as it can cause issues in the monorepo. Add the dependency with the correct version range (typically `^x.y.z`). + +3. **Run `pnpm i` from the repo root** after editing to install and update the lockfile: + ```bash + pnpm i + ``` + Always run from the repo root, not from the package directory. diff --git a/CLAUDE.md b/CLAUDE.md index 93cf4624bcf..26141370b5e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,6 +6,8 @@ This file provides guidance to Claude Code when working with this repository. Su This is a pnpm 10.23.0 monorepo using Turborepo. Run commands from root with `pnpm run`. +**Adding dependencies:** Edit `package.json` directly instead of using `pnpm add`, then run `pnpm i` from the repo root. See `.claude/rules/package-installation.md` for the full process. + ```bash pnpm run docker # Start Docker services (PostgreSQL, Redis, Electric) pnpm run db:migrate # Run database migrations diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index d633d59dbbc..e9994711d9b 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -167,11 +167,23 @@ export const CHAT_STREAM_KEY = _CHAT_STREAM_KEY; export { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID }; /** - * The payload shape that the chat transport sends to the triggered task. + * The wire payload shape sent by `TriggerChatTransport`. + * Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name. 
+ * @internal + */ +type ChatTaskWirePayload = { + messages: TMessage[]; + chatId: string; + trigger: "submit-message" | "regenerate-message"; + messageId?: string; + metadata?: unknown; +}; + +/** + * The payload shape passed to the `chatTask` run function. * - * When using `chatTask()`, the payload is automatically typed — you don't need - * to import this type. Use this type only if you're using `task()` directly - * with `pipeChat()`. + * The `metadata` field from the AI SDK transport is exposed as `clientData` + * to avoid confusion with Trigger.dev's run metadata. */ export type ChatTaskPayload = { /** The conversation messages */ @@ -190,8 +202,8 @@ export type ChatTaskPayload = { /** The ID of the message to regenerate (only for `"regenerate-message"`) */ messageId?: string; - /** Custom metadata from the frontend */ - metadata?: unknown; + /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ + clientData?: unknown; }; /** @@ -213,7 +225,7 @@ export type ChatTaskSignals = { export type ChatTaskRunPayload = ChatTaskPayload & ChatTaskSignals; // Input streams for bidirectional chat communication -const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); +const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); /** @@ -449,7 +461,7 @@ export type ChatTaskOptions = Omit< */ function chatTask( options: ChatTaskOptions -): Task { +): Task { const { run: userRun, maxTurns = 100, @@ -458,10 +470,10 @@ function chatTask( ...restOptions } = options; - return createTask({ + return createTask({ ...restOptions, - run: async (payload: ChatTaskPayload, { signal: runSignal }) => { - let currentPayload = payload; + run: async (payload: ChatTaskWirePayload, { signal: runSignal }) => { + let currentWirePayload = payload; // Mutable reference to the current turn's stop controller so the // stop input stream 
listener (registered once) can abort the right turn. @@ -486,15 +498,19 @@ function chatTask( const combinedSignal = AbortSignal.any([runSignal, stopController.signal]); // Buffer messages that arrive during streaming - const pendingMessages: ChatTaskPayload[] = []; + const pendingMessages: ChatTaskWirePayload[] = []; const msgSub = messagesInput.on((msg) => { - pendingMessages.push(msg as ChatTaskPayload); + pendingMessages.push(msg); }); + // Remap wire payload to user-facing payload (metadata -> clientData) + const { metadata: wireMetadata, ...restWire } = currentWirePayload; + try { const result = await userRun({ - ...currentPayload, - messages: sanitizeMessages(currentPayload.messages), + ...restWire, + clientData: wireMetadata, + messages: sanitizeMessages(currentWirePayload.messages), signal: combinedSignal, cancelSignal, stopSignal, @@ -526,7 +542,7 @@ function chatTask( // If messages arrived during streaming, use the first one immediately if (pendingMessages.length > 0) { - currentPayload = pendingMessages[0]!; + currentWirePayload = pendingMessages[0]!; continue; } @@ -539,7 +555,7 @@ function chatTask( if (warm.ok) { // Message arrived while warm — respond instantly - currentPayload = warm.output; + currentWirePayload = warm.output; continue; } } @@ -552,7 +568,7 @@ function chatTask( return; } - currentPayload = next.output; + currentWirePayload = next.output; } } finally { stopSub.off(); diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 9f4a36dd9be..d41b38e28e8 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -82,6 +82,24 @@ export type TriggerChatTransportOptions = { * @default 120 */ streamTimeoutSeconds?: number; + + /** + * Default metadata included in every request payload. + * Merged with per-call `metadata` from `sendMessage()` — per-call values + * take precedence over transport-level defaults. 
+ * + * Useful for data that should accompany every message, like a user ID. + * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * metadata: { userId: currentUser.id }, + * }); + * ``` + */ + metadata?: Record; }; /** @@ -132,6 +150,7 @@ export class TriggerChatTransport implements ChatTransport { private readonly streamKey: string; private readonly extraHeaders: Record; private readonly streamTimeoutSeconds: number; + private readonly defaultMetadata: Record | undefined; private sessions: Map = new Map(); @@ -145,6 +164,7 @@ export class TriggerChatTransport implements ChatTransport { this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; this.extraHeaders = options.headers ?? {}; this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; + this.defaultMetadata = options.metadata; } sendMessages = async ( @@ -158,13 +178,18 @@ export class TriggerChatTransport implements ChatTransport { ): Promise> => { const { trigger, chatId, messageId, messages, abortSignal, body, metadata } = options; + const mergedMetadata = + this.defaultMetadata || metadata + ? { ...(this.defaultMetadata ?? {}), ...((metadata as Record) ?? {}) } + : undefined; + const payload = { ...(body ?? 
{}), messages, chatId, trigger, messageId, - metadata, + metadata: mergedMetadata, }; const session = this.sessions.get(chatId); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a2bdc672606..269b8df49f6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2119,6 +2119,9 @@ importers: references/ai-chat: dependencies: + '@ai-sdk/anthropic': + specifier: ^3.0.0 + version: 3.0.50(zod@3.25.76) '@ai-sdk/openai': specifier: ^3.0.0 version: 3.0.19(zod@3.25.76) @@ -2140,6 +2143,9 @@ importers: react-dom: specifier: ^19.0.0 version: 19.1.0(react@19.1.0) + streamdown: + specifier: ^2.3.0 + version: 2.3.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0) zod: specifier: 3.25.76 version: 3.25.76 @@ -2910,6 +2916,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4 + '@ai-sdk/anthropic@3.0.50': + resolution: {integrity: sha512-BkCUgGTp/iZJuuFBF1wv7GGnrEJg7X7hqbaa+/t4HTBt9dZn3e6NFn5NhPUvo2p5SreUeHEl0As0r2uaVn3K9Q==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/gateway@1.0.6': resolution: {integrity: sha512-JuSj1MtTr4vw2VBBth4wlbciQnQIV0o1YV9qGLFA+r85nR5H+cJp3jaYE0nprqfzC9rYG8w9c6XGHB3SDKgcgA==} engines: {node: '>=18'} @@ -3018,6 +3030,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.16': + resolution: {integrity: sha512-kBvDqNkt5EwlzF9FujmNhhtl8FYg3e8FO8P5uneKliqfRThWemzBj+wfYr7ZCymAQhTRnwSSz1/SOqhOAwmx9g==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.9': resolution: {integrity: sha512-bB4r6nfhBOpmoS9mePxjRoCy+LnzP3AfhyMGCkGL4Mn9clVNlqEeKj26zEKEtB6yoSVcT1IQ0Zh9fytwMCDnow==} engines: {node: '>=18'} @@ -3052,6 +3070,10 @@ packages: resolution: {integrity: sha512-2Xmoq6DBJqmSl80U6V9z5jJSJP7ehaJJQMy2iFUqTay06wdCqTnPVBBQbtEL8RCChenL+q5DC5H5WzU3vV3v8w==} engines: {node: '>=18'} + '@ai-sdk/provider@3.0.8': + resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==} + engines: {node: '>=18'} + 
'@ai-sdk/react@1.0.0': resolution: {integrity: sha512-BDrZqQA07Btg64JCuhFvBgYV+tt2B8cXINzEqWknGoxqcwgdE8wSLG2gkXoLzyC2Rnj7oj0HHpOhLUxDCmoKZg==} engines: {node: '>=18'} @@ -14574,6 +14596,9 @@ packages: hast-util-raw@9.1.0: resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} + hast-util-sanitize@5.0.2: + resolution: {integrity: sha512-3yTWghByc50aGS7JlGhk61SPenfE/p1oaFeNwkOOyrscaOkMGrcW9+Cy/QAIOBpZxP1yqDIzFMR0+Np0i0+usg==} + hast-util-to-estree@2.1.0: resolution: {integrity: sha512-Vwch1etMRmm89xGgz+voWXvVHba2iiMdGMKmaMfYt35rbVtFDq8JNwwAIvi8zHMkO6Gvqo9oTMwJTmzVRfXh4g==} @@ -15746,6 +15771,11 @@ packages: engines: {node: '>= 20'} hasBin: true + marked@17.0.1: + resolution: {integrity: sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==} + engines: {node: '>= 20'} + hasBin: true + marked@4.2.5: resolution: {integrity: sha512-jPueVhumq7idETHkb203WDD4fMA3yV9emQ5vLwop58lu8bTclMghBWcYAavlDqIEMaisADinV1TooIFCfqOsYQ==} engines: {node: '>= 12'} @@ -18120,12 +18150,18 @@ packages: rehype-harden@1.1.5: resolution: {integrity: sha512-JrtBj5BVd/5vf3H3/blyJatXJbzQfRT9pJBmjafbTaPouQCAKxHwRyCc7dle9BXQKxv4z1OzZylz/tNamoiG3A==} + rehype-harden@1.1.8: + resolution: {integrity: sha512-Qn7vR1xrf6fZCrkm9TDWi/AB4ylrHy+jqsNm1EHOAmbARYA6gsnVJBq/sdBh6kmT4NEZxH5vgIjrscefJAOXcw==} + rehype-katex@7.0.1: resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} rehype-raw@7.0.0: resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + rehype-sanitize@6.0.0: + resolution: {integrity: sha512-CsnhKNsyI8Tub6L4sm5ZFsme4puGfc6pYylvXo1AeqaGbjOYyzNv3qZPwvs0oMJ39eryyeOdmxwUIo94IpEhqg==} + remark-frontmatter@4.0.1: resolution: {integrity: sha512-38fJrB0KnmD3E33a5jZC/5+gGAC2WKNiPw1/fdXJvijBlhA7RCsvJklrYJakS0HedninvaCYW8lQGf9C918GfA==} @@ -18154,9 +18190,15 @@ packages: 
remark-rehype@11.1.1: resolution: {integrity: sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==} + remark-rehype@11.1.2: + resolution: {integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==} + remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + remend@1.2.1: + resolution: {integrity: sha512-4wC12bgXsfKAjF1ewwkNIQz5sqewz/z1xgIgjEMb3r1pEytQ37F0Cm6i+OhbTWEvguJD7lhOUJhK5fSasw9f0w==} + remix-auth-email-link@2.0.2: resolution: {integrity: sha512-Lze9c50fsqBpixXQKe37wI2Dm4rlYYkNA6Eskxk8erQ7tbyN8xiFXOgo7Y3Al0SSjzkezw8au3uc2vCLJ8A5mQ==} peerDependencies: @@ -18857,6 +18899,12 @@ packages: peerDependencies: react: ^18.0.0 || ^19.0.0 + streamdown@2.3.0: + resolution: {integrity: sha512-OqS3by/lt91lSicE8RQP2nTsYI6Q/dQgGP2vcyn9YesCmRHhNjswAuBAZA1z0F4+oBU3II/eV51LqjCqwTb1lw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + streamsearch@1.1.0: resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} engines: {node: '>=10.0.0'} @@ -19097,6 +19145,9 @@ packages: tailwind-merge@3.3.1: resolution: {integrity: sha512-gBXpgUm/3rp1lMZZrM/w7D8GKqshif0zAymAhbCyIt8KMe+0v9DQ7cdYLR4FHH/cKpdTXb+A/tKKU3eolfsI+g==} + tailwind-merge@3.4.0: + resolution: {integrity: sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==} + tailwind-scrollbar-hide@1.1.7: resolution: {integrity: sha512-X324n9OtpTmOMqEgDUEA/RgLrNfBF/jwJdctaPZDzB3mppxJk7TLIDmOreEDm1Bq4R9LSPu4Epf8VSdovNU+iA==} @@ -20444,6 +20495,12 @@ snapshots: '@ai-sdk/provider-utils': 3.0.3(zod@3.25.76) zod: 3.25.76 + '@ai-sdk/anthropic@3.0.50(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.16(zod@3.25.76) + zod: 3.25.76 + '@ai-sdk/gateway@1.0.6(zod@3.25.76)': dependencies: 
'@ai-sdk/provider': 2.0.0 @@ -20561,6 +20618,13 @@ snapshots: eventsource-parser: 3.0.6 zod: 3.25.76 + '@ai-sdk/provider-utils@4.0.16(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 3.25.76 + '@ai-sdk/provider-utils@4.0.9(zod@3.25.76)': dependencies: '@ai-sdk/provider': 3.0.5 @@ -20596,6 +20660,10 @@ snapshots: dependencies: json-schema: 0.4.0 + '@ai-sdk/provider@3.0.8': + dependencies: + json-schema: 0.4.0 + '@ai-sdk/react@1.0.0(react@18.3.1)(zod@3.25.76)': dependencies: '@ai-sdk/provider-utils': 2.0.0(zod@3.25.76) @@ -35812,6 +35880,12 @@ snapshots: web-namespaces: 2.0.1 zwitch: 2.0.4 + hast-util-sanitize@5.0.2: + dependencies: + '@types/hast': 3.0.4 + '@ungap/structured-clone': 1.3.0 + unist-util-position: 5.0.0 + hast-util-to-estree@2.1.0: dependencies: '@types/estree': 1.0.8 @@ -36925,6 +36999,8 @@ snapshots: marked@16.4.1: {} + marked@17.0.1: {} + marked@4.2.5: {} marked@7.0.4: {} @@ -40105,6 +40181,10 @@ snapshots: rehype-harden@1.1.5: {} + rehype-harden@1.1.8: + dependencies: + unist-util-visit: 5.0.0 + rehype-katex@7.0.1: dependencies: '@types/hast': 3.0.4 @@ -40121,6 +40201,11 @@ snapshots: hast-util-raw: 9.1.0 vfile: 6.0.3 + rehype-sanitize@6.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-sanitize: 5.0.2 + remark-frontmatter@4.0.1: dependencies: '@types/mdast': 3.0.10 @@ -40194,12 +40279,22 @@ snapshots: unified: 11.0.5 vfile: 6.0.3 + remark-rehype@11.1.2: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + mdast-util-to-hast: 13.2.0 + unified: 11.0.5 + vfile: 6.0.3 + remark-stringify@11.0.0: dependencies: '@types/mdast': 4.0.4 mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + remend@1.2.1: {} + remix-auth-email-link@2.0.2(@remix-run/server-runtime@2.1.0(typescript@5.5.4))(remix-auth@3.6.0(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4))): dependencies: 
'@remix-run/server-runtime': 2.1.0(typescript@5.5.4) @@ -41121,6 +41216,28 @@ snapshots: - '@types/react' - supports-color + streamdown@2.3.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + clsx: 2.1.1 + hast-util-to-jsx-runtime: 2.3.6 + html-url-attributes: 3.0.1 + marked: 17.0.1 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + rehype-harden: 1.1.8 + rehype-raw: 7.0.0 + rehype-sanitize: 6.0.0 + remark-gfm: 4.0.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + remend: 1.2.1 + tailwind-merge: 3.4.0 + unified: 11.0.5 + unist-util-visit: 5.0.0 + unist-util-visit-parents: 6.0.1 + transitivePeerDependencies: + - supports-color + streamsearch@1.1.0: {} streamx@2.22.0: @@ -41400,6 +41517,8 @@ snapshots: tailwind-merge@3.3.1: {} + tailwind-merge@3.4.0: {} + tailwind-scrollbar-hide@1.1.7: {} tailwind-scrollbar@3.0.1(tailwindcss@3.4.1): diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json index 9dcab80046f..89ccc36889e 100644 --- a/references/ai-chat/package.json +++ b/references/ai-chat/package.json @@ -9,6 +9,7 @@ "dev:trigger": "trigger dev" }, "dependencies": { + "@ai-sdk/anthropic": "^3.0.0", "@ai-sdk/openai": "^3.0.0", "@ai-sdk/react": "^3.0.0", "@trigger.dev/sdk": "workspace:*", @@ -16,6 +17,7 @@ "next": "15.3.3", "react": "^19.0.0", "react-dom": "^19.0.0", + "streamdown": "^2.3.0", "zod": "3.25.76" }, "devDependencies": { diff --git a/references/ai-chat/src/app/globals.css b/references/ai-chat/src/app/globals.css index f1d8c73cdcf..92c4b9a7860 100644 --- a/references/ai-chat/src/app/globals.css +++ b/references/ai-chat/src/app/globals.css @@ -1 +1,2 @@ @import "tailwindcss"; +@source "../../../node_modules/streamdown/dist/*.js"; diff --git a/references/ai-chat/src/app/layout.tsx b/references/ai-chat/src/app/layout.tsx index f507028583d..544dd9142d8 100644 --- a/references/ai-chat/src/app/layout.tsx +++ b/references/ai-chat/src/app/layout.tsx @@ -1,5 +1,6 @@ import type { Metadata } from "next"; import "./globals.css"; 
+import "streamdown/styles.css"; export const metadata: Metadata = { title: "AI Chat — Trigger.dev", diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 645d7dbfc47..9639382dd38 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -2,13 +2,14 @@ import { useChat } from "@ai-sdk/react"; import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; -import { useState } from "react"; +import { useRef, useState } from "react"; +import { Streamdown } from "streamdown"; import { getChatToken } from "@/app/actions"; +import { MODEL_OPTIONS, DEFAULT_MODEL } from "@/trigger/chat"; import type { aiChat } from "@/trigger/chat"; function ToolInvocation({ part }: { part: any }) { const [expanded, setExpanded] = useState(false); - // Static tools: type is "tool-{name}", dynamic tools have toolName property const toolName = part.type === "dynamic-tool" ? (part.toolName ?? "tool") @@ -72,6 +73,9 @@ function ToolInvocation({ part }: { part: any }) { export function Chat() { const [input, setInput] = useState(""); + const [model, setModel] = useState(DEFAULT_MODEL); + // Track which model was used for each assistant message (keyed by the preceding user message ID) + const modelByUserMsgId = useRef>(new Map()); const transport = useTriggerChatTransport({ task: "ai-chat", @@ -83,12 +87,34 @@ export function Chat() { transport, }); - function handleSubmit(e: React.FormEvent) { - e.preventDefault(); - if (!input.trim() || status === "streaming") return; + // Build a map of assistant message index -> model used + // Each assistant message follows a user message, so we track by position + function getModelForAssistantAt(index: number): string | undefined { + // Walk backwards to find the preceding user message + for (let i = index - 1; i >= 0; i--) { + if (messages[i]?.role === "user") { + return modelByUserMsgId.current.get(messages[i].id); + } + } + return undefined; + 
} - sendMessage({ text: input }); - setInput(""); + // When sending, record which model is selected for this user message + const originalSendMessage = sendMessage; + function trackedSendMessage(msg: Parameters[0], opts?: Parameters[1]) { + // We'll track it after the message appears — use a ref to store the pending model + pendingModel.current = model; + originalSendMessage(msg, opts); + } + const pendingModel = useRef(model); + + // Track model for new user messages as they appear + const trackedUserIds = useRef>(new Set()); + for (const msg of messages) { + if (msg.role === "user" && !trackedUserIds.current.has(msg.id)) { + trackedUserIds.current.add(msg.id); + modelByUserMsgId.current.set(msg.id, pendingModel.current); + } } return ( @@ -99,38 +125,61 @@ export function Chat() {

Send a message to start chatting.

)} - {messages.map((message) => ( + {messages.map((message, messageIndex) => (
-
- {message.parts.map((part, i) => { - if (part.type === "text") { - return {part.text}; - } - - // Static tools: "tool-{toolName}", dynamic tools: "dynamic-tool" - if (part.type.startsWith("tool-") || part.type === "dynamic-tool") { - return ; - } - - return null; - })} +
+ {/* Model badge for assistant messages */} + {message.role === "assistant" && ( +
+ + {getModelForAssistantAt(messageIndex) ?? DEFAULT_MODEL} + +
+ )} +
+ {message.parts.map((part, i) => { + if (part.type === "text") { + if (message.role === "assistant") { + return ( + + {part.text} + + ); + } + return {part.text}; + } + + if (part.type.startsWith("tool-") || part.type === "dynamic-tool") { + return ; + } + + return null; + })} +
))} - {status === "streaming" && ( + {status === "streaming" && messages[messages.length - 1]?.role !== "assistant" && (
- Thinking… + Thinking...
)} @@ -144,31 +193,54 @@ export function Chat() { )} {/* Input */} -
- setInput(e.target.value)} - placeholder="Type a message…" - className="flex-1 rounded-lg border border-gray-300 px-3 py-2 text-sm outline-none focus:border-blue-500 focus:ring-1 focus:ring-blue-500" - /> - {status === "streaming" ? ( - - ) : ( - + ) : ( + + )} +
+
+ +
); diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index d4b3bab6431..94a9d66be33 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,9 +1,27 @@ import { chat } from "@trigger.dev/sdk/ai"; import { streamText, convertToModelMessages, tool, stepCountIs } from "ai"; +import type { LanguageModel } from "ai"; import { openai } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; import { z } from "zod"; import os from "node:os"; +const MODELS: Record LanguageModel> = { + "gpt-4o-mini": () => openai("gpt-4o-mini"), + "gpt-4o": () => openai("gpt-4o"), + "claude-sonnet-4-6": () => anthropic("claude-sonnet-4-6"), + "claude-opus-4-6": () => anthropic("claude-opus-4-6"), +}; + +export const MODEL_OPTIONS = Object.keys(MODELS); +export const DEFAULT_MODEL = "gpt-4o-mini"; + +function getModel(modelId?: string): LanguageModel { + const factory = MODELS[modelId ?? DEFAULT_MODEL]; + if (!factory) return MODELS[DEFAULT_MODEL]!(); + return factory(); +} + const inspectEnvironment = tool({ description: "Inspect the current execution environment. Returns runtime info (Node.js/Bun/Deno version), " + @@ -65,9 +83,13 @@ declare const Deno: unknown; export const aiChat = chat.task({ id: "ai-chat", warmTimeoutInSeconds: 10, - run: async ({ messages, stopSignal }) => { + run: async ({ messages, clientData, stopSignal }) => { + const { model: modelId } = z + .object({ model: z.string().optional() }) + .parse(clientData ?? {}); + return streamText({ - model: openai("gpt-4o-mini"), + model: getModel(modelId), system: "You are a helpful assistant. 
Be concise and friendly.", messages: await convertToModelMessages(messages), tools: { inspectEnvironment }, From 1212867fbec07c23d7cbb42dfa345d6eb82372c4 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 15:22:45 +0000 Subject: [PATCH 24/53] provide already converted UIMessages to the run function for better dx --- packages/trigger-sdk/src/v3/ai.ts | 36 ++++++++++++++++---------- references/ai-chat/src/trigger/chat.ts | 4 +-- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index e9994711d9b..134ec99c13e 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -9,8 +9,8 @@ import { type TaskSchema, type TaskWithSchema, } from "@trigger.dev/core/v3"; -import type { UIMessage } from "ai"; -import { dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; +import type { ModelMessage, UIMessage } from "ai"; +import { convertToModelMessages, dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; import { auth } from "./auth.js"; import { metadata } from "./metadata.js"; import { streams } from "./streams.js"; @@ -182,12 +182,17 @@ type ChatTaskWirePayload = { /** * The payload shape passed to the `chatTask` run function. * - * The `metadata` field from the AI SDK transport is exposed as `clientData` - * to avoid confusion with Trigger.dev's run metadata. + * - `messages` contains model-ready messages (converted via `convertToModelMessages`) — + * pass these directly to `streamText`. + * - `uiMessages` contains the raw `UIMessage[]` from the frontend. + * - `clientData` contains custom data from the frontend (the `metadata` field from `sendMessage()`). */ -export type ChatTaskPayload = { - /** The conversation messages */ - messages: TMessage[]; +export type ChatTaskPayload = { + /** Model-ready messages — pass directly to `streamText({ messages })`. 
*/ + messages: ModelMessage[]; + + /** Raw UI messages from the frontend. */ + uiMessages: UIMessage[]; /** The unique identifier for the chat session */ chatId: string; @@ -324,7 +329,7 @@ function isReadableStream(value: unknown): value is ReadableStream { * run: async (payload: ChatTaskPayload) => { * const result = streamText({ * model: openai("gpt-4o"), - * messages: convertToModelMessages(payload.messages), + * messages: payload.messages, * }); * * await chat.pipe(result); @@ -388,7 +393,7 @@ async function pipeChat( * transport resumes the same run by sending the next message via input streams. */ export type ChatTaskOptions = Omit< - TaskOptions, + TaskOptions, "run" > & { /** @@ -452,8 +457,8 @@ export type ChatTaskOptions = Omit< * run: async ({ messages, signal }) => { * return streamText({ * model: openai("gpt-4o"), - * messages: convertToModelMessages(messages), - * abortSignal: signal, // fires on stop or run cancel + * messages, // already converted via convertToModelMessages + * abortSignal: signal, * }); * }, * }); @@ -503,14 +508,17 @@ function chatTask( pendingMessages.push(msg); }); - // Remap wire payload to user-facing payload (metadata -> clientData) - const { metadata: wireMetadata, ...restWire } = currentWirePayload; + // Convert wire payload to user-facing payload + const { metadata: wireMetadata, messages: uiMessages, ...restWire } = currentWirePayload; + const sanitized = sanitizeMessages(uiMessages); + const modelMessages = await convertToModelMessages(sanitized); try { const result = await userRun({ ...restWire, + messages: modelMessages, + uiMessages: sanitized, clientData: wireMetadata, - messages: sanitizeMessages(currentWirePayload.messages), signal: combinedSignal, cancelSignal, stopSignal, diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 94a9d66be33..bd7069f1aeb 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,5 +1,5 @@ import 
{ chat } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages, tool, stepCountIs } from "ai"; +import { streamText, tool, stepCountIs } from "ai"; import type { LanguageModel } from "ai"; import { openai } from "@ai-sdk/openai"; import { anthropic } from "@ai-sdk/anthropic"; @@ -91,7 +91,7 @@ export const aiChat = chat.task({ return streamText({ model: getModel(modelId), system: "You are a helpful assistant. Be concise and friendly.", - messages: await convertToModelMessages(messages), + messages, tools: { inspectEnvironment }, stopWhen: stepCountIs(10), abortSignal: stopSignal, From 825c0387a400e228cc01ba4d227f6a50a632f196 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 16:08:20 +0000 Subject: [PATCH 25/53] Added better telemetry support to view turns --- packages/core/src/v3/realtimeStreams/types.ts | 9 + packages/trigger-sdk/src/v3/ai.ts | 246 ++++++++++++------ packages/trigger-sdk/src/v3/streams.ts | 9 +- 3 files changed, 184 insertions(+), 80 deletions(-) diff --git a/packages/core/src/v3/realtimeStreams/types.ts b/packages/core/src/v3/realtimeStreams/types.ts index 174970c2830..cd420b1dce4 100644 --- a/packages/core/src/v3/realtimeStreams/types.ts +++ b/packages/core/src/v3/realtimeStreams/types.ts @@ -71,6 +71,10 @@ export type PipeStreamOptions = { * Additional request options for the API call. */ requestOptions?: ApiRequestOptions; + /** Override the default span name for this operation. */ + spanName?: string; + /** When true, the span will be collapsed in the dashboard. */ + collapsed?: boolean; }; /** @@ -199,6 +203,8 @@ export type InputStreamSubscription = { export type InputStreamOnceOptions = { signal?: AbortSignal; timeoutMs?: number; + /** Override the default span name for this operation. */ + spanName?: string; }; export type SendInputStreamOptions = { @@ -234,6 +240,9 @@ export type InputStreamWaitOptions = { * and filtering waitpoints via `wait.listTokens()`. 
*/ tags?: string[]; + + /** Override the default span name for this operation. */ + spanName?: string; }; export type InferInputStreamType = T extends RealtimeDefinedInputStream diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 134ec99c13e..50caa2aaa5e 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -1,6 +1,8 @@ import { + accessoryAttributes, AnyTask, isSchemaZodEsque, + SemanticInternalAttributes, Task, type inferSchemaIn, type PipeStreamOptions, @@ -11,10 +13,12 @@ import { } from "@trigger.dev/core/v3"; import type { ModelMessage, UIMessage } from "ai"; import { convertToModelMessages, dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; +import { type Attributes, trace } from "@opentelemetry/api"; import { auth } from "./auth.js"; import { metadata } from "./metadata.js"; import { streams } from "./streams.js"; import { createTask } from "./shared.js"; +import { tracer } from "./tracer.js"; import { CHAT_STREAM_KEY as _CHAT_STREAM_KEY, CHAT_MESSAGES_STREAM_ID, @@ -281,6 +285,9 @@ export type PipeChatOptions = { * @default "self" (current run) */ target?: string; + + /** Override the default span name for this operation. 
*/ + spanName?: string; }; /** @@ -373,6 +380,9 @@ async function pipeChat( if (options?.target) { pipeOptions.target = options.target; } + if (options?.spanName) { + pipeOptions.spanName = options.spanName; + } const { waitUntilComplete } = streams.pipe(streamKey, stream, pipeOptions); await waitUntilComplete(); @@ -478,6 +488,12 @@ function chatTask( return createTask({ ...restOptions, run: async (payload: ChatTaskWirePayload, { signal: runSignal }) => { + // Set gen_ai.conversation.id on the run-level span for dashboard context + const activeSpan = trace.getActiveSpan(); + if (activeSpan) { + activeSpan.setAttribute("gen_ai.conversation.id", payload.chatId); + } + let currentWirePayload = payload; // Mutable reference to the current turn's stop controller so the @@ -491,92 +507,142 @@ function chatTask( try { for (let turn = 0; turn < maxTurns; turn++) { - _chatPipeCount = 0; + // Extract turn-level context before entering the span + const { metadata: wireMetadata, messages: uiMessages, ...restWire } = currentWirePayload; + const lastUserMessage = extractLastUserMessageText(uiMessages); + + const turnAttributes: Attributes = { + "turn.number": turn + 1, + "gen_ai.conversation.id": currentWirePayload.chatId, + "gen_ai.operation.name": "chat", + "chat.trigger": currentWirePayload.trigger, + [SemanticInternalAttributes.STYLE_ICON]: "tabler-message-chatbot", + [SemanticInternalAttributes.ENTITY_TYPE]: "chat-turn", + }; + + if (lastUserMessage) { + turnAttributes["chat.user_message"] = lastUserMessage; + + // Show a truncated preview of the user message as an accessory + const preview = + lastUserMessage.length > 80 + ? lastUserMessage.slice(0, 80) + "..." 
+ : lastUserMessage; + Object.assign( + turnAttributes, + accessoryAttributes({ + items: [{ text: preview, variant: "normal" }], + style: "codepath", + }) + ); + } - // Per-turn stop controller (reset each turn) - const stopController = new AbortController(); - currentStopController = stopController; + if (wireMetadata !== undefined) { + turnAttributes["chat.client_data"] = + typeof wireMetadata === "string" ? wireMetadata : JSON.stringify(wireMetadata); + } - // Three signals for the user's run function - const stopSignal = stopController.signal; - const cancelSignal = runSignal; - const combinedSignal = AbortSignal.any([runSignal, stopController.signal]); + const turnResult = await tracer.startActiveSpan( + `chat turn ${turn + 1}`, + async () => { + _chatPipeCount = 0; + + // Per-turn stop controller (reset each turn) + const stopController = new AbortController(); + currentStopController = stopController; + + // Three signals for the user's run function + const stopSignal = stopController.signal; + const cancelSignal = runSignal; + const combinedSignal = AbortSignal.any([runSignal, stopController.signal]); + + // Buffer messages that arrive during streaming + const pendingMessages: ChatTaskWirePayload[] = []; + const msgSub = messagesInput.on((msg) => { + pendingMessages.push(msg); + }); + + // Convert wire payload to user-facing payload + const sanitized = sanitizeMessages(uiMessages); + const modelMessages = await convertToModelMessages(sanitized); + + try { + const result = await userRun({ + ...restWire, + messages: modelMessages, + uiMessages: sanitized, + clientData: wireMetadata, + signal: combinedSignal, + cancelSignal, + stopSignal, + }); + + // Auto-pipe if the run function returned a StreamTextResult or similar, + // but only if pipeChat() wasn't already called manually during this turn + if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { + await pipeChat(result, { signal: combinedSignal, spanName: "stream response" }); + } + } catch (error) 
{ + // Handle AbortError from streamText gracefully + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) { + return "exit"; // Full run cancellation — exit + } + // Stop generation — fall through to continue the loop + } else { + throw error; + } + } finally { + msgSub.off(); + } - // Buffer messages that arrive during streaming - const pendingMessages: ChatTaskWirePayload[] = []; - const msgSub = messagesInput.on((msg) => { - pendingMessages.push(msg); - }); + if (runSignal.aborted) return "exit"; - // Convert wire payload to user-facing payload - const { metadata: wireMetadata, messages: uiMessages, ...restWire } = currentWirePayload; - const sanitized = sanitizeMessages(uiMessages); - const modelMessages = await convertToModelMessages(sanitized); - - try { - const result = await userRun({ - ...restWire, - messages: modelMessages, - uiMessages: sanitized, - clientData: wireMetadata, - signal: combinedSignal, - cancelSignal, - stopSignal, - }); - - // Auto-pipe if the run function returned a StreamTextResult or similar, - // but only if pipeChat() wasn't already called manually during this turn - if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { - await pipeChat(result, { signal: combinedSignal }); - } - } catch (error) { - // Handle AbortError from streamText gracefully - if (error instanceof Error && error.name === "AbortError") { - if (runSignal.aborted) { - return; // Full run cancellation — exit + // Write turn-complete control chunk so frontend closes its stream + await writeTurnCompleteChunk(currentWirePayload.chatId); + + // If messages arrived during streaming, use the first one immediately + if (pendingMessages.length > 0) { + currentWirePayload = pendingMessages[0]!; + return "continue"; } - // Stop generation — fall through to continue the loop - } else { - throw error; - } - } finally { - msgSub.off(); - } - if (runSignal.aborted) return; + // Phase 1: Keep the run warm for quick response to the next 
message. + // The run stays active (using compute) during this window. + if (warmTimeoutInSeconds > 0) { + const warm = await messagesInput.once({ + timeoutMs: warmTimeoutInSeconds * 1000, + spanName: "waiting (warm)", + }); + + if (warm.ok) { + // Message arrived while warm — respond instantly + currentWirePayload = warm.output; + return "continue"; + } + } - // Write turn-complete control chunk so frontend closes its stream - await writeTurnCompleteChunk(); + // Phase 2: Suspend the task (frees compute) until the next message arrives + const next = await messagesInput.wait({ + timeout: turnTimeout, + spanName: "waiting (suspended)", + }); - // If messages arrived during streaming, use the first one immediately - if (pendingMessages.length > 0) { - currentWirePayload = pendingMessages[0]!; - continue; - } + if (!next.ok) { + // Timed out waiting for the next message — end the conversation + return "exit"; + } - // Phase 1: Keep the run warm for quick response to the next message. - // The run stays active (using compute) during this window. - if (warmTimeoutInSeconds > 0) { - const warm = await messagesInput.once({ - timeoutMs: warmTimeoutInSeconds * 1000, - }); - - if (warm.ok) { - // Message arrived while warm — respond instantly - currentWirePayload = warm.output; - continue; + currentWirePayload = next.output; + return "continue"; + }, + { + attributes: turnAttributes, } - } - - // Phase 2: Suspend the task (frees compute) until the next message arrives - const next = await messagesInput.wait({ timeout: turnTimeout }); - - if (!next.ok) { - // Timed out waiting for the next message — end the conversation - return; - } + ); - currentWirePayload = next.output; + if (turnResult === "exit") return; + // "continue" means proceed to next iteration } } finally { stopSub.off(); @@ -621,11 +687,39 @@ export const chat = { * The frontend transport intercepts this to close the ReadableStream for the current turn. 
* @internal */ -async function writeTurnCompleteChunk(): Promise { +async function writeTurnCompleteChunk(chatId?: string): Promise { const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + spanName: "turn complete", + collapsed: true, execute: ({ write }) => { write({ type: "__trigger_turn_complete" }); }, }); await waitUntilComplete(); } + +/** + * Extracts the text content of the last user message from a UIMessage array. + * Returns undefined if no user message is found. + * @internal + */ +function extractLastUserMessageText(messages: UIMessage[]): string | undefined { + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]!; + if (msg.role !== "user") continue; + + // UIMessage uses parts array + if (msg.parts) { + const textParts = msg.parts + .filter((p: any) => p.type === "text" && p.text) + .map((p: any) => p.text as string); + if (textParts.length > 0) { + return textParts.join("\n"); + } + } + + break; + } + + return undefined; +} diff --git a/packages/trigger-sdk/src/v3/streams.ts b/packages/trigger-sdk/src/v3/streams.ts index fe3af6e61dd..d8e6abf0d30 100644 --- a/packages/trigger-sdk/src/v3/streams.ts +++ b/packages/trigger-sdk/src/v3/streams.ts @@ -139,7 +139,7 @@ function pipe( opts = valueOrOptions as PipeStreamOptions | undefined; } - return pipeInternal(key, value, opts, "streams.pipe()"); + return pipeInternal(key, value, opts, opts?.spanName ?? "streams.pipe()"); } /** @@ -167,6 +167,7 @@ function pipeInternal( [SemanticInternalAttributes.ENTITY_TYPE]: "realtime-stream", [SemanticInternalAttributes.ENTITY_ID]: `${runId}:${key}`, [SemanticInternalAttributes.STYLE_ICON]: "streams", + ...(opts?.collapsed ? { [SemanticInternalAttributes.COLLAPSED]: true } : {}), ...accessoryAttributes({ items: [ { @@ -640,7 +641,7 @@ function writerInternal(key: string, options: WriterStreamOptions) } }); - return pipeInternal(key, stream, options, "streams.writer()"); + return pipeInternal(key, stream, options, options.spanName ?? 
"streams.writer()"); } export type RealtimeDefineStreamOptions = { @@ -713,7 +714,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { return new InputStreamOncePromise((resolve, reject) => { tracer .startActiveSpan( - `inputStream.once()`, + options?.spanName ?? `inputStream.once()`, async () => { const result = await innerPromise; resolve(result as InputStreamOnceResult); @@ -761,7 +762,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { }); const result = await tracer.startActiveSpan( - `inputStream.wait()`, + options?.spanName ?? `inputStream.wait()`, async (span) => { // 1. Block the run on the waitpoint const waitResponse = await apiClient.waitForWaitpointToken({ From 0b296f82f52da98ff388b0f9ed1ece546365a78f Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 16:59:05 +0000 Subject: [PATCH 26/53] Fix double looping when resuming from an input stream waitpoint --- packages/core/src/v3/inputStreams/index.ts | 12 ++++++++ packages/core/src/v3/inputStreams/manager.ts | 29 +++++++++++++++++++ .../core/src/v3/inputStreams/noopManager.ts | 6 ++++ packages/core/src/v3/inputStreams/types.ts | 22 ++++++++++++++ packages/trigger-sdk/src/v3/streams.ts | 14 ++++++++- 5 files changed, 82 insertions(+), 1 deletion(-) diff --git a/packages/core/src/v3/inputStreams/index.ts b/packages/core/src/v3/inputStreams/index.ts index 4a871d6bfcc..0b3c7af063f 100644 --- a/packages/core/src/v3/inputStreams/index.ts +++ b/packages/core/src/v3/inputStreams/index.ts @@ -51,6 +51,18 @@ export class InputStreamsAPI implements InputStreamManager { return this.#getManager().lastSeqNum(streamId); } + public setLastSeqNum(streamId: string, seqNum: number): void { + this.#getManager().setLastSeqNum(streamId, seqNum); + } + + public shiftBuffer(streamId: string): boolean { + return this.#getManager().shiftBuffer(streamId); + } + + public disconnectStream(streamId: string): void { + this.#getManager().disconnectStream(streamId); + } + public 
clearHandlers(): void { this.#getManager().clearHandlers(); } diff --git a/packages/core/src/v3/inputStreams/manager.ts b/packages/core/src/v3/inputStreams/manager.ts index f393f4a169a..09212fb6a84 100644 --- a/packages/core/src/v3/inputStreams/manager.ts +++ b/packages/core/src/v3/inputStreams/manager.ts @@ -40,6 +40,26 @@ export class StandardInputStreamManager implements InputStreamManager { return this.seqNums.get(streamId); } + setLastSeqNum(streamId: string, seqNum: number): void { + const current = this.seqNums.get(streamId); + // Only advance forward, never backward + if (current === undefined || seqNum > current) { + this.seqNums.set(streamId, seqNum); + } + } + + shiftBuffer(streamId: string): boolean { + const buffered = this.buffer.get(streamId); + if (buffered && buffered.length > 0) { + buffered.shift(); + if (buffered.length === 0) { + this.buffer.delete(streamId); + } + return true; + } + return false; + } + setRunId(runId: string, streamsVersion?: string): void { this.currentRunId = runId; this.streamsVersion = streamsVersion; @@ -158,6 +178,15 @@ export class StandardInputStreamManager implements InputStreamManager { } } + disconnectStream(streamId: string): void { + const tail = this.tails.get(streamId); + if (tail) { + tail.abortController.abort(); + this.tails.delete(streamId); + } + this.buffer.delete(streamId); + } + connectTail(runId: string, _fromSeq?: number): void { // No-op: tails are now created per-stream lazily } diff --git a/packages/core/src/v3/inputStreams/noopManager.ts b/packages/core/src/v3/inputStreams/noopManager.ts index 6d72d9e2f76..612da832d7e 100644 --- a/packages/core/src/v3/inputStreams/noopManager.ts +++ b/packages/core/src/v3/inputStreams/noopManager.ts @@ -22,6 +22,12 @@ export class NoopInputStreamManager implements InputStreamManager { return undefined; } + setLastSeqNum(_streamId: string, _seqNum: number): void {} + + shiftBuffer(_streamId: string): boolean { return false; } + + disconnectStream(_streamId: string): 
void {} + clearHandlers(): void {} reset(): void {} disconnect(): void {} diff --git a/packages/core/src/v3/inputStreams/types.ts b/packages/core/src/v3/inputStreams/types.ts index 0816c06493f..c456bb61216 100644 --- a/packages/core/src/v3/inputStreams/types.ts +++ b/packages/core/src/v3/inputStreams/types.ts @@ -70,6 +70,28 @@ export interface InputStreamManager { */ lastSeqNum(streamId: string): number | undefined; + /** + * Advance the last-seen S2 sequence number for the given input stream. + * Used after `.wait()` resumes to prevent the SSE tail from replaying + * the record that was consumed via the waitpoint path. + */ + setLastSeqNum(streamId: string, seqNum: number): void; + + /** + * Remove and discard the first buffered item for the given input stream. + * Used after `.wait()` resumes to remove the duplicate that the SSE tail + * buffered while the waitpoint was being completed via a separate path. + * Returns true if an item was removed, false if the buffer was empty. + */ + shiftBuffer(streamId: string): boolean; + + /** + * Disconnect the SSE tail and clear the buffer for a specific input stream. + * Used before suspending via `.wait()` so the tail doesn't buffer duplicates + * of data that will be delivered through the waitpoint path. + */ + disconnectStream(streamId: string): void; + /** * Clear all persistent `.on()` handlers and abort tails that have no remaining once waiters. * Called automatically when a task run completes. diff --git a/packages/trigger-sdk/src/v3/streams.ts b/packages/trigger-sdk/src/v3/streams.ts index d8e6abf0d30..eafaea88fbc 100644 --- a/packages/trigger-sdk/src/v3/streams.ts +++ b/packages/trigger-sdk/src/v3/streams.ts @@ -774,7 +774,13 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { throw new Error("Failed to block on input stream waitpoint"); } - // 2. Suspend the task + // 2. Disconnect the SSE tail and clear the buffer before suspending. 
+ // Without this, the tail stays alive during the suspension window and + // may buffer a copy of the same message that will be delivered via the + // waitpoint, causing a duplicate on resume. + inputStreams.disconnectStream(opts.id); + + // 3. Suspend the task const waitResult = await runtime.waitUntil(response.waitpointId); // 3. Parse the output @@ -790,6 +796,12 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { : undefined; if (waitResult.ok) { + // Advance the seq counter so the SSE tail doesn't replay + // the record that was consumed via the waitpoint path when + // it lazily reconnects on the next on()/once() call. + const prevSeq = inputStreams.lastSeqNum(opts.id); + inputStreams.setLastSeqNum(opts.id, (prevSeq ?? -1) + 1); + return { ok: true as const, output: data as TData }; } else { const error = new WaitpointTimeoutError(data?.message ?? "Timed out"); From 556686c59065d645ef8acd3035c8b67056b6740c Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Wed, 4 Mar 2026 17:50:59 +0000 Subject: [PATCH 27/53] Add some pending message support in the example --- references/ai-chat/src/app/page.tsx | 9 +-- references/ai-chat/src/components/chat.tsx | 67 ++++++++++++++++------ 2 files changed, 52 insertions(+), 24 deletions(-) diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx index 185d84b5e9e..1a003392f55 100644 --- a/references/ai-chat/src/app/page.tsx +++ b/references/ai-chat/src/app/page.tsx @@ -2,13 +2,8 @@ import { Chat } from "@/components/chat"; export default function Home() { return ( -
-
-

- AI Chat — powered by Trigger.dev -

- -
+
+
); } diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 9639382dd38..7e9d7a26636 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -2,7 +2,7 @@ import { useChat } from "@ai-sdk/react"; import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; -import { useRef, useState } from "react"; +import { useEffect, useRef, useState } from "react"; import { Streamdown } from "streamdown"; import { getChatToken } from "@/app/actions"; import { MODEL_OPTIONS, DEFAULT_MODEL } from "@/trigger/chat"; @@ -87,6 +87,21 @@ export function Chat() { transport, }); + // Pending message to send after the current turn completes + const [pendingMessage, setPendingMessage] = useState<{ text: string; model: string } | null>(null); + + // Auto-send the pending message when the turn completes + const prevStatus = useRef(status); + useEffect(() => { + if (prevStatus.current === "streaming" && status === "ready" && pendingMessage) { + const { text, model: pendingMsgModel } = pendingMessage; + setPendingMessage(null); + pendingModel.current = pendingMsgModel; + sendMessage({ text }, { metadata: { model: pendingMsgModel } }); + } + prevStatus.current = status; + }, [status, sendMessage, pendingMessage]); + // Build a map of assistant message index -> model used // Each assistant message follows a user message, so we track by position function getModelForAssistantAt(index: number): string | undefined { @@ -118,11 +133,11 @@ export function Chat() { } return ( -
+
{/* Messages */} -
+
{messages.length === 0 && ( -

Send a message to start chatting.

+

Send a message to start chatting.

)} {messages.map((message, messageIndex) => ( @@ -183,11 +198,25 @@ export function Chat() {
)} + + {/* Queued message indicator */} + {pendingMessage && ( +
+
+
+ {pendingMessage.text} +
+
+ Queued — will send when current response finishes +
+
+
+ )}
{/* Error */} {error && ( -
+
{error.message}
)} @@ -196,11 +225,16 @@ export function Chat() {
{ e.preventDefault(); - if (!input.trim() || status === "streaming") return; - trackedSendMessage({ text: input }, { metadata: { model } }); + if (!input.trim()) return; + if (status === "streaming") { + // Buffer the message — it will be sent when the current turn completes + setPendingMessage({ text: input, model }); + } else { + trackedSendMessage({ text: input }, { metadata: { model } }); + } setInput(""); }} - className="border-t border-gray-200 p-4" + className="shrink-0 border-t border-gray-200 bg-white p-4" >
- {status === "streaming" ? ( + + {status === "streaming" && ( - ) : ( - )}
From 68c719839d703daa3879a4fc4584afcb9589a673 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 5 Mar 2026 09:46:06 +0000 Subject: [PATCH 28/53] Accumulate messages in the task, allowing us to only have to send user mesages from the transport --- packages/trigger-sdk/src/v3/ai.ts | 117 ++++++--- packages/trigger-sdk/src/v3/chat.test.ts | 297 ++++++++++++++++------- packages/trigger-sdk/src/v3/chat.ts | 10 +- 3 files changed, 299 insertions(+), 125 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 50caa2aaa5e..ab2c1212283 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -188,16 +188,15 @@ type ChatTaskWirePayload = { * * - `messages` contains model-ready messages (converted via `convertToModelMessages`) — * pass these directly to `streamText`. - * - `uiMessages` contains the raw `UIMessage[]` from the frontend. * - `clientData` contains custom data from the frontend (the `metadata` field from `sendMessage()`). + * + * The backend accumulates the full conversation history across turns, so the frontend + * only needs to send new messages after the first turn. */ export type ChatTaskPayload = { /** Model-ready messages — pass directly to `streamText({ messages })`. */ messages: ModelMessage[]; - /** Raw UI messages from the frontend. */ - uiMessages: UIMessage[]; - /** The unique identifier for the chat session */ chatId: string; @@ -237,28 +236,6 @@ export type ChatTaskRunPayload = ChatTaskPayload & ChatTaskSignals; const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); -/** - * Strips provider-specific IDs from message parts so that partial/stopped - * assistant responses don't cause 404s when sent back to the provider - * (e.g. OpenAI Responses API message IDs). 
- * @internal - */ -function sanitizeMessages(messages: TMessage[]): TMessage[] { - return messages.map((msg) => { - if (msg.role !== "assistant" || !msg.parts) return msg; - return { - ...msg, - parts: msg.parts.map((part: any) => { - // Strip provider-specific metadata (e.g. OpenAI Responses API itemId) - // and streaming state from assistant message parts. These cause 404s - // when partial/stopped responses are sent back to the provider. - const { providerMetadata, state, id, ...rest } = part; - return rest; - }), - }; - }); -} - /** * Tracks how many times `pipeChat` has been called in the current `chatTask` run. * Used to prevent double-piping when a user both calls `pipeChat()` manually @@ -496,6 +473,11 @@ function chatTask( let currentWirePayload = payload; + // Accumulated model messages across turns. Turn 1 initialises from the + // full history the frontend sends; subsequent turns append only the new + // user message(s) and the captured assistant response. + let accumulatedMessages: ModelMessage[] = []; + // Mutable reference to the current turn's stop controller so the // stop input stream listener (registered once) can abort the right turn. let currentStopController: AbortController | undefined; @@ -562,15 +544,29 @@ function chatTask( pendingMessages.push(msg); }); - // Convert wire payload to user-facing payload - const sanitized = sanitizeMessages(uiMessages); - const modelMessages = await convertToModelMessages(sanitized); + // Convert the incoming UIMessages to model messages and update the accumulator. + // Turn 1: full history from the frontend → replaces the accumulator. + // Turn 2+: only the new message(s) → appended to the accumulator. + const incomingModelMessages = await convertToModelMessages(uiMessages); + + if (turn === 0) { + accumulatedMessages = incomingModelMessages; + } else if (currentWirePayload.trigger === "regenerate-message") { + // Regenerate: frontend sent full history with last assistant message + // removed. 
Reset the accumulator to match. + accumulatedMessages = incomingModelMessages; + } else { + // Submit: frontend sent only the new user message(s). Append to accumulator. + accumulatedMessages.push(...incomingModelMessages); + } + + // Captured by the onFinish callback below — works even on abort/stop. + let capturedResponseMessage: UIMessage | undefined; try { const result = await userRun({ ...restWire, - messages: modelMessages, - uiMessages: sanitized, + messages: accumulatedMessages, clientData: wireMetadata, signal: combinedSignal, cancelSignal, @@ -578,9 +574,15 @@ function chatTask( }); // Auto-pipe if the run function returned a StreamTextResult or similar, - // but only if pipeChat() wasn't already called manually during this turn + // but only if pipeChat() wasn't already called manually during this turn. + // We call toUIMessageStream ourselves to attach onFinish for response capture. if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { - await pipeChat(result, { signal: combinedSignal, spanName: "stream response" }); + const uiStream = result.toUIMessageStream({ + onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { + capturedResponseMessage = responseMessage; + }, + }); + await pipeChat(uiStream, { signal: combinedSignal, spanName: "stream response" }); } } catch (error) { // Handle AbortError from streamText gracefully @@ -596,6 +598,24 @@ function chatTask( msgSub.off(); } + // Append the assistant's response (partial or complete) to the accumulator. + // The onFinish callback fires even on abort/stop, so partial responses + // from stopped generation are captured correctly. 
+ if (capturedResponseMessage) { + try { + const responseModelMessages = await convertToModelMessages([ + stripProviderMetadata(capturedResponseMessage), + ]); + accumulatedMessages.push(...responseModelMessages); + } catch { + // Conversion failed — skip accumulation for this turn + } + } + // TODO: When the user calls `pipeChat` manually instead of returning a + // StreamTextResult, we don't have access to onFinish. A future iteration + // should let manual-mode users report back response messages for + // accumulation (e.g. via a `chat.addMessages()` helper). + if (runSignal.aborted) return "exit"; // Write turn-complete control chunk so frontend closes its stream @@ -723,3 +743,34 @@ function extractLastUserMessageText(messages: UIMessage[]): string | undefined { return undefined; } + +/** + * Strips ephemeral OpenAI Responses API `itemId` from a UIMessage's parts. + * + * The OpenAI Responses provider attaches `itemId` to message parts via + * `providerMetadata.openai.itemId`. These IDs are ephemeral — sending them + * back in a subsequent `streamText` call causes 404s because the provider + * can't find the referenced item (especially for stopped/partial responses). + * + * @internal + */ +function stripProviderMetadata(message: UIMessage): UIMessage { + if (!message.parts) return message; + return { + ...message, + parts: message.parts.map((part: any) => { + const openai = part.providerMetadata?.openai; + if (!openai?.itemId) return part; + + const { itemId, ...restOpenai } = openai; + const { openai: _, ...restProviders } = part.providerMetadata; + return { + ...part, + providerMetadata: { + ...restProviders, + ...(Object.keys(restOpenai).length > 0 ? 
{ openai: restOpenai } : {}), + }, + }; + }), + }; +} diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts index 03eceb1a8f7..af39b6c9686 100644 --- a/packages/trigger-sdk/src/v3/chat.test.ts +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -652,6 +652,8 @@ describe("TriggerChatTransport", () => { it("should track multiple chat sessions independently", async () => { let callCount = 0; + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { const urlStr = typeof url === "string" ? url : url.toString(); @@ -670,7 +672,9 @@ describe("TriggerChatTransport", () => { } if (urlStr.includes("/realtime/v1/streams/")) { - return new Response(createSSEStream(""), { + // Include turn-complete chunk so the session is preserved + const chunks = [...sampleChunks, turnCompleteChunk]; + return new Response(createSSEStream(sseEncode(chunks)), { status: 200, headers: { "content-type": "text/event-stream", @@ -688,22 +692,26 @@ describe("TriggerChatTransport", () => { baseURL: "https://api.test.trigger.dev", }); - // Start two independent chat sessions - await transport.sendMessages({ + // Start two independent chat sessions and consume the streams + const s1 = await transport.sendMessages({ trigger: "submit-message", chatId: "session-a", messageId: undefined, messages: [createUserMessage("Hello A")], abortSignal: undefined, }); + const r1 = s1.getReader(); + while (!(await r1.read()).done) {} - await transport.sendMessages({ + const s2 = await transport.sendMessages({ trigger: "submit-message", chatId: "session-b", messageId: undefined, messages: [createUserMessage("Hello B")], abortSignal: undefined, }); + const r2 = s2.getReader(); + while (!(await r2.read()).done) {} // Both sessions should be independently reconnectable const streamA = await transport.reconnectToStream({ chatId: "session-a" }); @@ -918,11 +926,7 @@ describe("TriggerChatTransport", () => 
{ describe("lastEventId tracking", () => { it("should pass lastEventId to SSE subscription on subsequent turns", async () => { - const controlChunk = { - type: "__trigger_waitpoint_ready", - tokenId: "wp_token_eid", - publicAccessToken: "wp_access_eid", - }; + const turnCompleteChunk = { type: "__trigger_turn_complete" }; let triggerCallCount = 0; const streamFetchCalls: { url: string; headers: Record }[] = []; @@ -944,14 +948,12 @@ describe("TriggerChatTransport", () => { ); } - if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { - return new Response( - JSON.stringify({ success: true }), - { - status: 200, - headers: { "content-type": "application/json" }, - } - ); + // Handle input stream sends (for second message) + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); } if (urlStr.includes("/realtime/v1/streams/")) { @@ -963,7 +965,7 @@ describe("TriggerChatTransport", () => { const chunks = [ ...sampleChunks, { type: "finish" as const, id: "part-1" } as UIMessageChunk, - controlChunk, + turnCompleteChunk, ]; return new Response(createSSEStream(sseEncode(chunks)), { status: 200, @@ -998,7 +1000,7 @@ describe("TriggerChatTransport", () => { if (done) break; } - // Second message — completes the waitpoint + // Second message — sends via input stream const stream2 = await transport.sendMessages({ trigger: "submit-message", chatId: "chat-eid", @@ -1021,13 +1023,151 @@ describe("TriggerChatTransport", () => { }); }); + describe("minimal wire payloads", () => { + it("should send only new messages via input stream on turn 2+", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + const inputStreamPayloads: any[] = []; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response( + JSON.stringify({ id: "run_minimal" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token_minimal", + }, + } + ); + } + + // Capture input stream payloads (ApiClient wraps in { data: ... }) + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + const body = JSON.parse(init?.body as string); + inputStreamPayloads.push(body.data); + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + turnCompleteChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const userMsg1 = createUserMessage("Hello"); + const assistantMsg = createAssistantMessage("Hi there!"); + const userMsg2 = createUserMessage("What's up?"); + + // Turn 1 — triggers a new run with full history + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-minimal", + messageId: undefined, + messages: [userMsg1], + abortSignal: undefined, + }); + const r1 = stream1.getReader(); + while (!(await r1.read()).done) {} + + // Turn 2 — sends via input stream, should only include NEW messages + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-minimal", + messageId: undefined, + messages: [userMsg1, assistantMsg, userMsg2], + abortSignal: undefined, + }); + const r2 = stream2.getReader(); + while (!(await r2.read()).done) {} + + // Verify: the input 
stream payload should only contain the new user message + expect(inputStreamPayloads).toHaveLength(1); + const sentPayload = inputStreamPayloads[0]; + // Only the new user message should be sent (backend already has the assistant response) + expect(sentPayload.messages).toHaveLength(1); + expect(sentPayload.messages[0]).toEqual(userMsg2); + }); + + it("should send full history on first message (trigger)", async () => { + let triggerPayload: any; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerPayload = JSON.parse(init?.body as string); + return new Response( + JSON.stringify({ id: "run_full" }), + { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token_full", + }, + } + ); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(sseEncode(sampleChunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages = [createUserMessage("Hello"), createAssistantMessage("Hi!"), createUserMessage("More")]; + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-full", + messageId: undefined, + messages, + abortSignal: undefined, + }); + + // First message always sends full history via trigger + expect(triggerPayload.payload.messages).toHaveLength(3); + }); + }); + describe("AbortController cleanup", () => { it("should terminate SSE connection after intercepting control chunk", async () => { - const controlChunk = { - type: "__trigger_waitpoint_ready", - tokenId: "wp_token_abort", - publicAccessToken: 
"wp_access_abort", - }; + const controlChunk = { type: "__trigger_turn_complete" }; let streamAborted = false; @@ -1158,15 +1298,11 @@ describe("TriggerChatTransport", () => { expect(tokenCallCount).toBe(1); }); - it("should resolve async token for waitpoint completion flow", async () => { - const controlChunk = { - type: "__trigger_waitpoint_ready", - tokenId: "wp_token_async", - publicAccessToken: "wp_access_async", - }; + it("should not resolve async token for input stream send flow", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; let tokenCallCount = 0; - let completeWaitpointCalled = false; + let inputStreamSendCalled = false; global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { const urlStr = typeof url === "string" ? url : url.toString(); @@ -1184,22 +1320,20 @@ describe("TriggerChatTransport", () => { ); } - if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { - completeWaitpointCalled = true; - return new Response( - JSON.stringify({ success: true }), - { - status: 200, - headers: { "content-type": "application/json" }, - } - ); + // Handle input stream sends + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + inputStreamSendCalled = true; + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); } if (urlStr.includes("/realtime/v1/streams/")) { const chunks = [ ...sampleChunks, { type: "finish" as const, id: "part-1" } as UIMessageChunk, - controlChunk, + turnCompleteChunk, ]; return new Response(createSSEStream(sseEncode(chunks)), { status: 200, @@ -1240,7 +1374,7 @@ describe("TriggerChatTransport", () => { const firstTokenCount = tokenCallCount; - // Second message — should complete waitpoint (does NOT call async token) + // Second message — should send via input stream (does NOT call async token) const stream2 = await transport.sendMessages({ trigger: "submit-message", 
chatId: "chat-async-wp", @@ -1255,19 +1389,15 @@ describe("TriggerChatTransport", () => { if (done) break; } - // Token function should NOT have been called again for the waitpoint path + // Token function should NOT have been called again for the input stream path expect(tokenCallCount).toBe(firstTokenCount); - expect(completeWaitpointCalled).toBe(true); + expect(inputStreamSendCalled).toBe(true); }); }); - describe("single-run mode (waitpoint loop)", () => { - it("should store waitpoint token from control chunk and not forward it to consumer", async () => { - const controlChunk = { - type: "__trigger_waitpoint_ready", - tokenId: "wp_token_123", - publicAccessToken: "wp_access_abc", - }; + describe("single-run mode (input stream loop)", () => { + it("should not forward turn-complete control chunk to consumer", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { const urlStr = typeof url === "string" ? 
url : url.toString(); @@ -1289,7 +1419,7 @@ describe("TriggerChatTransport", () => { const chunks = [ ...sampleChunks, { type: "finish" as const, id: "part-1" } as UIMessageChunk, - controlChunk, + turnCompleteChunk, ]; return new Response(createSSEStream(sseEncode(chunks)), { status: 200, @@ -1329,18 +1459,14 @@ describe("TriggerChatTransport", () => { // All AI SDK chunks should be forwarded expect(receivedChunks.length).toBe(sampleChunks.length + 1); // +1 for the finish chunk // Control chunk should not be in the output - expect(receivedChunks.every((c) => c.type !== ("__trigger_waitpoint_ready" as any))).toBe(true); + expect(receivedChunks.every((c) => c.type !== ("__trigger_turn_complete" as any))).toBe(true); }); - it("should complete waitpoint token on second message instead of triggering a new run", async () => { - const controlChunk = { - type: "__trigger_waitpoint_ready", - tokenId: "wp_token_456", - publicAccessToken: "wp_access_def", - }; + it("should send via input stream on second message instead of triggering a new run", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; let triggerCallCount = 0; - let completeWaitpointCalled = false; + let inputStreamSendCalled = false; global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { const urlStr = typeof url === "string" ? 
url : url.toString(); @@ -1359,23 +1485,20 @@ describe("TriggerChatTransport", () => { ); } - // Handle waitpoint token completion - if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { - completeWaitpointCalled = true; - return new Response( - JSON.stringify({ success: true }), - { - status: 200, - headers: { "content-type": "application/json" }, - } - ); + // Handle input stream sends + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + inputStreamSendCalled = true; + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); } if (urlStr.includes("/realtime/v1/streams/")) { const chunks = [ ...sampleChunks, { type: "finish" as const, id: "part-1" } as UIMessageChunk, - controlChunk, + turnCompleteChunk, ]; return new Response(createSSEStream(sseEncode(chunks)), { status: 200, @@ -1404,7 +1527,7 @@ describe("TriggerChatTransport", () => { abortSignal: undefined, }); - // Consume stream to capture the control chunk + // Consume stream const reader1 = stream1.getReader(); while (true) { const { done } = await reader1.read(); @@ -1413,7 +1536,7 @@ describe("TriggerChatTransport", () => { expect(triggerCallCount).toBe(1); - // Second message — should complete the waitpoint instead of triggering + // Second message — should send via input stream instead of triggering const stream2 = await transport.sendMessages({ trigger: "submit-message", chatId: "chat-resume", @@ -1431,8 +1554,8 @@ describe("TriggerChatTransport", () => { // Should NOT have triggered a second run expect(triggerCallCount).toBe(1); - // Should have completed the waitpoint - expect(completeWaitpointCalled).toBe(true); + // Should have sent via input stream + expect(inputStreamSendCalled).toBe(true); }); it("should fall back to triggering a new run if stream closes without control chunk", async () => { @@ -1510,12 +1633,8 @@ describe("TriggerChatTransport", () => { 
expect(triggerCallCount).toBe(2); }); - it("should fall back to new run when completing waitpoint fails", async () => { - const controlChunk = { - type: "__trigger_waitpoint_ready", - tokenId: "wp_token_fail", - publicAccessToken: "wp_access_fail", - }; + it("should fall back to new run when sendInputStream fails", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; let triggerCallCount = 0; @@ -1536,28 +1655,24 @@ describe("TriggerChatTransport", () => { ); } - // Waitpoint completion fails - if (urlStr.includes("/api/v1/waitpoints/tokens/") && urlStr.includes("/complete")) { + // Input stream send fails + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { return new Response( - JSON.stringify({ error: "Token expired" }), + JSON.stringify({ error: "Run not found" }), { - status: 400, + status: 404, headers: { "content-type": "application/json" }, } ); } if (urlStr.includes("/realtime/v1/streams/")) { - // First call has control chunk, subsequent calls don't const chunks: (UIMessageChunk | Record)[] = [ ...sampleChunks, { type: "finish" as const, id: "part-1" } as UIMessageChunk, + turnCompleteChunk, ]; - if (triggerCallCount <= 1) { - chunks.push(controlChunk); - } - return new Response(createSSEStream(sseEncode(chunks)), { status: 200, headers: { @@ -1593,7 +1708,7 @@ describe("TriggerChatTransport", () => { expect(triggerCallCount).toBe(1); - // Second message — waitpoint completion will fail, should fall back to new run + // Second message — sendInputStream will fail, should fall back to new run const stream2 = await transport.sendMessages({ trigger: "submit-message", chatId: "chat-fail", diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index d41b38e28e8..ed7530b4bc4 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -197,8 +197,16 @@ export class TriggerChatTransport implements ChatTransport { // to resume the conversation in 
the same run. if (session?.runId) { try { + // Keep wire payloads minimal — the backend accumulates the full history. + // For submit-message: only send the new user message (always the last one). + // For regenerate-message: send full history so the backend can reset its accumulator. + const minimalPayload = { + ...payload, + messages: trigger === "submit-message" ? messages.slice(-1) : messages, + }; + const apiClient = new ApiClient(this.baseURL, session.publicAccessToken); - await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, payload); + await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, minimalPayload); return this.subscribeToStream( session.runId, session.publicAccessToken, From c3fddbdb373ed20f322f5a44f5c82ff7b1d659e4 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 5 Mar 2026 14:14:17 +0000 Subject: [PATCH 29/53] build full example with persisting messages, adding necessary hooks, and documenting it all --- docs/guides/ai-chat.mdx | 732 +++++++++++++++--- packages/trigger-sdk/src/v3/ai.ts | 226 +++++- packages/trigger-sdk/src/v3/chat-react.ts | 12 +- packages/trigger-sdk/src/v3/chat.test.ts | 183 +++++ packages/trigger-sdk/src/v3/chat.ts | 138 +++- pnpm-lock.yaml | 432 ++++++++++- references/ai-chat/.gitignore | 1 + references/ai-chat/package.json | 10 +- references/ai-chat/prisma.config.ts | 12 + .../20260305112427_init/migration.sql | 20 + .../prisma/migrations/migration_lock.toml | 3 + references/ai-chat/prisma/schema.prisma | 23 + references/ai-chat/src/app/actions.ts | 74 ++ references/ai-chat/src/app/page.tsx | 57 +- .../ai-chat/src/components/chat-app.tsx | 155 ++++ .../ai-chat/src/components/chat-sidebar.tsx | 82 ++ references/ai-chat/src/components/chat.tsx | 77 +- references/ai-chat/src/lib/models.ts | 8 + references/ai-chat/src/lib/prisma.ts | 15 + references/ai-chat/src/trigger/chat.ts | 23 +- references/ai-chat/trigger.config.ts | 8 + 21 files changed, 2114 insertions(+), 177 deletions(-) create mode 
100644 references/ai-chat/.gitignore create mode 100644 references/ai-chat/prisma.config.ts create mode 100644 references/ai-chat/prisma/migrations/20260305112427_init/migration.sql create mode 100644 references/ai-chat/prisma/migrations/migration_lock.toml create mode 100644 references/ai-chat/prisma/schema.prisma create mode 100644 references/ai-chat/src/components/chat-app.tsx create mode 100644 references/ai-chat/src/components/chat-sidebar.tsx create mode 100644 references/ai-chat/src/lib/models.ts create mode 100644 references/ai-chat/src/lib/prisma.ts diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index fddc9254f72..b8fa9402196 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -1,7 +1,7 @@ --- title: "AI Chat with useChat" sidebarTitle: "AI Chat (useChat)" -description: "Run AI SDK chat completions as durable Trigger.dev tasks with built-in realtime streaming." +description: "Run AI SDK chat completions as durable Trigger.dev tasks with built-in realtime streaming, multi-turn conversations, and message persistence." --- ## Overview @@ -9,10 +9,11 @@ description: "Run AI SDK chat completions as durable Trigger.dev tasks with buil The `@trigger.dev/sdk` provides a custom [ChatTransport](https://sdk.vercel.ai/docs/ai-sdk-ui/transport) for the Vercel AI SDK's `useChat` hook. This lets you run chat completions as **durable Trigger.dev tasks** instead of fragile API routes — with automatic retries, observability, and realtime streaming built in. **How it works:** -1. The frontend sends messages via `useChat` → `TriggerChatTransport` -2. The transport triggers a Trigger.dev task with the conversation as payload +1. The frontend sends messages via `useChat` through `TriggerChatTransport` +2. The first message triggers a Trigger.dev task; subsequent messages resume the **same run** via input streams 3. The task streams `UIMessageChunk` events back via Trigger.dev's realtime streams 4. 
The AI SDK's `useChat` processes the stream natively — text, tool calls, reasoning, etc. +5. Between turns, the run stays warm briefly then suspends (freeing compute) until the next message No custom API routes needed. Your chat backend is a Trigger.dev task. @@ -22,62 +23,501 @@ No custom API routes needed. Your chat backend is a Trigger.dev task. ## Quick start -### 1. Define a chat task + + + Use `chat.task` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The `run` function receives `ModelMessage[]` (already converted from the frontend's `UIMessage[]`) — pass them directly to `streamText`. + + If you return a `StreamTextResult`, it's **automatically piped** to the frontend. + + ```ts trigger/chat.ts + import { chat } from "@trigger.dev/sdk/ai"; + import { streamText } from "ai"; + import { openai } from "@ai-sdk/openai"; + + export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + // messages is ModelMessage[] — pass directly to streamText + // signal fires on stop or run cancel + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + }); + }, + }); + ``` + + + + On your server (e.g. 
a Next.js server action), create a trigger public token scoped to your chat task: + + ```ts app/actions.ts + "use server"; + + import { chat } from "@trigger.dev/sdk/ai"; + import type { myChat } from "@/trigger/chat"; + + export const getChatToken = () => + chat.createAccessToken("my-chat"); + ``` + + + + Use the `useTriggerChatTransport` hook from `@trigger.dev/sdk/chat/react` to create a memoized transport instance, then pass it to `useChat`: + + ```tsx app/components/chat.tsx + "use client"; + + import { useChat } from "@ai-sdk/react"; + import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; + import type { myChat } from "@/trigger/chat"; + import { getChatToken } from "@/app/actions"; + + export function Chat() { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + }); + + const { messages, sendMessage, stop, status } = useChat({ transport }); + + return ( +
+ {messages.map((m) => ( +
+ {m.role}: + {m.parts.map((part, i) => + part.type === "text" ? {part.text} : null + )} +
+ ))} + + { + e.preventDefault(); + const input = e.currentTarget.querySelector("input"); + if (input?.value) { + sendMessage({ text: input.value }); + input.value = ""; + } + }} + > + + + {status === "streaming" && ( + + )} + +
+ ); + } + ``` +
+
+ +## How multi-turn works + +### One run, many turns + +The entire conversation lives in a **single Trigger.dev run**. After each AI response, the run waits for the next message via input streams. The frontend transport handles this automatically — it triggers a new run for the first message, and sends subsequent messages to the existing run. + +This means your conversation has full observability in the Trigger.dev dashboard: every turn is a span inside the same run. + +### Warm and suspended states + +After each turn, the run goes through two phases of waiting: + +1. **Warm phase** (default 30s) — The run stays active and responds instantly to the next message. Uses compute. +2. **Suspended phase** (default up to 1h) — The run suspends, freeing compute. It wakes when the next message arrives. There's a brief delay as the run resumes. + +If no message arrives within the turn timeout, the run ends gracefully. The next message from the frontend will automatically start a fresh run. + + + You are not charged for compute during the suspended phase. Only the warm phase uses compute resources. + + +### What the backend accumulates + +The backend automatically accumulates the full conversation history across turns. After the first turn, the frontend transport only sends the new user message — not the entire history. This is handled transparently by the transport and task. 
+ +The accumulated messages are available in: +- `run()` as `messages` (`ModelMessage[]`) — for passing to `streamText` +- `onTurnComplete()` as `uiMessages` (`UIMessage[]`) — for persistence + +## Backend patterns + +### Simple: return a StreamTextResult + +The easiest approach — return the `streamText` result from `run` and it's automatically piped to the frontend: + +```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const simpleChat = chat.task({ + id: "simple-chat", + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + system: "You are a helpful assistant.", + messages, + abortSignal: signal, + }); + }, +}); +``` + +### Using chat.pipe() for complex flows + +For complex agent flows where `streamText` is called deep inside your code, use `chat.pipe()`. It works from **anywhere inside a task** — even nested function calls. + +```ts trigger/agent-chat.ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import type { ModelMessage } from "ai"; + +export const agentChat = chat.task({ + id: "agent-chat", + run: async ({ messages }) => { + // Don't return anything — chat.pipe is called inside + await runAgentLoop(messages); + }, +}); + +async function runAgentLoop(messages: ModelMessage[]) { + // ... agent logic, tool calls, etc. + + const result = streamText({ + model: openai("gpt-4o"), + messages, + }); + + // Pipe from anywhere — no need to return it + await chat.pipe(result); +} +``` + +### Manual mode with task() + +If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `chat.pipe()`: -Use `chat.task` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The payload is automatically typed as `ChatTaskPayload` with abort signals. 
+```ts +import { task } from "@trigger.dev/sdk"; +import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const manualChat = task({ + id: "manual-chat", + retry: { maxAttempts: 3 }, + queue: { concurrencyLimit: 10 }, + run: async (payload: ChatTaskPayload) => { + const result = streamText({ + model: openai("gpt-4o"), + messages: payload.messages, + }); + + await chat.pipe(result); + }, +}); +``` + + + Manual mode does not get automatic message accumulation or the `onTurnComplete`/`onChatStart` lifecycle hooks. The `responseMessage` field in `onTurnComplete` will be `undefined` when using `chat.pipe()` directly. Use `chat.task()` for the full multi-turn experience. + + +## Lifecycle hooks + +### onChatStart + +Fires once on the first turn (turn 0) before `run()` executes. Use it to create a chat record in your database. + +```ts +export const myChat = chat.task({ + id: "my-chat", + onChatStart: async ({ chatId, clientData }) => { + const { userId } = clientData as { userId: string }; + await db.chat.create({ + data: { id: chatId, userId, title: "New chat" }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + `clientData` contains custom data from the frontend — either the `metadata` option on the transport constructor (sent with every message) or the `metadata` option on `sendMessage()` (per-message). See [Client data and metadata](#client-data-and-metadata). + -If you return a `StreamTextResult` from `run`, it's **automatically piped** to the frontend. +### onTurnComplete + +Fires after each turn completes — after the response is captured, before waiting for the next message. This is the primary hook for persisting conversations. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | +| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | +| `newMessages` | `ModelMessage[]` | Only this turn's messages (model format) | +| `newUIMessages` | `UIMessage[]` | Only this turn's messages (UI format) | +| `responseMessage` | `UIMessage \| undefined` | The assistant's response for this turn | +| `turn` | `number` | Turn number (0-indexed) | + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnComplete: async ({ chatId, uiMessages }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + Use `uiMessages` to overwrite the full conversation each turn (simplest). Use `newUIMessages` if you prefer to store messages individually — for example, one database row per message. + + +## Persistence + +### What needs to be persisted + +To build a chat app that survives page refreshes, you need to persist two things: + +1. **Messages** — The conversation history. Persisted **server-side** in the task via `onTurnComplete`. +2. **Sessions** — The transport's connection state (`runId`, `publicAccessToken`, `lastEventId`). Persisted **client-side** via `onSessionChange`. + + + Sessions let the transport reconnect to an existing run after a page refresh. Without them, every page load would start a new run — losing the conversation context that was accumulated in the previous run. + + +### Persisting messages (server-side) + +Messages are stored inside the task itself, so they're durable even if the frontend disconnects mid-conversation. 
```ts trigger/chat.ts import { chat } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages } from "ai"; +import { streamText } from "ai"; import { openai } from "@ai-sdk/openai"; +import { db } from "@/lib/db"; export const myChat = chat.task({ id: "my-chat", + onChatStart: async ({ chatId, clientData }) => { + await db.chat.create({ + data: { id: chatId, title: "New chat", messages: [] }, + }); + }, + onTurnComplete: async ({ chatId, uiMessages }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + }, run: async ({ messages, signal }) => { - // messages is UIMessage[] from the frontend - // signal fires on stop or run cancel return streamText({ model: openai("gpt-4o"), - messages: convertToModelMessages(messages), + messages, abortSignal: signal, }); - // Returning a StreamTextResult auto-pipes it to the frontend }, }); ``` -### 2. Generate an access token +### Persisting sessions (frontend) + +The `onSessionChange` callback on the transport fires whenever a session's state changes: + +- **Session created** — After triggering a new task run +- **Turn completed** — The `lastEventId` is updated (used for stream resumption) +- **Session removed** — The run ended or failed. `session` is `null`. + +```tsx +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + sessions: loadedSessions, // Restored from DB on page load + onSessionChange: (chatId, session) => { + if (session) { + saveSession(chatId, session); // Server action + } else { + deleteSession(chatId); // Server action + } + }, +}); +``` + +### Restoring on page load -On your server (e.g. 
a Next.js API route or server action), create a trigger public token: +On page load, fetch both the messages and the session from your database, then pass them to `useChat` and the transport: + +```tsx app/page.tsx +"use client"; + +import { useEffect, useState } from "react"; +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import { useChat } from "@ai-sdk/react"; +import { getChatToken, getChatMessages, getSession } from "@/app/actions"; + +export default function ChatPage({ chatId }: { chatId: string }) { + const [initialMessages, setInitialMessages] = useState([]); + const [initialSession, setInitialSession] = useState(undefined); + const [loaded, setLoaded] = useState(false); + + useEffect(() => { + async function load() { + const [messages, session] = await Promise.all([ + getChatMessages(chatId), + getSession(chatId), + ]); + setInitialMessages(messages); + setInitialSession(session ? { [chatId]: session } : undefined); + setLoaded(true); + } + load(); + }, [chatId]); + + if (!loaded) return null; + + return ( + + ); +} + +function ChatClient({ chatId, initialMessages, initialSessions }) { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + sessions: initialSessions, + onSessionChange: (id, session) => { + if (session) saveSession(id, session); + else deleteSession(id); + }, + }); + + const { messages, sendMessage, stop, status } = useChat({ + id: chatId, + messages: initialMessages, + transport, + }); + + // ... 
render UI +} +``` + +### Full example + +Putting it all together — a complete chat app with server-side message persistence and session reconnection: + + +```ts trigger/chat.ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { db } from "@/lib/db"; + +export const myChat = chat.task({ + id: "my-chat", + onChatStart: async ({ chatId }) => { + await db.chat.create({ + data: { id: chatId, title: "New chat", messages: [] }, + }); + }, + onTurnComplete: async ({ chatId, uiMessages }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + }); + }, +}); +``` ```ts app/actions.ts "use server"; -import { auth } from "@trigger.dev/sdk"; +import { chat } from "@trigger.dev/sdk/ai"; +import type { myChat } from "@/trigger/chat"; +import { db } from "@/lib/db"; + +export const getChatToken = () => + chat.createAccessToken("my-chat"); -export async function getChatToken() { - return await auth.createTriggerPublicToken("my-chat"); +export async function getChatMessages(chatId: string) { + const found = await db.chat.findUnique({ where: { id: chatId } }); + return found?.messages ?? []; +} + +export async function getSession(chatId: string) { + return await db.chatSession.findUnique({ where: { id: chatId } }); } -``` -### 3. Use in the frontend +export async function saveSession( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } +) { + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, ...session }, + update: session, + }); +} -Import `TriggerChatTransport` from `@trigger.dev/sdk/chat` (browser-safe — no server dependencies). 
+export async function deleteSession(chatId: string) { + await db.chatSession.delete({ where: { id: chatId } }).catch(() => {}); +} +``` ```tsx app/components/chat.tsx "use client"; import { useChat } from "@ai-sdk/react"; -import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; - -export function Chat({ accessToken }: { accessToken: string }) { - const { messages, sendMessage, status, error } = useChat({ - transport: new TriggerChatTransport({ - task: "my-chat", - accessToken, - }), +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import type { myChat } from "@/trigger/chat"; +import { + getChatToken, + saveSession, + deleteSession, +} from "@/app/actions"; + +export function Chat({ chatId, initialMessages, initialSessions }) { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + sessions: initialSessions, + onSessionChange: (id, session) => { + if (session) saveSession(id, session); + else deleteSession(id); + }, + }); + + const { messages, sendMessage, stop, status } = useChat({ + id: chatId, + messages: initialMessages, + transport, }); return ( @@ -105,158 +545,228 @@ export function Chat({ accessToken }: { accessToken: string }) { + {status === "streaming" && ( + + )}
); } ``` + -## Backend patterns +## Stop generation -### Simple: return a StreamTextResult +### How stop works -The easiest approach — return the `streamText` result from `run` and it's automatically piped to the frontend: +Calling `stop()` from `useChat` sends a stop signal to the running task via input streams. The task's `streamText` call aborts (if you passed `signal` or `stopSignal`), but the **run stays alive** and waits for the next message. The partial response is captured and accumulated normally. -```ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages } from "ai"; -import { openai } from "@ai-sdk/openai"; +### Abort signals -export const simpleChat = chat.task({ - id: "simple-chat", - run: async ({ messages, signal }) => { +The `run` function receives three abort signals: + +| Signal | Fires when | Use for | +|--------|-----------|---------| +| `signal` | Stop **or** cancel | Pass to `streamText` — handles both cases. **Use this in most cases.** | +| `stopSignal` | Stop only (per-turn, reset each turn) | Custom logic that should only run on user stop, not cancellation | +| `cancelSignal` | Run cancel, expire, or maxDuration exceeded | Cleanup that should only happen on full cancellation | + +```ts +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal, stopSignal, cancelSignal }) => { return streamText({ model: openai("gpt-4o"), - system: "You are a helpful assistant.", - messages: convertToModelMessages(messages), - abortSignal: signal, + messages, + abortSignal: signal, // Handles both stop and cancel }); }, }); ``` -### Complex: use chat.pipe() from anywhere + + Use `signal` (the combined signal) in most cases. The separate `stopSignal` and `cancelSignal` are only needed if you want different behavior for stop vs cancel. + -For complex agent flows where `streamText` is called deep inside your code, use `chat.pipe()`. 
It works from **anywhere inside a task** — even nested function calls. +## Client data and metadata -```ts trigger/agent-chat.ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages } from "ai"; -import { openai } from "@ai-sdk/openai"; +### Transport-level metadata -export const agentChat = chat.task({ - id: "agent-chat", - run: async ({ messages }) => { - // Don't return anything — chat.pipe is called inside - await runAgentLoop(convertToModelMessages(messages)); - }, +Set default metadata on the transport that's included in every request: + +```ts +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + metadata: { userId: currentUser.id }, }); +``` -// This could be deep inside your agent library -async function runAgentLoop(messages: CoreMessage[]) { - // ... agent logic, tool calls, etc. +### Per-message metadata - const result = streamText({ - model: openai("gpt-4o"), - messages, - }); +Pass metadata with individual messages. 
Per-message values are merged with transport-level metadata (per-message wins on conflicts): - // Pipe from anywhere — no need to return it - await chat.pipe(result); -} +```ts +sendMessage( + { text: "Hello" }, + { metadata: { model: "gpt-4o", priority: "high" } } +); ``` -### Manual: use task() with chat.pipe() +### Accessing client data in the task -If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `chat.pipe()`: +Both transport-level and per-message metadata are available as `clientData` in the `run` function and in `onChatStart`: ```ts -import { task } from "@trigger.dev/sdk"; -import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; -import { streamText, convertToModelMessages } from "ai"; -import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; -export const manualChat = task({ - id: "manual-chat", - retry: { maxAttempts: 3 }, - queue: { concurrencyLimit: 10 }, - run: async (payload: ChatTaskPayload) => { - const result = streamText({ - model: openai("gpt-4o"), - messages: convertToModelMessages(payload.messages), - }); +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, clientData, signal }) => { + const { model, userId } = z.object({ + model: z.string().optional(), + userId: z.string(), + }).parse(clientData); - await chat.pipe(result); + return streamText({ + model: openai(model ?? "gpt-4o"), + messages, + abortSignal: signal, + }); }, }); ``` -## Frontend options +## Runtime configuration -### TriggerChatTransport options +### chat.setTurnTimeout() + +Override how long the run stays suspended waiting for the next message. Call from inside `run()`: ```ts -new TriggerChatTransport({ - // Required - task: "my-chat", // Task ID to trigger - accessToken: token, // Trigger public token or secret key - - // Optional - baseURL: "https://...", // Custom API URL (self-hosted) - streamKey: "chat", // Custom stream key (default: "chat") - headers: { ... 
 }, // Extra headers for API requests
-  streamTimeoutSeconds: 120, // Stream timeout (default: 120s)
-});
+run: async ({ messages, signal }) => {
+  chat.setTurnTimeout("2h"); // Wait longer for this conversation
+  return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal });
+},
 ```

-### Dynamic access tokens
+### chat.setWarmTimeoutInSeconds()

-For token refresh patterns, pass a function:
+Override how long the run stays warm (active, using compute) after each turn:

 ```ts
-new TriggerChatTransport({
+run: async ({ messages, signal }) => {
+  chat.setWarmTimeoutInSeconds(60); // Stay warm for 1 minute
+  return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal });
+},
+```
+
+<Note>
+  Longer warm timeout means faster responses but more compute usage. Set to `0` to suspend immediately after each turn (minimum compute cost, slight delay on next message).
+</Note>
+
+## Frontend reference
+
+### TriggerChatTransport options
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `task` | `string` | required | Task ID to trigger |
+| `accessToken` | `string \| () => string \| Promise<string>` | required | Auth token or function that returns one |
+| `baseURL` | `string` | `"https://api.trigger.dev"` | API base URL (for self-hosted) |
+| `streamKey` | `string` | `"chat"` | Stream key (only change if using custom key) |
+| `headers` | `Record<string, string>` | — | Extra headers for API requests |
+| `streamTimeoutSeconds` | `number` | `120` | How long to wait for stream data |
+| `metadata` | `Record<string, unknown>` | — | Default metadata for every request |
+| `sessions` | `Record<string, ChatSession>` | — | Restore sessions from storage |
+| `onSessionChange` | `(chatId, session \| null) => void` | — | Fires when session state changes |
+
+### useTriggerChatTransport
+
+React hook that creates and memoizes a `TriggerChatTransport` instance. Import from `@trigger.dev/sdk/chat/react`.
+ +```tsx +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import type { myChat } from "@/trigger/chat"; + +const transport = useTriggerChatTransport({ task: "my-chat", - accessToken: () => getLatestToken(), // Called on each sendMessage + accessToken: () => getChatToken(), + sessions: savedSessions, + onSessionChange: handleSessionChange, }); ``` -### Passing extra data +The transport is created once on first render and reused across re-renders. Pass a type parameter for compile-time validation of the task ID. + + + The hook keeps `onSessionChange` up to date via a ref internally, so you don't need to memoize the callback or worry about stale closures. + -Use the `body` option on `sendMessage` to pass additional data to the task: +### Dynamic access tokens + +For token refresh, pass a function instead of a string. It's called on each `sendMessage`: ```ts -sendMessage({ - text: "Hello", -}, { - body: { - systemPrompt: "You are a pirate.", - temperature: 0.9, +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: async () => { + const res = await fetch("/api/chat-token"); + return res.text(); }, }); ``` -The `body` fields are merged into the `ChatTaskPayload` and available in your task's `run` function. 
+## Backend reference
+
+### ChatTaskOptions

-## ChatTaskPayload
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `id` | `string` | required | Task identifier |
+| `run` | `(payload: ChatTaskRunPayload) => Promise<StreamTextResult \| void>` | required | Handler for each turn |
+| `onChatStart` | `(event: ChatStartEvent) => Promise<void> \| void` | — | Fires on turn 0 before `run()` |
+| `onTurnComplete` | `(event: TurnCompleteEvent) => Promise<void> \| void` | — | Fires after each turn completes |
+| `maxTurns` | `number` | `100` | Max conversational turns per run |
+| `turnTimeout` | `string` | `"1h"` | How long to wait for next message |
+| `warmTimeoutInSeconds` | `number` | `30` | Seconds to stay warm before suspending |

-The payload sent to the task has this shape:
+Plus all standard [TaskOptions](/tasks/overview) — `retry`, `queue`, `machine`, `maxDuration`, etc.
+
+### ChatTaskRunPayload

 | Field | Type | Description |
 |-------|------|-------------|
-| `messages` | `UIMessage[]` | The conversation history |
+| `messages` | `ModelMessage[]` | Model-ready messages — pass directly to `streamText` |
 | `chatId` | `string` | Unique chat session ID |
 | `trigger` | `"submit-message" \| "regenerate-message"` | What triggered the request |
-| `messageId` | `string \| undefined` | Message ID to regenerate (if applicable) |
-| `metadata` | `unknown` | Custom metadata from the frontend |
+| `messageId` | `string \| undefined` | Message ID (for regenerate) |
+| `clientData` | `unknown` | Custom data from frontend metadata |
+| `signal` | `AbortSignal` | Combined stop + cancel signal |
+| `cancelSignal` | `AbortSignal` | Cancel-only signal |
+| `stopSignal` | `AbortSignal` | Stop-only signal (per-turn) |
+
+### TurnCompleteEvent
+
+See [onTurnComplete](#onturncomplete) for the full field reference.
+
+### chat namespace

-Plus any extra fields from the `body` option.
+| Method | Description | +|--------|-------------| +| `chat.task(options)` | Create a chat task | +| `chat.pipe(source, options?)` | Pipe a stream to the frontend (from anywhere inside a task) | +| `chat.createAccessToken(taskId)` | Create a public access token for a chat task | +| `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. `"2h"`) | +| `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | +| `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | ## Self-hosting If you're self-hosting Trigger.dev, pass the `baseURL` option: ```ts -new TriggerChatTransport({ +const transport = useTriggerChatTransport({ task: "my-chat", accessToken, baseURL: "https://your-trigger-instance.com", diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index ab2c1212283..3bb0db9dcd8 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -379,6 +379,47 @@ async function pipeChat( * emits a control chunk and suspends via `messagesInput.wait()`. The frontend * transport resumes the same run by sending the next message via input streams. */ +/** + * Event passed to the `onChatStart` callback. + */ +export type ChatStartEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The initial model-ready messages for this conversation. */ + messages: ModelMessage[]; + /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ + clientData: unknown; +}; + +/** + * Event passed to the `onTurnComplete` callback. + */ +export type TurnCompleteEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The full accumulated conversation in model format (all turns so far). */ + messages: ModelMessage[]; + /** + * The full accumulated conversation in UI format (all turns so far). + * This is the format expected by `useChat` — store this for persistence. 
+ */ + uiMessages: UIMessage[]; + /** + * Only the new model messages from this turn (user message(s) + assistant response). + * Useful for appending to an existing conversation record. + */ + newMessages: ModelMessage[]; + /** + * Only the new UI messages from this turn (user message(s) + assistant response). + * Useful for inserting individual message records instead of overwriting the full history. + */ + newUIMessages: UIMessage[]; + /** The assistant's response for this turn (undefined if `pipeChat` was used manually). */ + responseMessage: UIMessage | undefined; + /** The turn number (0-indexed). */ + turn: number; +}; + export type ChatTaskOptions = Omit< TaskOptions, "run" @@ -394,6 +435,35 @@ export type ChatTaskOptions = Omit< */ run: (payload: ChatTaskRunPayload) => Promise; + /** + * Called on the first turn (turn 0) of a new run, before the `run` function executes. + * + * Use this to create the chat record in your database when a new conversation starts. + * + * @example + * ```ts + * onChatStart: async ({ chatId, messages, clientData }) => { + * await db.chat.create({ data: { id: chatId, userId: clientData.userId } }); + * } + * ``` + */ + onChatStart?: (event: ChatStartEvent) => Promise | void; + + /** + * Called after each turn completes (after the response is captured, before waiting + * for the next message). Also fires on the final turn. + * + * Use this to persist the conversation to your database after each assistant response. + * + * @example + * ```ts + * onTurnComplete: async ({ chatId, messages }) => { + * await db.chat.update({ where: { id: chatId }, data: { messages } }); + * } + * ``` + */ + onTurnComplete?: (event: TurnCompleteEvent) => Promise | void; + /** * Maximum number of conversational turns (message round-trips) a single run * will handle before ending. 
After this many turns the run completes @@ -456,6 +526,8 @@ function chatTask( ): Task { const { run: userRun, + onChatStart, + onTurnComplete, maxTurns = 100, turnTimeout = "1h", warmTimeoutInSeconds = 30, @@ -478,6 +550,10 @@ function chatTask( // user message(s) and the captured assistant response. let accumulatedMessages: ModelMessage[] = []; + // Accumulated UI messages for persistence. Mirrors the model accumulator + // but in frontend-friendly UIMessage format (with parts, id, etc.). + let accumulatedUIMessages: UIMessage[] = []; + // Mutable reference to the current turn's stop controller so the // stop input stream listener (registered once) can abort the right turn. let currentStopController: AbortController | undefined; @@ -549,15 +625,52 @@ function chatTask( // Turn 2+: only the new message(s) → appended to the accumulator. const incomingModelMessages = await convertToModelMessages(uiMessages); + // Track new messages for this turn (user input + assistant response). + const turnNewModelMessages: ModelMessage[] = []; + const turnNewUIMessages: UIMessage[] = []; + if (turn === 0) { accumulatedMessages = incomingModelMessages; + accumulatedUIMessages = [...uiMessages]; + // On first turn, the "new" messages are just the last user message + // (the rest is history). We'll add the response after streaming. + if (uiMessages.length > 0) { + turnNewUIMessages.push(uiMessages[uiMessages.length - 1]!); + const lastModel = incomingModelMessages[incomingModelMessages.length - 1]; + if (lastModel) turnNewModelMessages.push(lastModel); + } } else if (currentWirePayload.trigger === "regenerate-message") { // Regenerate: frontend sent full history with last assistant message // removed. Reset the accumulator to match. accumulatedMessages = incomingModelMessages; + accumulatedUIMessages = [...uiMessages]; + // No new user messages for regenerate — just the response (added below) } else { // Submit: frontend sent only the new user message(s). Append to accumulator. 
accumulatedMessages.push(...incomingModelMessages); + accumulatedUIMessages.push(...uiMessages); + turnNewModelMessages.push(...incomingModelMessages); + turnNewUIMessages.push(...uiMessages); + } + + // Fire onChatStart on the first turn + if (turn === 0 && onChatStart) { + await tracer.startActiveSpan( + "onChatStart()", + async () => { + await onChatStart({ + chatId: currentWirePayload.chatId, + messages: accumulatedMessages, + clientData: wireMetadata, + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + }, + } + ); } // Captured by the onFinish callback below — works even on abort/stop. @@ -602,11 +715,14 @@ function chatTask( // The onFinish callback fires even on abort/stop, so partial responses // from stopped generation are captured correctly. if (capturedResponseMessage) { + accumulatedUIMessages.push(capturedResponseMessage); + turnNewUIMessages.push(capturedResponseMessage); try { const responseModelMessages = await convertToModelMessages([ stripProviderMetadata(capturedResponseMessage), ]); accumulatedMessages.push(...responseModelMessages); + turnNewModelMessages.push(...responseModelMessages); } catch { // Conversion failed — skip accumulation for this turn } @@ -618,6 +734,30 @@ function chatTask( if (runSignal.aborted) return "exit"; + // Fire onTurnComplete after response capture + if (onTurnComplete) { + await tracer.startActiveSpan( + "onTurnComplete()", + async () => { + await onTurnComplete({ + chatId: currentWirePayload.chatId, + messages: accumulatedMessages, + uiMessages: accumulatedUIMessages, + newMessages: turnNewModelMessages, + newUIMessages: turnNewUIMessages, + responseMessage: capturedResponseMessage, + turn, + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onComplete", + [SemanticInternalAttributes.COLLAPSED]: true, + }, + } + ); + } + // Write turn-complete control chunk so frontend closes its stream 
await writeTurnCompleteChunk(currentWirePayload.chatId); @@ -629,9 +769,12 @@ function chatTask( // Phase 1: Keep the run warm for quick response to the next message. // The run stays active (using compute) during this window. - if (warmTimeoutInSeconds > 0) { + const effectiveWarmTimeout = + (metadata.get(WARM_TIMEOUT_METADATA_KEY) as number | undefined) ?? warmTimeoutInSeconds; + + if (effectiveWarmTimeout > 0) { const warm = await messagesInput.once({ - timeoutMs: warmTimeoutInSeconds * 1000, + timeoutMs: effectiveWarmTimeout * 1000, spanName: "waiting (warm)", }); @@ -643,8 +786,11 @@ function chatTask( } // Phase 2: Suspend the task (frees compute) until the next message arrives + const effectiveTurnTimeout = + (metadata.get(TURN_TIMEOUT_METADATA_KEY) as string | undefined) ?? turnTimeout; + const next = await messagesInput.wait({ - timeout: turnTimeout, + timeout: effectiveTurnTimeout, spanName: "waiting (suspended)", }); @@ -693,6 +839,74 @@ function chatTask( * const token = await chat.createAccessToken("my-chat"); * ``` */ +// --------------------------------------------------------------------------- +// Runtime configuration helpers +// --------------------------------------------------------------------------- + +const TURN_TIMEOUT_METADATA_KEY = "chat.turnTimeout"; +const WARM_TIMEOUT_METADATA_KEY = "chat.warmTimeout"; + +/** + * Override the turn timeout for subsequent turns in the current run. + * + * The turn timeout controls how long the run stays suspended (freeing compute) + * waiting for the next user message. When it expires, the run completes + * gracefully and the next message starts a fresh run. + * + * Call from inside a `chatTask` run function to adjust based on context. + * + * @param duration - A duration string (e.g. 
`"5m"`, `"1h"`, `"30s"`) + * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setTurnTimeout("2h"); + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setTurnTimeout(duration: string): void { + metadata.set(TURN_TIMEOUT_METADATA_KEY, duration); +} + +/** + * Override the turn timeout in seconds for subsequent turns in the current run. + * + * @param seconds - Number of seconds to wait for the next message before ending the run + * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setTurnTimeoutInSeconds(3600); // 1 hour + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setTurnTimeoutInSeconds(seconds: number): void { + metadata.set(TURN_TIMEOUT_METADATA_KEY, `${seconds}s`); +} + +/** + * Override the warm timeout for subsequent turns in the current run. + * + * The warm timeout controls how long the run stays active (using compute) + * after each turn, waiting for the next message. During this window, + * responses are instant. After it expires, the run suspends. + * + * @param seconds - Number of seconds to stay warm (0 to suspend immediately) + * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setWarmTimeoutInSeconds(60); + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setWarmTimeoutInSeconds(seconds: number): void { + metadata.set(WARM_TIMEOUT_METADATA_KEY, seconds); +} + export const chat = { /** Create a chat task. See {@link chatTask}. */ task: chatTask, @@ -700,6 +914,12 @@ export const chat = { pipe: pipeChat, /** Create a public access token for a chat task. See {@link createChatAccessToken}. */ createAccessToken: createChatAccessToken, + /** Override the turn timeout at runtime (duration string). See {@link setTurnTimeout}. */ + setTurnTimeout, + /** Override the turn timeout at runtime (seconds). See {@link setTurnTimeoutInSeconds}. 
*/ + setTurnTimeoutInSeconds, + /** Override the warm timeout at runtime. See {@link setWarmTimeoutInSeconds}. */ + setWarmTimeoutInSeconds, }; /** diff --git a/packages/trigger-sdk/src/v3/chat-react.ts b/packages/trigger-sdk/src/v3/chat-react.ts index e37e2e8e58f..1ee48a4b23b 100644 --- a/packages/trigger-sdk/src/v3/chat-react.ts +++ b/packages/trigger-sdk/src/v3/chat-react.ts @@ -23,7 +23,7 @@ * ``` */ -import { useRef } from "react"; +import { useEffect, useRef } from "react"; import { TriggerChatTransport, type TriggerChatTransportOptions, @@ -57,6 +57,9 @@ export type UseTriggerChatTransportOptions = Om * For dynamic access tokens, pass a function — it will be called on each * request without needing to recreate the transport. * + * The `onSessionChange` callback is kept in a ref so the transport always + * calls the latest version without needing to be recreated. + * * @example * ```tsx * import { useChat } from "@ai-sdk/react"; @@ -80,5 +83,12 @@ export function useTriggerChatTransport( if (ref.current === null) { ref.current = new TriggerChatTransport(options); } + + // Keep onSessionChange up to date without recreating the transport + const { onSessionChange } = options; + useEffect(() => { + ref.current?.setOnSessionChange(onSessionChange); + }, [onSessionChange]); + return ref.current; } diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts index af39b6c9686..50138b57287 100644 --- a/packages/trigger-sdk/src/v3/chat.test.ts +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -1727,4 +1727,187 @@ describe("TriggerChatTransport", () => { expect(triggerCallCount).toBe(2); }); }); + + describe("onSessionChange", () => { + it("should fire when a new session is created", async () => { + const onSessionChange = vi.fn(); + const triggerRunId = "run_session_new"; + const publicToken = "pub_session_new"; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: triggerRunId }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "__trigger_turn_complete" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + onSessionChange, + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Session created notification should have fired + expect(onSessionChange).toHaveBeenCalledWith("chat-1", { + runId: triggerRunId, + publicAccessToken: publicToken, + lastEventId: undefined, + }); + + // Consume stream + const reader = stream.getReader(); + while (!(await reader.read()).done) {} + + // Should also fire with updated lastEventId on turn complete + const lastCall = onSessionChange.mock.calls[onSessionChange.mock.calls.length - 1]!; + expect(lastCall![0]).toBe("chat-1"); + expect(lastCall![1]).not.toBeNull(); + expect(lastCall![1].lastEventId).toBeDefined(); + }); + + it("should fire with null when session is deleted (stream ends naturally)", async () => { + const onSessionChange = vi.fn(); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_end" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_end", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // No turn-complete chunk — stream ends naturally (run completed) + return new Response(createSSEStream(sseEncode(sampleChunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + onSessionChange, + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-end", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Consume the stream fully + const reader = stream.getReader(); + while (!(await reader.read()).done) {} + + // Session should have been created then deleted + expect(onSessionChange).toHaveBeenCalledWith("chat-end", expect.objectContaining({ + runId: "run_end", + })); + expect(onSessionChange).toHaveBeenCalledWith("chat-end", null); + }); + + it("should be updatable via setOnSessionChange", async () => { + const onSessionChange1 = vi.fn(); + const onSessionChange2 = vi.fn(); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_update" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_update", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "__trigger_turn_complete" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + onSessionChange: onSessionChange1, + }); + + // Update the callback before sending + transport.setOnSessionChange(onSessionChange2); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-update", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Only onSessionChange2 should have been called + expect(onSessionChange1).not.toHaveBeenCalled(); + expect(onSessionChange2).toHaveBeenCalled(); + }); + }); }); diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index ed7530b4bc4..eb0aae7ccde 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -100,6 +100,57 @@ export type TriggerChatTransportOptions = { * ``` */ metadata?: Record; + + /** + * Restore active chat sessions from external storage (e.g. localStorage). + * + * After a page refresh, pass previously persisted sessions here so the + * transport can reconnect to existing runs instead of starting new ones. + * Use `getSession()` to retrieve session state for persistence. 
+ * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * sessions: { + * "chat-abc": { runId: "run_123", publicAccessToken: "...", lastEventId: "42" }, + * }, + * }); + * ``` + */ + sessions?: Record; + + /** + * Called whenever a chat session's state changes. + * + * Fires when: + * - A new session is created (after triggering a task) + * - A turn completes (lastEventId updated) + * - A session is removed (run ended or input stream send failed) — `session` will be `null` + * + * Use this to persist session state for reconnection after page refreshes, + * without needing to call `getSession()` manually. + * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * onSessionChange: (chatId, session) => { + * if (session) { + * localStorage.setItem(`session:${chatId}`, JSON.stringify(session)); + * } else { + * localStorage.removeItem(`session:${chatId}`); + * } + * }, + * }); + * ``` + */ + onSessionChange?: ( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => void; }; /** @@ -151,6 +202,12 @@ export class TriggerChatTransport implements ChatTransport { private readonly extraHeaders: Record; private readonly streamTimeoutSeconds: number; private readonly defaultMetadata: Record | undefined; + private _onSessionChange: + | (( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => void) + | undefined; private sessions: Map = new Map(); @@ -165,6 +222,18 @@ export class TriggerChatTransport implements ChatTransport { this.extraHeaders = options.headers ?? {}; this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? 
DEFAULT_STREAM_TIMEOUT_SECONDS; this.defaultMetadata = options.metadata; + this._onSessionChange = options.onSessionChange; + + // Restore sessions from external storage + if (options.sessions) { + for (const [chatId, session] of Object.entries(options.sessions)) { + this.sessions.set(chatId, { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }); + } + } } sendMessages = async ( @@ -216,6 +285,7 @@ export class TriggerChatTransport implements ChatTransport { } catch { // If sending fails (run died, etc.), fall through to trigger a new run. this.sessions.delete(chatId); + this.notifySessionChange(chatId, null); } } @@ -236,10 +306,12 @@ export class TriggerChatTransport implements ChatTransport { ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken : undefined; - this.sessions.set(chatId, { + const newSession: ChatSessionState = { runId, publicAccessToken: publicAccessToken ?? currentToken, - }); + }; + this.sessions.set(chatId, newSession); + this.notifySessionChange(chatId, newSession); return this.subscribeToStream( runId, publicAccessToken ?? currentToken, @@ -261,6 +333,62 @@ export class TriggerChatTransport implements ChatTransport { return this.subscribeToStream(session.runId, session.publicAccessToken, undefined, options.chatId); }; + /** + * Get the current session state for a chat, suitable for external persistence. + * + * Returns `undefined` if no active session exists for this chatId. + * Persist the returned value to localStorage so it can be restored + * after a page refresh via `restoreSession()`. 
+ * + * @example + * ```ts + * const session = transport.getSession(chatId); + * if (session) { + * localStorage.setItem(`session:${chatId}`, JSON.stringify(session)); + * } + * ``` + */ + getSession = (chatId: string): { runId: string; publicAccessToken: string; lastEventId?: string } | undefined => { + const session = this.sessions.get(chatId); + if (!session) return undefined; + return { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }; + }; + + /** + * Update the `onSessionChange` callback. + * Useful for React hooks that need to update the callback without recreating the transport. + */ + setOnSessionChange( + callback: + | (( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => void) + | undefined + ): void { + this._onSessionChange = callback; + } + + private notifySessionChange( + chatId: string, + session: ChatSessionState | null + ): void { + if (!this._onSessionChange) return; + if (session) { + this._onSessionChange(chatId, { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }); + } else { + this._onSessionChange(chatId, null); + } + } + private subscribeToStream( runId: string, accessToken: string, @@ -331,6 +459,7 @@ export class TriggerChatTransport implements ChatTransport { // the next message via input streams. 
if (chatId && !combinedSignal.aborted) { this.sessions.delete(chatId); + this.notifySessionChange(chatId, null); } controller.close(); return; @@ -348,6 +477,7 @@ export class TriggerChatTransport implements ChatTransport { session.lastEventId = value.id; } + // Guard against heartbeat or malformed SSE events if (value.chunk != null && typeof value.chunk === "object") { const chunk = value.chunk as Record; @@ -363,6 +493,10 @@ export class TriggerChatTransport implements ChatTransport { } if (chunk.type === "__trigger_turn_complete" && chatId) { + // Notify with updated lastEventId before closing + if (session) { + this.notifySessionChange(chatId, session); + } internalAbort.abort(); try { controller.close(); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 269b8df49f6..e58d8fa4898 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1104,7 +1104,7 @@ importers: version: 18.3.1 react-email: specifier: ^2.1.1 - version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0) + version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0) resend: specifier: ^3.2.0 version: 3.2.0 @@ -2128,6 +2128,12 @@ importers: '@ai-sdk/react': specifier: ^3.0.0 version: 3.0.51(react@19.1.0)(zod@3.25.76) + '@prisma/adapter-pg': + specifier: ^7.4.2 + version: 7.4.2 + '@prisma/client': + specifier: ^7.4.2 + version: 7.4.2(prisma@7.4.2(@types/react@19.0.12)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4) '@trigger.dev/sdk': specifier: workspace:* version: link:../../packages/trigger-sdk @@ -2137,6 +2143,9 @@ importers: next: specifier: 15.3.3 version: 15.3.3(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + pg: + specifier: ^8.16.3 + version: 8.16.3 react: specifier: ^19.0.0 version: 19.1.0 @@ -2165,6 +2174,9 @@ importers: '@types/react-dom': specifier: ^19 version: 19.0.4(@types/react@19.0.12) + prisma: + specifier: 
^7.4.2 + version: 7.4.2(@types/react@19.0.12)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) tailwindcss: specifier: ^4 version: 4.0.17 @@ -4120,18 +4132,30 @@ packages: '@changesets/write@0.2.3': resolution: {integrity: sha512-Dbamr7AIMvslKnNYsLFafaVORx4H0pvCA2MHqgtNCySMe1blImEyAEOzDmcgKAkgz4+uwoLz7demIrX+JBr/Xw==} + '@chevrotain/cst-dts-gen@10.5.0': + resolution: {integrity: sha512-lhmC/FyqQ2o7pGK4Om+hzuDrm9rhFYIJ/AXoQBeongmn870Xeb0L6oGEiuR8nohFNL5sMaQEJWCxr1oIVIVXrw==} + '@chevrotain/cst-dts-gen@11.0.3': resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} + '@chevrotain/gast@10.5.0': + resolution: {integrity: sha512-pXdMJ9XeDAbgOWKuD1Fldz4ieCs6+nLNmyVhe2gZVqoO7v8HXuHYs5OV2EzUtbuai37TlOAQHrTDvxMnvMJz3A==} + '@chevrotain/gast@11.0.3': resolution: {integrity: sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==} '@chevrotain/regexp-to-ast@11.0.3': resolution: {integrity: sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==} + '@chevrotain/types@10.5.0': + resolution: {integrity: sha512-f1MAia0x/pAVPWH/T73BJVyO2XU5tI4/iE7cnxb7tqdNTNhQI3Uq3XkqcoteTmD4t1aM0LbHCJOhgIDn07kl2A==} + '@chevrotain/types@11.0.3': resolution: {integrity: sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==} + '@chevrotain/utils@10.5.0': + resolution: {integrity: sha512-hBzuU5+JjB2cqNZyszkDHZgOSrUUT8V3dhgRl8Q9Gp6dAj/H5+KILGjbhDpc3Iy9qmqlm/akuOI2ut9VUtzJxQ==} + '@chevrotain/utils@11.0.3': resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} @@ -4325,6 +4349,20 @@ packages: '@electric-sql/client@1.0.14': resolution: {integrity: sha512-LtPAfeMxXRiYS0hyDQ5hue2PjljUiK9stvzsVyVb4nwxWQxfOWTSF42bHTs/o5i3x1T4kAQ7mwHpxa4A+f8X7Q==} + '@electric-sql/pglite-socket@0.0.20': + resolution: 
{integrity: sha512-J5nLGsicnD9wJHnno9r+DGxfcZWh+YJMCe0q/aCgtG6XOm9Z7fKeite8IZSNXgZeGltSigM9U/vAWZQWdgcSFg==} + hasBin: true + peerDependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite-tools@0.2.20': + resolution: {integrity: sha512-BK50ZnYa3IG7ztXhtgYf0Q7zijV32Iw1cYS8C+ThdQlwx12V5VZ9KRJ42y82Hyb4PkTxZQklVQA9JHyUlex33A==} + peerDependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite@0.3.15': + resolution: {integrity: sha512-Cj++n1Mekf9ETfdc16TlDi+cDDQF0W7EcbyRHYOAeZdsAe8M/FJg18itDTSwyHfar2WIezawM9o0EKaRGVKygQ==} + '@electric-sql/react@0.3.5': resolution: {integrity: sha512-qPrlF3BsRg5L8zAn1sLGzc3pkswfEHyQI3lNOu7Xllv1DBx85RvHR1zgGGPAUfC8iwyWupQu9pFPE63GdbeuhA==} peerDependencies: @@ -6024,6 +6062,10 @@ packages: '@cfworker/json-schema': optional: true + '@mrleebo/prisma-ast@0.13.1': + resolution: {integrity: sha512-XyroGQXcHrZdvmrGJvsA9KNeOOgGMg1Vg9OlheUsBOSKznLMDl+YChxbkboRHvtFYJEMRYmlV3uoo/njCw05iw==} + engines: {node: '>=16'} + '@msgpack/msgpack@3.0.0-beta2': resolution: {integrity: sha512-y+l1PNV0XDyY8sM3YtuMLK5vE3/hkfId+Do8pLo/OPxfxuFAUwcGz3oiiUuV46/aBpwTzZ+mRWVMtlSKbradhw==} engines: {node: '>= 14'} @@ -7036,9 +7078,15 @@ packages: '@prisma/adapter-pg@6.20.0-integration-next.8': resolution: {integrity: sha512-5+ZjSPMzyfDYMmWLH1IaQIOQGa8eJrqEz5A9V4vS4+b6LV6qvCOHjqlnbRQ5IKSNCwFP055SJ54RsPES+0jOyA==} + '@prisma/adapter-pg@7.4.2': + resolution: {integrity: sha512-oUo2Zhe9Tf6YwVL8kLPuOLTK1Z2pwi/Ua77t2PuGyBan2w7shRKqHvYK+3XXmRH9RWhPJ4SMtHZKpNo6Ax/4bQ==} + '@prisma/client-runtime-utils@6.20.0-integration-next.8': resolution: {integrity: sha512-prENLjPislFvRWDHNgXmg9yzixQYsFPVQGtDv5zIMs4pV2KPdNc5pCiZ3n77hAinvqGJVafASa+eU4TfpVphdA==} + '@prisma/client-runtime-utils@7.4.2': + resolution: {integrity: sha512-cID+rzOEb38VyMsx5LwJMEY4NGIrWCNpKu/0ImbeooQ2Px7TI+kOt7cm0NelxUzF2V41UVVXAmYjANZQtCu1/Q==} + '@prisma/client@4.9.0': resolution: {integrity: 
sha512-bz6QARw54sWcbyR1lLnF2QHvRW5R/Jxnbbmwh3u+969vUKXtBkXgSgjDA85nji31ZBlf7+FrHDy5x+5ydGyQDg==} engines: {node: '>=14.17'} @@ -7096,6 +7144,18 @@ packages: typescript: optional: true + '@prisma/client@7.4.2': + resolution: {integrity: sha512-ts2mu+cQHriAhSxngO3StcYubBGTWDtu/4juZhXCUKOwgh26l+s4KD3vT2kMUzFyrYnll9u/3qWrtzRv9CGWzA==} + engines: {node: ^20.19 || ^22.12 || >=24.0} + peerDependencies: + prisma: '*' + typescript: 5.5.4 + peerDependenciesMeta: + prisma: + optional: true + typescript: + optional: true + '@prisma/config@6.14.0': resolution: {integrity: sha512-IwC7o5KNNGhmblLs23swnfBjADkacBb7wvyDXUWLwuvUQciKJZqyecU0jw0d7JRkswrj+XTL8fdr0y2/VerKQQ==} @@ -7108,6 +7168,9 @@ packages: '@prisma/config@6.20.0-integration-next.8': resolution: {integrity: sha512-nwf+tczfiGSn0tnuHmBpnK+wmaYzcC20sn9Zt8BSoJVCewJxf8ASHPxZEGgvFLl05zbCfFtq3rMc6ZnAiYjowg==} + '@prisma/config@7.4.2': + resolution: {integrity: sha512-CftBjWxav99lzY1Z4oDgomdb1gh9BJFAOmWF6P2v1xRfXqQb56DfBub+QKcERRdNoAzCb3HXy3Zii8Vb4AsXhg==} + '@prisma/debug@4.16.2': resolution: {integrity: sha512-7L7WbG0qNNZYgLpsVB8rCHCXEyHFyIycRlRDNwkVfjQmACC2OW6AWCYCbfdjQhkF/t7+S3njj8wAWAocSs+Brw==} @@ -7123,12 +7186,24 @@ packages: '@prisma/debug@6.20.0-integration-next.8': resolution: {integrity: sha512-PqUUFXf8MDoIrsKMzpF4NYqA3gHE8l/CUWVnYa4hNIbynCcEhvk7iT+6ve0u9w1TiGVUFnIVMuqFGEb2aHCuFw==} + '@prisma/debug@7.2.0': + resolution: {integrity: sha512-YSGTiSlBAVJPzX4ONZmMotL+ozJwQjRmZweQNIq/ER0tQJKJynNkRB3kyvt37eOfsbMCXk3gnLF6J9OJ4QWftw==} + + '@prisma/debug@7.4.2': + resolution: {integrity: sha512-aP7qzu+g/JnbF6U69LMwHoUkELiserKmWsE2shYuEpNUJ4GrtxBCvZwCyCBHFSH2kLTF2l1goBlBh4wuvRq62w==} + + '@prisma/dev@0.20.0': + resolution: {integrity: sha512-ovlBYwWor0OzG+yH4J3Ot+AneD818BttLA+Ii7wjbcLHUrnC4tbUPVGyNd3c/+71KETPKZfjhkTSpdS15dmXNQ==} + '@prisma/driver-adapter-utils@6.16.0': resolution: {integrity: sha512-dsRHvEnifJ3xqpMKGBy1jRwR8yc+7Ko4TcHrdTQJIfq6NYN2gNoOf0k91hcbzs5AH19wDxjuHXCveklWq5AJdA==} 
'@prisma/driver-adapter-utils@6.20.0-integration-next.8': resolution: {integrity: sha512-TXpFugr3sCl2bHechoG3p9mvlq2Z3GgA0Cp73lUOEWQyUuoG8NW/4UA56Ax1r5fBUAs9hKbr20Ld6wKCZhnz8Q==} + '@prisma/driver-adapter-utils@7.4.2': + resolution: {integrity: sha512-REdjFpT/ye9KdDs+CXAXPIbMQkVLhne9G5Pe97sNY4Ovx4r2DAbWM9hOFvvB1Oq8H8bOCdu0Ri3AoGALquQqVw==} + '@prisma/engines-version@4.9.0-42.ceb5c99003b99c9ee2c1d2e618e359c14aef2ea5': resolution: {integrity: sha512-M16aibbxi/FhW7z1sJCX8u+0DriyQYY5AyeTH7plQm9MLnURoiyn3CZBqAyIoQ+Z1pS77usCIibYJWSgleBMBA==} @@ -7144,6 +7219,9 @@ packages: '@prisma/engines-version@6.20.0-11.next-80ee0a44bf5668992b0c909c946a755b86b56c95': resolution: {integrity: sha512-DqrQqRIgeocvWpgN7t9PymiJdV8ISSSrZCuilAtpKEaKIt4JUGIxsAdWNMRSHk188hYA2W1YFG5KvWUYBaCO1A==} + '@prisma/engines-version@7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919': + resolution: {integrity: sha512-5FIKY3KoYQlBuZC2yc16EXfVRQ8HY+fLqgxkYfWCtKhRb3ajCRzP/rPeoSx11+NueJDANdh4hjY36mdmrTcGSg==} + '@prisma/engines@6.14.0': resolution: {integrity: sha512-LhJjqsALFEcoAtF07nSaOkVguaxw/ZsgfROIYZ8bAZDobe7y8Wy+PkYQaPOK1iLSsFgV2MhCO/eNrI1gdSOj6w==} @@ -7156,6 +7234,9 @@ packages: '@prisma/engines@6.20.0-integration-next.8': resolution: {integrity: sha512-XdzTxN0PFLIW2DcprG9xlMy39FrsjxW5J2qtHQ58FBtbllHSZGD0pK2nzATw5dRh7nGhmX+uNA02cqHv5oND3A==} + '@prisma/engines@7.4.2': + resolution: {integrity: sha512-B+ZZhI4rXlzjVqRw/93AothEKOU5/x4oVyJFGo9RpHPnBwaPwk4Pi0Q4iGXipKxeXPs/dqljgNBjK0m8nocOJA==} + '@prisma/fetch-engine@6.14.0': resolution: {integrity: sha512-MPzYPOKMENYOaY3AcAbaKrfvXVlvTc6iHmTXsp9RiwCX+bPyfDMqMFVUSVXPYrXnrvEzhGHfyiFy0PRLHPysNg==} @@ -7168,6 +7249,9 @@ packages: '@prisma/fetch-engine@6.20.0-integration-next.8': resolution: {integrity: sha512-zVNM5Q1hFclpqD1y7wujDzyc3l01S8ZMuP0Zddzuda4LOA7/F2enjro48VcD2/fxkBgzkkmO/quLOGnbQDKO7g==} + '@prisma/fetch-engine@7.4.2': + resolution: {integrity: sha512-f/c/MwYpdJO7taLETU8rahEstLeXfYgQGlz5fycG7Fbmva3iPdzGmjiSWHeSWIgNnlXnelUdCJqyZnFocurZuA==} + 
'@prisma/generator-helper@4.16.2': resolution: {integrity: sha512-bMOH7y73Ui7gpQrioFeavMQA+Tf8ksaVf8Nhs9rQNzuSg8SSV6E9baczob0L5KGZTSgYoqnrRxuo03kVJYrnIg==} @@ -7183,6 +7267,12 @@ packages: '@prisma/get-platform@6.20.0-integration-next.8': resolution: {integrity: sha512-21jEfhFpC8FuvPD7JEf1Qu02engBCBa3+1il3UiyHKcKS3Kbp9IgR+DVqqrqSWIGJg8+1oTfF/3AgbjunaQ1Ag==} + '@prisma/get-platform@7.2.0': + resolution: {integrity: sha512-k1V0l0Td1732EHpAfi2eySTezyllok9dXb6UQanajkJQzPUGi3vO2z7jdkz67SypFTdmbnyGYxvEvYZdZsMAVA==} + + '@prisma/get-platform@7.4.2': + resolution: {integrity: sha512-UTnChXRwiauzl/8wT4hhe7Xmixja9WE28oCnGpBtRejaHhvekx5kudr3R4Y9mLSA0kqGnAMeyTiKwDVMjaEVsw==} + '@prisma/instrumentation@6.11.1': resolution: {integrity: sha512-mrZOev24EDhnefmnZX7WVVT7v+r9LttPRqf54ONvj6re4XMF7wFTpK2tLJi4XHB7fFp/6xhYbgRel8YV7gQiyA==} peerDependencies: @@ -7193,6 +7283,9 @@ packages: peerDependencies: '@opentelemetry/api': ^1.8 + '@prisma/query-plan-executor@7.2.0': + resolution: {integrity: sha512-EOZmNzcV8uJ0mae3DhTsiHgoNCuu1J9mULQpGCh62zN3PxPTd+qI9tJvk5jOst8WHKQNwJWR3b39t0XvfBB0WQ==} + '@prisma/studio-core-licensed@0.6.0': resolution: {integrity: sha512-LNC8ohLosuWz6n9oKNqfR5Ep/JYiPavk4RxrU6inOS4LEvMQts8N+Vtt7NAB9i06BaiIRKnPsg1Hcaao5pRjSw==} peerDependencies: @@ -7200,6 +7293,13 @@ packages: react: ^18.0.0 || ^19.0.0 react-dom: ^18.0.0 || ^19.0.0 + '@prisma/studio-core@0.13.1': + resolution: {integrity: sha512-agdqaPEePRHcQ7CexEfkX1RvSH9uWDb6pXrZnhCRykhDFAV0/0P3d07WtfiY8hZWb7oRU4v+NkT4cGFHkQJIPg==} + peerDependencies: + '@types/react': ^18.0.0 || ^19.0.0 + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + '@protobuf-ts/runtime@2.11.1': resolution: {integrity: sha512-KuDaT1IfHkugM2pyz+FwiY80ejWrkH1pAtOBOZFuR6SXEFTsnb/jiQWQ1rCIrcKx2BtyxnxW6BWwsVSA/Ie+WQ==} @@ -11899,6 +11999,10 @@ packages: aws-sign2@0.7.0: resolution: {integrity: sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} + aws-ssl-profiles@1.1.2: + resolution: 
{integrity: sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g==} + engines: {node: '>= 6.0.0'} + aws4@1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} @@ -12271,6 +12375,9 @@ packages: peerDependencies: chevrotain: ^11.0.0 + chevrotain@10.5.0: + resolution: {integrity: sha512-Pkv5rBY3+CsHOYfV5g/Vs5JY9WTHHDEKOlohI2XeygaZhUeqhAlldZ8Hz9cRmxu709bvS08YzxHdTPHhffc13A==} + chevrotain@11.0.3: resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} @@ -14196,6 +14303,10 @@ packages: resolution: {integrity: sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==} engines: {node: '>=14'} + foreground-child@3.3.1: + resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} + engines: {node: '>=14'} + forever-agent@0.6.1: resolution: {integrity: sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==} @@ -14319,6 +14430,9 @@ packages: functions-have-names@1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + generate-function@2.3.1: + resolution: {integrity: sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==} + generic-names@4.0.0: resolution: {integrity: sha512-ySFolZQfw9FoDb3ed9d80Cm9f0+r7qj+HJkWjeD9RBfpxEVTlVhol+gvaQB/78WbwYfbnNh8nWHHBSlg072y6A==} @@ -14342,6 +14456,9 @@ packages: resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==} engines: {node: '>=6'} + get-port-please@3.2.0: + resolution: {integrity: sha512-I9QVvBw5U/hw3RmWpYKRumUeaDgxTPd401x364rLmWBJcOQ753eov1eTgzDqRG9bqFIfDc7gfzcQEWrUri3o1A==} + get-port@5.1.1: resolution: {integrity: 
sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ==} engines: {node: '>=8'} @@ -14493,6 +14610,9 @@ packages: resolution: {integrity: sha512-rEDCuqUQ4tbD78TpzsMtt5OIf0cBCSDWSJtUDaF6JsAh+k0v9r++NzxNEG87oDZx9ZwGhD8DaezR2L/yrw0Jdw==} engines: {node: '>=10'} + grammex@3.1.12: + resolution: {integrity: sha512-6ufJOsSA7LcQehIJNCO7HIBykfM7DXQual0Ny780/DEcJIpBlHRvcqEBWGPYd7hrXL2GJ3oJI1MIhaXjWmLQOQ==} + grapheme-splitter@1.0.4: resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} @@ -14505,6 +14625,9 @@ packages: engines: {node: '>=14.0.0'} hasBin: true + graphmatch@1.1.1: + resolution: {integrity: sha512-5ykVn/EXM1hF0XCaWh05VbYvEiOL2lY1kBxZtaYsyvjp7cmWOU1XsAdfQBwClraEofXDT197lFbXOEVMHpvQOg==} + graphql@16.6.0: resolution: {integrity: sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} @@ -14633,6 +14756,10 @@ packages: hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + hono@4.11.4: + resolution: {integrity: sha512-U7tt8JsyrxSRKspfhtLET79pU8K+tInj5QZXs1jSugO1Vq5dFj3kmZsRldo29mTBfcjDRVRXrEZ6LS63Cog9ZA==} + engines: {node: '>=16.9.0'} + hono@4.11.8: resolution: {integrity: sha512-eVkB/CYCCei7K2WElZW9yYQFWssG0DhaDhVvr7wy5jJ22K+ck8fWW0EsLpB0sITUTvPnc97+rrbQqIr5iqiy9Q==} engines: {node: '>=16.9.0'} @@ -14687,6 +14814,9 @@ packages: resolution: {integrity: sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==} engines: {node: '>=0.8', npm: '>=1.3.7'} + http-status-codes@2.3.0: + resolution: {integrity: sha512-RJ8XvFvpPM/Dmc5SV+dC4y5PCeOhT3x1Hq0NU3rjGeg5a/CqlhZ7uudknPwZFz4aeAXDcbAyaeP7GAo9lvngtA==} + https-proxy-agent@5.0.1: resolution: {integrity: 
sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} @@ -15018,6 +15148,9 @@ packages: is-promise@4.0.0: resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} + is-property@1.0.2: + resolution: {integrity: sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==} + is-reference@3.0.3: resolution: {integrity: sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==} @@ -15696,6 +15829,10 @@ packages: resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==} engines: {node: '>=12'} + lru.min@1.1.4: + resolution: {integrity: sha512-DqC6n3QQ77zdFpCMASA1a3Jlb64Hv2N2DciFGkO/4L9+q/IpIAuRlKOvCXabtRW6cQf8usbmM6BE/TOPysCdIA==} + engines: {bun: '>=1.0.0', deno: '>=1.30.0', node: '>=8.0.0'} + lucide-react@0.229.0: resolution: {integrity: sha512-b0/KSFXhPi++vUbnYEDUgP8Z8Rw9MQpRfBr+dRZNPMT3FD1HrVgMHXhSpkm9ZrrEtuqIfHf/O+tAGmw4WOmIog==} peerDependencies: @@ -16338,9 +16475,17 @@ packages: resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} hasBin: true + mysql2@3.15.3: + resolution: {integrity: sha512-FBrGau0IXmuqg4haEZRBfHNWB5mUARw6hNwPDXXGg0XzVJ50mr/9hb267lvpVMnhZ1FON3qNd4Xfcez1rbFwSg==} + engines: {node: '>= 8.0'} + mz@2.7.0: resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + named-placeholders@1.1.6: + resolution: {integrity: sha512-Tz09sEL2EEuv5fFowm419c1+a/jSMiBjI9gHxVLrVdbUkkNUUfjsVYs9pVZu5oCon/kmRh9TfLEObFtkVxmY0w==} + engines: {node: '>=8.0.0'} + nan@2.23.1: resolution: {integrity: sha512-r7bBUGKzlqk8oPBDYxt6Z0aEdF1G1rwlMcLk8LCOMbOzf0mG+JUfUzG4fIMWwHWP0iyaLWEQZJmtB7nOHEm/qw==} @@ -17116,9 +17261,6 @@ packages: pg-cloudflare@1.2.7: resolution: {integrity: 
sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==} - pg-connection-string@2.6.4: - resolution: {integrity: sha512-v+Z7W/0EO707aNMaAEfiGnGL9sxxumwLl2fJvCQtMn9Fxsg+lPpPkdcyBSv/KFgpGdYkMfn+EI1Or2EHjpgLCA==} - pg-connection-string@2.8.5: resolution: {integrity: sha512-Ni8FuZ8yAF+sWZzojvtLE2b03cqjO5jNULcHFfM9ZZ0/JXrgom5pBREbtnAw7oxsxJqHw9Nz/XWORUEL3/IFow==} @@ -17138,11 +17280,6 @@ packages: peerDependencies: pg: '>=8.0' - pg-pool@3.6.2: - resolution: {integrity: sha512-Htjbg8BlwXqSBQ9V8Vjtc+vzf/6fVUuak/3/XXKA9oxZprwW3IMDQTGHP+KDmVL7rtd+R1QjbnCFPuTHm3G4hg==} - peerDependencies: - pg: '>=8.0' - pg-pool@3.9.6: resolution: {integrity: sha512-rFen0G7adh1YmgvrmE5IPIqbb+IgEzENUm+tzm6MLLDSlPRoZVhzU1WdML9PV2W5GOdRA9qBKURlbt1OsXOsPw==} peerDependencies: @@ -17642,6 +17779,19 @@ packages: typescript: optional: true + prisma@7.4.2: + resolution: {integrity: sha512-2bP8Ruww3Q95Z2eH4Yqh4KAENRsj/SxbdknIVBfd6DmjPwmpsC4OVFMLOeHt6tM3Amh8ebjvstrUz3V/hOe1dA==} + engines: {node: ^20.19 || ^22.12 || >=24.0} + hasBin: true + peerDependencies: + better-sqlite3: '>=9.0.0' + typescript: 5.5.4 + peerDependenciesMeta: + better-sqlite3: + optional: true + typescript: + optional: true + prismjs@1.29.0: resolution: {integrity: sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==} engines: {node: '>=6'} @@ -18124,6 +18274,9 @@ packages: regex@6.0.1: resolution: {integrity: sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA==} + regexp-to-ast@0.5.0: + resolution: {integrity: sha512-tlbJqcMHnPKI9zSrystikWKwHkBqu2a/Sgw01h3zFjvYrMxEDYHzzoMZnUrbIfpTFEsoRnnviOXNCzFiSc54Qw==} + regexp.prototype.flags@1.4.3: resolution: {integrity: sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==} engines: {node: '>= 0.4'} @@ -18196,6 +18349,9 @@ packages: remark-stringify@11.0.0: resolution: {integrity: 
sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + remeda@2.33.4: + resolution: {integrity: sha512-ygHswjlc/opg2VrtiYvUOPLjxjtdKvjGz1/plDhkG66hjNjFr1xmfrs2ClNFo/E6TyUFiwYNh53bKV26oBoMGQ==} + remend@1.2.1: resolution: {integrity: sha512-4wC12bgXsfKAjF1ewwkNIQz5sqewz/z1xgIgjEMb3r1pEytQ37F0Cm6i+OhbTWEvguJD7lhOUJhK5fSasw9f0w==} @@ -18548,6 +18704,9 @@ packages: resolution: {integrity: sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==} engines: {node: '>= 18'} + seq-queue@0.0.5: + resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==} + serialize-javascript@6.0.1: resolution: {integrity: sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==} @@ -18820,6 +18979,10 @@ packages: resolution: {integrity: sha512-mkpF+RG402P66VMsnQkWewTRzDBWfu9iLbOfxaW/nAKOS/2A9MheQmcU5cmX0D0At9azrorZwpvcBRNNBozACQ==} hasBin: true + sqlstring@2.3.3: + resolution: {integrity: sha512-qC9iz2FlN7DQl3+wjwn3802RTyjCx7sDvfQEXchwa6CWOx07/WVfh91gBmQ9fahw8snwGEWU3xGzOt4tFyHLxg==} + engines: {node: '>= 0.6'} + sqs-consumer@7.5.0: resolution: {integrity: sha512-aY3akgMjuK1aj4E7ZVAURUUnC8aNgUBES+b4SN+6ccMmJhi37MamWl7g1JbPow8sjIp1fBPz1bXCCDJmtjOTAg==} engines: {node: '>=18.0.0'} @@ -18872,6 +19035,9 @@ packages: resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + std-env@3.7.0: resolution: {integrity: sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==} @@ -19970,6 +20136,14 @@ packages: typescript: optional: true + valibot@1.2.0: + resolution: {integrity: 
sha512-mm1rxUsmOxzrwnX5arGS+U4T25RdvpPjPN4yR0u9pUBov9+zGVtO84tif1eY4r6zWxVxu3KzIyknJy3rxfRZZg==} + peerDependencies: + typescript: 5.5.4 + peerDependenciesMeta: + typescript: + optional: true + validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} @@ -20428,6 +20602,9 @@ packages: yup@1.7.0: resolution: {integrity: sha512-VJce62dBd+JQvoc+fCVq+KZfPHr+hXaxCcVgotfwWvlR0Ja3ffYKaJBT8rptPOSKOGJDCUnW2C2JWpud7aRP6Q==} + zeptomatch@2.1.0: + resolution: {integrity: sha512-KiGErG2J0G82LSpniV0CtIzjlJ10E04j02VOudJsPyPwNZgGnRKQy7I1R7GMyg/QswnE4l7ohSGrQbQbjXPPDA==} + zip-stream@6.0.1: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} @@ -23182,12 +23359,23 @@ snapshots: human-id: 1.0.2 prettier: 2.8.8 + '@chevrotain/cst-dts-gen@10.5.0': + dependencies: + '@chevrotain/gast': 10.5.0 + '@chevrotain/types': 10.5.0 + lodash: 4.17.23 + '@chevrotain/cst-dts-gen@11.0.3': dependencies: '@chevrotain/gast': 11.0.3 '@chevrotain/types': 11.0.3 lodash-es: 4.17.21 + '@chevrotain/gast@10.5.0': + dependencies: + '@chevrotain/types': 10.5.0 + lodash: 4.17.23 + '@chevrotain/gast@11.0.3': dependencies: '@chevrotain/types': 11.0.3 @@ -23195,8 +23383,12 @@ snapshots: '@chevrotain/regexp-to-ast@11.0.3': {} + '@chevrotain/types@10.5.0': {} + '@chevrotain/types@11.0.3': {} + '@chevrotain/utils@10.5.0': {} + '@chevrotain/utils@11.0.3': {} '@clack/core@0.5.0': @@ -23407,6 +23599,16 @@ snapshots: optionalDependencies: '@rollup/rollup-darwin-arm64': 4.53.2 + '@electric-sql/pglite-socket@0.0.20(@electric-sql/pglite@0.3.15)': + dependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite-tools@0.2.20(@electric-sql/pglite@0.3.15)': + dependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite@0.3.15': {} + '@electric-sql/react@0.3.5(react@18.2.0)': dependencies: '@electric-sql/client': 0.4.0 
@@ -24183,6 +24385,10 @@ snapshots: dependencies: hono: 4.5.11 + '@hono/node-server@1.19.9(hono@4.11.4)': + dependencies: + hono: 4.11.4 + '@hono/node-server@1.19.9(hono@4.11.8)': dependencies: hono: 4.11.8 @@ -24711,6 +24917,11 @@ snapshots: transitivePeerDependencies: - supports-color + '@mrleebo/prisma-ast@0.13.1': + dependencies: + chevrotain: 10.5.0 + lilconfig: 2.1.0 + '@msgpack/msgpack@3.0.0-beta2': {} '@neondatabase/serverless@0.9.5': @@ -25756,7 +25967,7 @@ snapshots: '@prisma/adapter-pg@6.16.0': dependencies: '@prisma/driver-adapter-utils': 6.16.0 - pg: 8.15.6 + pg: 8.16.3 postgres-array: 3.0.4 transitivePeerDependencies: - pg-native @@ -25769,8 +25980,18 @@ snapshots: transitivePeerDependencies: - pg-native + '@prisma/adapter-pg@7.4.2': + dependencies: + '@prisma/driver-adapter-utils': 7.4.2 + pg: 8.16.3 + postgres-array: 3.0.4 + transitivePeerDependencies: + - pg-native + '@prisma/client-runtime-utils@6.20.0-integration-next.8': {} + '@prisma/client-runtime-utils@7.4.2': {} + '@prisma/client@4.9.0(prisma@6.14.0(magicast@0.3.5)(typescript@5.5.4))': dependencies: '@prisma/engines-version': 4.9.0-42.ceb5c99003b99c9ee2c1d2e618e359c14aef2ea5 @@ -25799,6 +26020,13 @@ snapshots: prisma: 6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) typescript: 5.5.4 + '@prisma/client@7.4.2(prisma@7.4.2(@types/react@19.0.12)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4)': + dependencies: + '@prisma/client-runtime-utils': 7.4.2 + optionalDependencies: + prisma: 7.4.2(@types/react@19.0.12)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) + typescript: 5.5.4 + '@prisma/config@6.14.0(magicast@0.3.5)': dependencies: c12: 3.1.0(magicast@0.3.5) @@ -25835,6 +26063,15 @@ snapshots: transitivePeerDependencies: - magicast + '@prisma/config@7.4.2(magicast@0.3.5)': + dependencies: 
+ c12: 3.1.0(magicast@0.3.5) + deepmerge-ts: 7.1.5 + effect: 3.18.4 + empathic: 2.0.0 + transitivePeerDependencies: + - magicast + '@prisma/debug@4.16.2': dependencies: '@types/debug': 4.1.8 @@ -25851,6 +26088,32 @@ snapshots: '@prisma/debug@6.20.0-integration-next.8': {} + '@prisma/debug@7.2.0': {} + + '@prisma/debug@7.4.2': {} + + '@prisma/dev@0.20.0(typescript@5.5.4)': + dependencies: + '@electric-sql/pglite': 0.3.15 + '@electric-sql/pglite-socket': 0.0.20(@electric-sql/pglite@0.3.15) + '@electric-sql/pglite-tools': 0.2.20(@electric-sql/pglite@0.3.15) + '@hono/node-server': 1.19.9(hono@4.11.4) + '@mrleebo/prisma-ast': 0.13.1 + '@prisma/get-platform': 7.2.0 + '@prisma/query-plan-executor': 7.2.0 + foreground-child: 3.3.1 + get-port-please: 3.2.0 + hono: 4.11.4 + http-status-codes: 2.3.0 + pathe: 2.0.3 + proper-lockfile: 4.1.2 + remeda: 2.33.4 + std-env: 3.10.0 + valibot: 1.2.0(typescript@5.5.4) + zeptomatch: 2.1.0 + transitivePeerDependencies: + - typescript + '@prisma/driver-adapter-utils@6.16.0': dependencies: '@prisma/debug': 6.16.0 @@ -25859,6 +26122,10 @@ snapshots: dependencies: '@prisma/debug': 6.20.0-integration-next.8 + '@prisma/driver-adapter-utils@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/engines-version@4.9.0-42.ceb5c99003b99c9ee2c1d2e618e359c14aef2ea5': {} '@prisma/engines-version@6.14.0-25.717184b7b35ea05dfa71a3236b7af656013e1e49': {} @@ -25869,6 +26136,8 @@ snapshots: '@prisma/engines-version@6.20.0-11.next-80ee0a44bf5668992b0c909c946a755b86b56c95': {} + '@prisma/engines-version@7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919': {} + '@prisma/engines@6.14.0': dependencies: '@prisma/debug': 6.14.0 @@ -25897,6 +26166,13 @@ snapshots: '@prisma/fetch-engine': 6.20.0-integration-next.8 '@prisma/get-platform': 6.20.0-integration-next.8 + '@prisma/engines@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/engines-version': 7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919 + '@prisma/fetch-engine': 7.4.2 + 
'@prisma/get-platform': 7.4.2 + '@prisma/fetch-engine@6.14.0': dependencies: '@prisma/debug': 6.14.0 @@ -25921,6 +26197,12 @@ snapshots: '@prisma/engines-version': 6.20.0-11.next-80ee0a44bf5668992b0c909c946a755b86b56c95 '@prisma/get-platform': 6.20.0-integration-next.8 + '@prisma/fetch-engine@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/engines-version': 7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919 + '@prisma/get-platform': 7.4.2 + '@prisma/generator-helper@4.16.2': dependencies: '@prisma/debug': 4.16.2 @@ -25946,6 +26228,14 @@ snapshots: dependencies: '@prisma/debug': 6.20.0-integration-next.8 + '@prisma/get-platform@7.2.0': + dependencies: + '@prisma/debug': 7.2.0 + + '@prisma/get-platform@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/instrumentation@6.11.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -25960,12 +26250,20 @@ snapshots: transitivePeerDependencies: - supports-color + '@prisma/query-plan-executor@7.2.0': {} + '@prisma/studio-core-licensed@0.6.0(@types/react@19.2.14)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@types/react': 19.2.14 react: 19.1.0 react-dom: 19.1.0(react@19.1.0) + '@prisma/studio-core@0.13.1(@types/react@19.0.12)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@types/react': 19.0.12 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + '@protobuf-ts/runtime@2.11.1': {} '@protobufjs/aspromise@1.1.2': {} @@ -31502,7 +31800,7 @@ snapshots: '@types/pg@8.11.6': dependencies: '@types/node': 20.14.14 - pg-protocol: 1.6.1 + pg-protocol: 1.10.3 pg-types: 4.0.2 '@types/pg@8.6.1': @@ -32642,6 +32940,8 @@ snapshots: aws-sign2@0.7.0: {} + aws-ssl-profiles@1.1.2: {} + aws4@1.12.0: {} aws4fetch@1.0.18: {} @@ -33063,6 +33363,15 @@ snapshots: chevrotain: 11.0.3 lodash-es: 4.17.21 + chevrotain@10.5.0: + dependencies: + '@chevrotain/cst-dts-gen': 10.5.0 + '@chevrotain/gast': 10.5.0 + '@chevrotain/types': 10.5.0 + '@chevrotain/utils': 10.5.0 + lodash: 4.17.23 
+ regexp-to-ast: 0.5.0 + chevrotain@11.0.3: dependencies: '@chevrotain/cst-dts-gen': 11.0.3 @@ -35380,6 +35689,11 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 + foreground-child@3.3.1: + dependencies: + cross-spawn: 7.0.6 + signal-exit: 4.1.0 + forever-agent@0.6.1: {} form-data-encoder@1.7.2: {} @@ -35504,6 +35818,10 @@ snapshots: functions-have-names@1.2.3: {} + generate-function@2.3.1: + dependencies: + is-property: 1.0.2 + generic-names@4.0.0: dependencies: loader-utils: 3.2.1 @@ -35535,6 +35853,8 @@ snapshots: get-nonce@1.0.1: {} + get-port-please@3.2.0: {} + get-port@5.1.1: {} get-port@7.1.0: {} @@ -35735,6 +36055,8 @@ snapshots: chalk: 4.1.2 tinygradient: 1.1.5 + grammex@3.1.12: {} + grapheme-splitter@1.0.4: {} graphile-config@0.0.1-beta.8: @@ -35767,6 +36089,8 @@ snapshots: - supports-color - typescript + graphmatch@1.1.1: {} + graphql@16.6.0: {} gunzip-maybe@1.4.2: @@ -35979,6 +36303,8 @@ snapshots: dependencies: react-is: 16.13.1 + hono@4.11.4: {} + hono@4.11.8: {} hono@4.5.11: {} @@ -36043,6 +36369,8 @@ snapshots: jsprim: 1.4.2 sshpk: 1.18.0 + http-status-codes@2.3.0: {} + https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 @@ -36333,6 +36661,8 @@ snapshots: is-promise@4.0.0: {} + is-property@1.0.2: {} + is-reference@3.0.3: dependencies: '@types/estree': 1.0.8 @@ -36927,6 +37257,8 @@ snapshots: lru-cache@7.18.3: {} + lru.min@1.1.4: {} + lucide-react@0.229.0(react@18.2.0): dependencies: react: 18.2.0 @@ -37965,12 +38297,28 @@ snapshots: mustache@4.2.0: {} + mysql2@3.15.3: + dependencies: + aws-ssl-profiles: 1.1.2 + denque: 2.1.0 + generate-function: 2.3.1 + iconv-lite: 0.7.2 + long: 5.2.3 + lru.min: 1.1.4 + named-placeholders: 1.1.6 + seq-queue: 0.0.5 + sqlstring: 2.3.3 + mz@2.7.0: dependencies: any-promise: 1.3.0 object-assign: 4.1.1 thenify-all: 1.6.0 + named-placeholders@1.1.6: + dependencies: + lru.min: 1.1.4 + nan@2.23.1: optional: true @@ -38814,8 +39162,6 @@ snapshots: pg-cloudflare@1.2.7: optional: true - pg-connection-string@2.6.4: {} 
- pg-connection-string@2.8.5: {} pg-connection-string@2.9.1: {} @@ -38824,13 +39170,13 @@ snapshots: pg-numeric@1.0.2: {} - pg-pool@3.10.1(pg@8.16.3): + pg-pool@3.10.1(pg@8.11.5): dependencies: - pg: 8.16.3 + pg: 8.11.5 - pg-pool@3.6.2(pg@8.11.5): + pg-pool@3.10.1(pg@8.16.3): dependencies: - pg: 8.11.5 + pg: 8.16.3 pg-pool@3.9.6(pg@8.15.6): dependencies: @@ -38862,9 +39208,9 @@ snapshots: pg@8.11.5: dependencies: - pg-connection-string: 2.6.4 - pg-pool: 3.6.2(pg@8.11.5) - pg-protocol: 1.6.1 + pg-connection-string: 2.9.1 + pg-pool: 3.10.1(pg@8.11.5) + pg-protocol: 1.10.3 pg-types: 2.2.0 pgpass: 1.0.5 optionalDependencies: @@ -39338,6 +39684,23 @@ snapshots: - react - react-dom + prisma@7.4.2(@types/react@19.0.12)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4): + dependencies: + '@prisma/config': 7.4.2(magicast@0.3.5) + '@prisma/dev': 0.20.0(typescript@5.5.4) + '@prisma/engines': 7.4.2 + '@prisma/studio-core': 0.13.1(@types/react@19.0.12)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + mysql2: 3.15.3 + postgres: 3.4.7 + optionalDependencies: + better-sqlite3: 11.10.0 + typescript: 5.5.4 + transitivePeerDependencies: + - '@types/react' + - magicast + - react + - react-dom + prismjs@1.29.0: {} prismjs@1.30.0: {} @@ -39668,7 +40031,7 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0): + react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0): dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 @@ -39705,8 +40068,8 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) shelljs: 0.8.5 - socket.io: 4.7.3 - socket.io-client: 4.7.3 + socket.io: 4.7.3(bufferutil@4.0.9) + socket.io-client: 4.7.3(bufferutil@4.0.9) sonner: 1.3.1(react-dom@18.2.0(react@18.3.1))(react@18.3.1) source-map-js: 1.0.2 stacktrace-parser: 0.1.10 @@ -40154,6 +40517,8 @@ snapshots: dependencies: 
regex-utilities: 2.3.0 + regexp-to-ast@0.5.0: {} + regexp.prototype.flags@1.4.3: dependencies: call-bind: 1.0.8 @@ -40293,6 +40658,8 @@ snapshots: mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + remeda@2.33.4: {} + remend@1.2.1: {} remix-auth-email-link@2.0.2(@remix-run/server-runtime@2.1.0(typescript@5.5.4))(remix-auth@3.6.0(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4))): @@ -40700,6 +41067,8 @@ snapshots: transitivePeerDependencies: - supports-color + seq-queue@0.0.5: {} + serialize-javascript@6.0.1: dependencies: randombytes: 2.1.0 @@ -40952,7 +41321,7 @@ snapshots: - supports-color - utf-8-validate - socket.io-client@4.7.3: + socket.io-client@4.7.3(bufferutil@4.0.9): dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.7(supports-color@10.0.0) @@ -40981,7 +41350,7 @@ snapshots: transitivePeerDependencies: - supports-color - socket.io@4.7.3: + socket.io@4.7.3(bufferutil@4.0.9): dependencies: accepts: 1.3.8 base64id: 2.0.0 @@ -41095,6 +41464,8 @@ snapshots: argparse: 2.0.1 nearley: 2.20.1 + sqlstring@2.3.3: {} + sqs-consumer@7.5.0(@aws-sdk/client-sqs@3.454.0): dependencies: '@aws-sdk/client-sqs': 3.454.0 @@ -41160,6 +41531,8 @@ snapshots: statuses@2.0.2: {} + std-env@3.10.0: {} + std-env@3.7.0: {} std-env@3.8.1: {} @@ -42480,6 +42853,10 @@ snapshots: optionalDependencies: typescript: 5.5.4 + valibot@1.2.0(typescript@5.5.4): + optionalDependencies: + typescript: 5.5.4 + validate-npm-package-license@3.0.4: dependencies: spdx-correct: 3.1.1 @@ -42998,6 +43375,11 @@ snapshots: toposort: 2.0.2 type-fest: 2.19.0 + zeptomatch@2.1.0: + dependencies: + grammex: 3.1.12 + graphmatch: 1.1.1 + zip-stream@6.0.1: dependencies: archiver-utils: 5.0.2 diff --git a/references/ai-chat/.gitignore b/references/ai-chat/.gitignore new file mode 100644 index 00000000000..30838110ecc --- /dev/null +++ b/references/ai-chat/.gitignore @@ -0,0 +1 @@ +lib/generated/ diff --git 
a/references/ai-chat/package.json b/references/ai-chat/package.json index 89ccc36889e..a1d7f184850 100644 --- a/references/ai-chat/package.json +++ b/references/ai-chat/package.json @@ -6,15 +6,22 @@ "dev": "next dev --turbopack", "build": "next build", "start": "next start", - "dev:trigger": "trigger dev" + "dev:trigger": "trigger dev", + "db:migrate": "prisma migrate dev", + "db:push": "prisma db push", + "db:generate": "prisma generate", + "postinstall": "prisma generate" }, "dependencies": { "@ai-sdk/anthropic": "^3.0.0", "@ai-sdk/openai": "^3.0.0", "@ai-sdk/react": "^3.0.0", + "@prisma/adapter-pg": "^7.4.2", + "@prisma/client": "^7.4.2", "@trigger.dev/sdk": "workspace:*", "ai": "^6.0.0", "next": "15.3.3", + "pg": "^8.16.3", "react": "^19.0.0", "react-dom": "^19.0.0", "streamdown": "^2.3.0", @@ -27,6 +34,7 @@ "@types/react": "^19", "@types/react-dom": "^19", "tailwindcss": "^4", + "prisma": "^7.4.2", "trigger.dev": "workspace:*", "typescript": "^5" } diff --git a/references/ai-chat/prisma.config.ts b/references/ai-chat/prisma.config.ts new file mode 100644 index 00000000000..d73df7b3168 --- /dev/null +++ b/references/ai-chat/prisma.config.ts @@ -0,0 +1,12 @@ +import "dotenv/config"; +import { defineConfig, env } from "prisma/config"; + +export default defineConfig({ + schema: "prisma/schema.prisma", + migrations: { + path: "prisma/migrations", + }, + datasource: { + url: env("DATABASE_URL"), + }, +}); diff --git a/references/ai-chat/prisma/migrations/20260305112427_init/migration.sql b/references/ai-chat/prisma/migrations/20260305112427_init/migration.sql new file mode 100644 index 00000000000..951cd33d94e --- /dev/null +++ b/references/ai-chat/prisma/migrations/20260305112427_init/migration.sql @@ -0,0 +1,20 @@ +-- CreateTable +CREATE TABLE "Chat" ( + "id" TEXT NOT NULL, + "title" TEXT NOT NULL, + "messages" JSONB NOT NULL DEFAULT '[]', + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT 
"Chat_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "ChatSession" ( + "id" TEXT NOT NULL, + "runId" TEXT NOT NULL, + "publicAccessToken" TEXT NOT NULL, + "lastEventId" TEXT, + + CONSTRAINT "ChatSession_pkey" PRIMARY KEY ("id") +); diff --git a/references/ai-chat/prisma/migrations/migration_lock.toml b/references/ai-chat/prisma/migrations/migration_lock.toml new file mode 100644 index 00000000000..044d57cdb0d --- /dev/null +++ b/references/ai-chat/prisma/migrations/migration_lock.toml @@ -0,0 +1,3 @@ +# Please do not edit this file manually +# It should be added in your version-control system (e.g., Git) +provider = "postgresql" diff --git a/references/ai-chat/prisma/schema.prisma b/references/ai-chat/prisma/schema.prisma new file mode 100644 index 00000000000..4899b46f73e --- /dev/null +++ b/references/ai-chat/prisma/schema.prisma @@ -0,0 +1,23 @@ +generator client { + provider = "prisma-client" + output = "../lib/generated/prisma" +} + +datasource db { + provider = "postgresql" +} + +model Chat { + id String @id + title String + messages Json @default("[]") + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt +} + +model ChatSession { + id String @id // chatId + runId String + publicAccessToken String + lastEventId String? 
+} diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts index 08657dd1a30..ccf6319f63c 100644 --- a/references/ai-chat/src/app/actions.ts +++ b/references/ai-chat/src/app/actions.ts @@ -2,5 +2,79 @@ import { chat } from "@trigger.dev/sdk/ai"; import type { aiChat } from "@/trigger/chat"; +import { prisma } from "@/lib/prisma"; export const getChatToken = async () => chat.createAccessToken("ai-chat"); + +export async function getChatList() { + const chats = await prisma.chat.findMany({ + select: { id: true, title: true, createdAt: true, updatedAt: true }, + orderBy: { updatedAt: "desc" }, + }); + return chats.map((c) => ({ + id: c.id, + title: c.title, + createdAt: c.createdAt.getTime(), + updatedAt: c.updatedAt.getTime(), + })); +} + +export async function getChatMessages(chatId: string) { + const found = await prisma.chat.findUnique({ where: { id: chatId } }); + if (!found) return []; + return found.messages as any[]; +} + +export async function saveChatMessages(chatId: string, messages: unknown[]) { + await prisma.chat.update({ + where: { id: chatId }, + data: { messages: messages as any }, + }).catch(() => {}); +} + +export async function deleteChat(chatId: string) { + await prisma.chat.delete({ where: { id: chatId } }).catch(() => {}); + await prisma.chatSession.delete({ where: { id: chatId } }).catch(() => {}); +} + +export async function updateChatTitle(chatId: string, title: string) { + await prisma.chat.update({ where: { id: chatId }, data: { title } }).catch(() => {}); +} + +export async function saveSessionAction( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } +) { + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { + id: chatId, + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }, + update: { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: 
session.lastEventId, + }, + }); +} + +export async function deleteSessionAction(chatId: string) { + await prisma.chatSession.delete({ where: { id: chatId } }).catch(() => {}); +} + +export async function getAllSessions() { + const sessions = await prisma.chatSession.findMany(); + const result: Record = + {}; + for (const s of sessions) { + result[s.id] = { + runId: s.runId, + publicAccessToken: s.publicAccessToken, + lastEventId: s.lastEventId ?? undefined, + }; + } + return result; +} diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx index 1a003392f55..cd2eac83cc5 100644 --- a/references/ai-chat/src/app/page.tsx +++ b/references/ai-chat/src/app/page.tsx @@ -1,9 +1,58 @@ -import { Chat } from "@/components/chat"; +"use client"; + +import type { UIMessage } from "ai"; +import { useEffect, useState } from "react"; +import { ChatApp } from "@/components/chat-app"; +import { + getChatList, + getChatMessages, + getAllSessions, +} from "@/app/actions"; + +type ChatMeta = { + id: string; + title: string; + createdAt: number; + updatedAt: number; +}; export default function Home() { + const [chatList, setChatList] = useState([]); + const [activeChatId, setActiveChatId] = useState(null); + const [initialMessages, setInitialMessages] = useState([]); + const [initialSessions, setInitialSessions] = useState< + Record + >({}); + const [loaded, setLoaded] = useState(false); + + useEffect(() => { + async function load() { + const [list, sessions] = await Promise.all([getChatList(), getAllSessions()]); + setChatList(list); + setInitialSessions(sessions); + + let firstChatId: string | null = null; + let firstMessages: UIMessage[] = []; + if (list.length > 0) { + firstChatId = list[0]!.id; + firstMessages = await getChatMessages(firstChatId); + } + + setActiveChatId(firstChatId); + setInitialMessages(firstMessages); + setLoaded(true); + } + load(); + }, []); + + if (!loaded) return null; + return ( -
- -
+ ); } diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx new file mode 100644 index 00000000000..9a8f4642a88 --- /dev/null +++ b/references/ai-chat/src/components/chat-app.tsx @@ -0,0 +1,155 @@ +"use client"; + +import type { UIMessage } from "ai"; +import { generateId } from "ai"; +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import { useCallback, useEffect, useState } from "react"; +import { Chat } from "@/components/chat"; +import { ChatSidebar } from "@/components/chat-sidebar"; +import { + getChatToken, + getChatList, + getChatMessages, + deleteChat as deleteChatAction, + updateChatTitle, + saveSessionAction, + deleteSessionAction, + saveChatMessages, +} from "@/app/actions"; + +type ChatMeta = { + id: string; + title: string; + createdAt: number; + updatedAt: number; +}; + +type ChatAppProps = { + initialChatList: ChatMeta[]; + initialActiveChatId: string | null; + initialMessages: UIMessage[]; + initialSessions: Record< + string, + { runId: string; publicAccessToken: string; lastEventId?: string } + >; +}; + +export function ChatApp({ + initialChatList, + initialActiveChatId, + initialMessages, + initialSessions, +}: ChatAppProps) { + const [chatList, setChatList] = useState(initialChatList); + const [activeChatId, setActiveChatId] = useState(initialActiveChatId); + const [messages, setMessages] = useState(initialMessages); + + const handleSessionChange = useCallback( + ( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => { + if (session) { + saveSessionAction(chatId, session); + } else { + deleteSessionAction(chatId); + } + }, + [] + ); + + const transport = useTriggerChatTransport({ + task: "ai-chat", + accessToken: getChatToken, + baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, + sessions: initialSessions, + onSessionChange: handleSessionChange, + }); + + // Load messages when active chat changes + useEffect(() 
=> { + if (!activeChatId) { + setMessages([]); + return; + } + // Don't reload if we already have the initial messages for the initial chat + if (activeChatId === initialActiveChatId && messages === initialMessages) { + return; + } + getChatMessages(activeChatId).then(setMessages); + }, [activeChatId]); + + function handleNewChat() { + const id = generateId(); + setActiveChatId(id); + setMessages([]); + } + + function handleSelectChat(id: string) { + setActiveChatId(id); + } + + async function handleDeleteChat(id: string) { + await deleteChatAction(id); + const list = await getChatList(); + setChatList(list); + if (activeChatId === id) { + if (list.length > 0) { + setActiveChatId(list[0]!.id); + } else { + setActiveChatId(null); + } + } + } + + const handleFirstMessage = useCallback(async (chatId: string, text: string) => { + const title = text.slice(0, 40).trim() || "New chat"; + await updateChatTitle(chatId, title); + const list = await getChatList(); + setChatList(list); + }, []); + + const handleMessagesChange = useCallback(async (_chatId: string, _messages: UIMessage[]) => { + // Messages are persisted server-side via onTurnComplete. + // Refresh the chat list to update timestamps. + const list = await getChatList(); + setChatList(list); + }, []); + + return ( +
+ +
+ {activeChatId ? ( + + ) : ( +
+
+

No conversation selected

+ +
+
+ )} +
+
+ ); +} diff --git a/references/ai-chat/src/components/chat-sidebar.tsx b/references/ai-chat/src/components/chat-sidebar.tsx new file mode 100644 index 00000000000..bb688e99b14 --- /dev/null +++ b/references/ai-chat/src/components/chat-sidebar.tsx @@ -0,0 +1,82 @@ +"use client"; + +type ChatMeta = { + id: string; + title: string; + createdAt: number; + updatedAt: number; +}; + +function timeAgo(ts: number): string { + const seconds = Math.floor((Date.now() - ts) / 1000); + if (seconds < 60) return "just now"; + const minutes = Math.floor(seconds / 60); + if (minutes < 60) return `${minutes}m ago`; + const hours = Math.floor(minutes / 60); + if (hours < 24) return `${hours}h ago`; + const days = Math.floor(hours / 24); + return `${days}d ago`; +} + +type ChatSidebarProps = { + chats: ChatMeta[]; + activeChatId: string | null; + onSelectChat: (id: string) => void; + onNewChat: () => void; + onDeleteChat: (id: string) => void; +}; + +export function ChatSidebar({ + chats, + activeChatId, + onSelectChat, + onNewChat, + onDeleteChat, +}: ChatSidebarProps) { + const sorted = [...chats].sort((a, b) => b.updatedAt - a.updatedAt); + + return ( +
+
+ +
+ +
+ {sorted.length === 0 && ( +

No conversations yet

+ )} + + {sorted.map((chat) => ( + + ))} +
+
+ ); +} diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 7e9d7a26636..e85361b4d69 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -1,12 +1,11 @@ "use client"; +import type { UIMessage } from "ai"; import { useChat } from "@ai-sdk/react"; -import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import type { TriggerChatTransport } from "@trigger.dev/sdk/chat"; import { useEffect, useRef, useState } from "react"; import { Streamdown } from "streamdown"; -import { getChatToken } from "@/app/actions"; -import { MODEL_OPTIONS, DEFAULT_MODEL } from "@/trigger/chat"; -import type { aiChat } from "@/trigger/chat"; +import { MODEL_OPTIONS, DEFAULT_MODEL } from "@/lib/models"; function ToolInvocation({ part }: { part: any }) { const [expanded, setExpanded] = useState(false); @@ -71,41 +70,73 @@ function ToolInvocation({ part }: { part: any }) { ); } -export function Chat() { +type ChatProps = { + chatId: string; + initialMessages: UIMessage[]; + transport: TriggerChatTransport; + onFirstMessage?: (chatId: string, text: string) => void; + onMessagesChange?: (chatId: string, messages: UIMessage[]) => void; +}; + +export function Chat({ + chatId, + initialMessages, + transport, + onFirstMessage, + onMessagesChange, +}: ChatProps) { const [input, setInput] = useState(""); const [model, setModel] = useState(DEFAULT_MODEL); - // Track which model was used for each assistant message (keyed by the preceding user message ID) const modelByUserMsgId = useRef>(new Map()); - - const transport = useTriggerChatTransport({ - task: "ai-chat", - accessToken: getChatToken, - baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, - }); + const hasCalledFirstMessage = useRef(false); const { messages, sendMessage, stop, status, error } = useChat({ + id: chatId, + messages: initialMessages, transport, }); + // Notify parent of first user message (for chat metadata 
creation) + useEffect(() => { + if (hasCalledFirstMessage.current) return; + const firstUser = messages.find((m) => m.role === "user"); + if (firstUser) { + hasCalledFirstMessage.current = true; + const text = firstUser.parts + .filter((p: any) => p.type === "text") + .map((p: any) => p.text) + .join(" "); + onFirstMessage?.(chatId, text); + } + }, [messages, chatId, onFirstMessage]); + // Pending message to send after the current turn completes const [pendingMessage, setPendingMessage] = useState<{ text: string; model: string } | null>(null); - // Auto-send the pending message when the turn completes + // Handle turn completion: persist messages and auto-send pending message const prevStatus = useRef(status); useEffect(() => { - if (prevStatus.current === "streaming" && status === "ready" && pendingMessage) { + const turnCompleted = prevStatus.current === "streaming" && status === "ready"; + prevStatus.current = status; + + if (!turnCompleted) return; + + // Persist messages when a turn completes — this ensures the final assistant + // message content is saved (not the empty placeholder from mid-stream). 
+ if (messages.length > 0) { + onMessagesChange?.(chatId, messages); + } + + // Auto-send the pending message + if (pendingMessage) { const { text, model: pendingMsgModel } = pendingMessage; setPendingMessage(null); pendingModel.current = pendingMsgModel; sendMessage({ text }, { metadata: { model: pendingMsgModel } }); } - prevStatus.current = status; - }, [status, sendMessage, pendingMessage]); + }, [status, messages, chatId, onMessagesChange, sendMessage, pendingMessage]); - // Build a map of assistant message index -> model used - // Each assistant message follows a user message, so we track by position function getModelForAssistantAt(index: number): string | undefined { - // Walk backwards to find the preceding user message for (let i = index - 1; i >= 0; i--) { if (messages[i]?.role === "user") { return modelByUserMsgId.current.get(messages[i].id); @@ -114,16 +145,13 @@ export function Chat() { return undefined; } - // When sending, record which model is selected for this user message const originalSendMessage = sendMessage; function trackedSendMessage(msg: Parameters[0], opts?: Parameters[1]) { - // We'll track it after the message appears — use a ref to store the pending model pendingModel.current = model; originalSendMessage(msg, opts); } const pendingModel = useRef(model); - // Track model for new user messages as they appear const trackedUserIds = useRef>(new Set()); for (const msg of messages) { if (msg.role === "user" && !trackedUserIds.current.has(msg.id)) { @@ -146,7 +174,6 @@ export function Chat() { className={`flex ${message.role === "user" ? "justify-end" : "justify-start"}`} >
- {/* Model badge for assistant messages */} {message.role === "assistant" && (
@@ -199,7 +226,6 @@ export function Chat() {
)} - {/* Queued message indicator */} {pendingMessage && (
@@ -214,20 +240,17 @@ export function Chat() { )}
- {/* Error */} {error && (
{error.message}
)} - {/* Input */}
{ e.preventDefault(); if (!input.trim()) return; if (status === "streaming") { - // Buffer the message — it will be sent when the current turn completes setPendingMessage({ text: input, model }); } else { trackedSendMessage({ text: input }, { metadata: { model } }); diff --git a/references/ai-chat/src/lib/models.ts b/references/ai-chat/src/lib/models.ts new file mode 100644 index 00000000000..5261a800ab8 --- /dev/null +++ b/references/ai-chat/src/lib/models.ts @@ -0,0 +1,8 @@ +export const MODEL_OPTIONS = [ + "gpt-4o-mini", + "gpt-4o", + "claude-sonnet-4-6", + "claude-opus-4-6", +]; + +export const DEFAULT_MODEL = "gpt-4o-mini"; diff --git a/references/ai-chat/src/lib/prisma.ts b/references/ai-chat/src/lib/prisma.ts new file mode 100644 index 00000000000..5e78334aa82 --- /dev/null +++ b/references/ai-chat/src/lib/prisma.ts @@ -0,0 +1,15 @@ +import { PrismaPg } from "@prisma/adapter-pg"; +import { PrismaClient } from "../../lib/generated/prisma/client"; + +const globalForPrisma = globalThis as unknown as { prisma: PrismaClient | undefined }; + +function createClient() { + const adapter = new PrismaPg({ connectionString: process.env.DATABASE_URL! }); + return new PrismaClient({ adapter }); +} + +export const prisma = globalForPrisma.prisma ?? createClient(); + +if (process.env.NODE_ENV !== "production") { + globalForPrisma.prisma = prisma; +} diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index bd7069f1aeb..3eb6d4402ce 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -5,6 +5,13 @@ import { openai } from "@ai-sdk/openai"; import { anthropic } from "@ai-sdk/anthropic"; import { z } from "zod"; import os from "node:os"; +import { PrismaPg } from "@prisma/adapter-pg"; +import { PrismaClient } from "../../lib/generated/prisma/client"; + +const adapter = new PrismaPg({ connectionString: process.env.DATABASE_URL! 
}); +const prisma = new PrismaClient({ adapter }); + +import { DEFAULT_MODEL } from "@/lib/models"; const MODELS: Record LanguageModel> = { "gpt-4o-mini": () => openai("gpt-4o-mini"), @@ -13,9 +20,6 @@ const MODELS: Record LanguageModel> = { "claude-opus-4-6": () => anthropic("claude-opus-4-6"), }; -export const MODEL_OPTIONS = Object.keys(MODELS); -export const DEFAULT_MODEL = "gpt-4o-mini"; - function getModel(modelId?: string): LanguageModel { const factory = MODELS[modelId ?? DEFAULT_MODEL]; if (!factory) return MODELS[DEFAULT_MODEL]!(); @@ -83,6 +87,19 @@ declare const Deno: unknown; export const aiChat = chat.task({ id: "ai-chat", warmTimeoutInSeconds: 10, + onChatStart: async ({ chatId }) => { + await prisma.chat.upsert({ + where: { id: chatId }, + create: { id: chatId, title: "New chat" }, + update: {}, + }); + }, + onTurnComplete: async ({ chatId, uiMessages }) => { + await prisma.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages as any }, + }); + }, run: async ({ messages, clientData, stopSignal }) => { const { model: modelId } = z .object({ model: z.string().optional() }) diff --git a/references/ai-chat/trigger.config.ts b/references/ai-chat/trigger.config.ts index 4412bfc9325..334df4bc146 100644 --- a/references/ai-chat/trigger.config.ts +++ b/references/ai-chat/trigger.config.ts @@ -1,7 +1,15 @@ import { defineConfig } from "@trigger.dev/sdk"; +import { prismaExtension } from "@trigger.dev/build/extensions/prisma"; export default defineConfig({ project: process.env.TRIGGER_PROJECT_REF!, dirs: ["./src/trigger"], maxDuration: 300, + build: { + extensions: [ + prismaExtension({ + mode: "modern", + }), + ], + }, }); From c83e010c2b983f4143e9631f3165dbff65a77b5e Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 5 Mar 2026 14:19:20 +0000 Subject: [PATCH 30/53] Add ai chat to the sidebar for now --- docs/docs.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/docs.json b/docs/docs.json index 
911c912711b..1591359473e 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -74,13 +74,16 @@ "tags", "runs/metadata", "tasks/streams", - "guides/ai-chat", "run-usage", "context", "runs/priority", "hidden-tasks" ] }, + { + "group": "AI Chat", + "pages": ["guides/ai-chat"] + }, { "group": "Configuration", "pages": [ From e8a895c19e266155d7be0f4aa3885599f8b8c1ba Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 5 Mar 2026 14:20:09 +0000 Subject: [PATCH 31/53] remove postinstall hook --- references/ai-chat/package.json | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json index a1d7f184850..38df927947d 100644 --- a/references/ai-chat/package.json +++ b/references/ai-chat/package.json @@ -9,8 +9,7 @@ "dev:trigger": "trigger dev", "db:migrate": "prisma migrate dev", "db:push": "prisma db push", - "db:generate": "prisma generate", - "postinstall": "prisma generate" + "db:generate": "prisma generate" }, "dependencies": { "@ai-sdk/anthropic": "^3.0.0", @@ -38,4 +37,4 @@ "trigger.dev": "workspace:*", "typescript": "^5" } -} +} \ No newline at end of file From 8dacf08d49ea8e503e7d7cfb1bf8c40647ac38bf Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 5 Mar 2026 18:29:30 +0000 Subject: [PATCH 32/53] feat: add onTurnStart hook, lastEventId support, and stream resume deduplication --- docs/guides/ai-chat.mdx | 173 +++++++++++++----- .../core/src/v3/realtimeStreams/manager.ts | 3 +- .../src/v3/realtimeStreams/noopManager.ts | 2 +- .../src/v3/realtimeStreams/streamInstance.ts | 7 +- .../src/v3/realtimeStreams/streamsWriterV1.ts | 7 +- .../src/v3/realtimeStreams/streamsWriterV2.ts | 10 +- packages/core/src/v3/realtimeStreams/types.ts | 10 +- packages/trigger-sdk/src/v3/ai.ts | 135 +++++++++++++- packages/trigger-sdk/src/v3/chat.ts | 23 ++- packages/trigger-sdk/src/v3/streams.ts | 5 +- references/ai-chat/src/app/actions.ts | 29 +-- .../ai-chat/src/components/chat-app.tsx | 9 +- 
references/ai-chat/src/components/chat.tsx | 3 + references/ai-chat/src/trigger/chat.ts | 30 ++- 14 files changed, 342 insertions(+), 104 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index b8fa9402196..e4b2bc8219c 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -147,7 +147,8 @@ The backend automatically accumulates the full conversation history across turns The accumulated messages are available in: - `run()` as `messages` (`ModelMessage[]`) — for passing to `streamText` -- `onTurnComplete()` as `uiMessages` (`UIMessage[]`) — for persistence +- `onTurnStart()` as `uiMessages` (`UIMessage[]`) — for persisting before streaming +- `onTurnComplete()` as `uiMessages` (`UIMessage[]`) — for persisting after the response ## Backend patterns @@ -258,9 +259,46 @@ export const myChat = chat.task({ `clientData` contains custom data from the frontend — either the `metadata` option on the transport constructor (sent with every message) or the `metadata` option on `sendMessage()` (per-message). See [Client data and metadata](#client-data-and-metadata). +### onTurnStart + +Fires at the start of every turn, after message accumulation and `onChatStart` (turn 0), but **before** `run()` executes. Use it to persist messages before streaming begins — so a mid-stream page refresh still shows the user's message. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | +| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | +| `turn` | `number` | Turn number (0-indexed) | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + By persisting in `onTurnStart`, the user's message is saved to your database before the AI starts streaming. If the user refreshes mid-stream, the message is already there. + + ### onTurnComplete -Fires after each turn completes — after the response is captured, before waiting for the next message. This is the primary hook for persisting conversations. +Fires after each turn completes — after the response is captured, before waiting for the next message. This is the primary hook for persisting the assistant's response. 
| Field | Type | Description | |-------|------|-------------| @@ -271,15 +309,23 @@ Fires after each turn completes — after the response is captured, before waiti | `newUIMessages` | `UIMessage[]` | Only this turn's messages (UI format) | | `responseMessage` | `UIMessage \| undefined` | The assistant's response for this turn | | `turn` | `number` | Turn number (0-indexed) | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `lastEventId` | `string \| undefined` | Stream position for resumption. Persist this with the session. | ```ts export const myChat = chat.task({ id: "my-chat", - onTurnComplete: async ({ chatId, uiMessages }) => { + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { await db.chat.update({ where: { id: chatId }, data: { messages: uiMessages }, }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: chatAccessToken, lastEventId }, + }); }, run: async ({ messages, signal }) => { return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); @@ -291,22 +337,26 @@ export const myChat = chat.task({ Use `uiMessages` to overwrite the full conversation each turn (simplest). Use `newUIMessages` if you prefer to store messages individually — for example, one database row per message. + + Persist `lastEventId` alongside the session. When the transport reconnects after a page refresh, it uses this to skip past already-seen events — preventing duplicate messages. + + ## Persistence ### What needs to be persisted To build a chat app that survives page refreshes, you need to persist two things: -1. **Messages** — The conversation history. Persisted **server-side** in the task via `onTurnComplete`. -2. **Sessions** — The transport's connection state (`runId`, `publicAccessToken`, `lastEventId`). 
Persisted **client-side** via `onSessionChange`. +1. **Messages** — The conversation history. Persisted **server-side** in the task via `onTurnStart` and `onTurnComplete`. +2. **Sessions** — The transport's connection state (`runId`, `publicAccessToken`, `lastEventId`). Persisted **server-side** via `onTurnStart` and `onTurnComplete`. Sessions let the transport reconnect to an existing run after a page refresh. Without them, every page load would start a new run — losing the conversation context that was accumulated in the previous run. -### Persisting messages (server-side) +### Persisting messages and sessions (server-side) -Messages are stored inside the task itself, so they're durable even if the frontend disconnects mid-conversation. +Both messages and sessions are persisted server-side in the lifecycle hooks. `onTurnStart` saves the user's message before streaming begins, while `onTurnComplete` saves the assistant's response and the `lastEventId` for stream resumption. ```ts trigger/chat.ts import { chat } from "@trigger.dev/sdk/ai"; @@ -316,16 +366,34 @@ import { db } from "@/lib/db"; export const myChat = chat.task({ id: "my-chat", - onChatStart: async ({ chatId, clientData }) => { + onChatStart: async ({ chatId }) => { await db.chat.create({ data: { id: chatId, title: "New chat", messages: [] }, }); }, - onTurnComplete: async ({ chatId, uiMessages }) => { + onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { + // Save user message + session before streaming starts await db.chat.update({ where: { id: chatId }, data: { messages: uiMessages }, }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { + // Save assistant response + stream position after turn completes + await db.chat.update({ + where: { id: 
chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: chatAccessToken, lastEventId }, + }); }, run: async ({ messages, signal }) => { return streamText({ @@ -337,13 +405,9 @@ export const myChat = chat.task({ }); ``` -### Persisting sessions (frontend) - -The `onSessionChange` callback on the transport fires whenever a session's state changes: +### Session cleanup (frontend) -- **Session created** — After triggering a new task run -- **Turn completed** — The `lastEventId` is updated (used for stream resumption) -- **Session removed** — The run ended or failed. `session` is `null`. +Since session creation and updates are handled server-side, the frontend only needs to handle session deletion when a run ends: ```tsx const transport = useTriggerChatTransport({ @@ -351,10 +415,8 @@ const transport = useTriggerChatTransport({ accessToken: getChatToken, sessions: loadedSessions, // Restored from DB on page load onSessionChange: (chatId, session) => { - if (session) { - saveSession(chatId, session); // Server action - } else { - deleteSession(chatId); // Server action + if (!session) { + deleteSession(chatId); // Server action — run ended } }, }); @@ -362,7 +424,7 @@ const transport = useTriggerChatTransport({ ### Restoring on page load -On page load, fetch both the messages and the session from your database, then pass them to `useChat` and the transport: +On page load, fetch both the messages and the session from your database, then pass them to `useChat` and the transport. Pass `resume: true` to `useChat` when there's an existing conversation — this tells the AI SDK to reconnect to the stream via the transport. 
```tsx app/page.tsx "use client"; @@ -370,7 +432,7 @@ On page load, fetch both the messages and the session from your database, then p import { useEffect, useState } from "react"; import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; import { useChat } from "@ai-sdk/react"; -import { getChatToken, getChatMessages, getSession } from "@/app/actions"; +import { getChatToken, getChatMessages, getSession, deleteSession } from "@/app/actions"; export default function ChatPage({ chatId }: { chatId: string }) { const [initialMessages, setInitialMessages] = useState([]); @@ -407,8 +469,7 @@ function ChatClient({ chatId, initialMessages, initialSessions }) { accessToken: getChatToken, sessions: initialSessions, onSessionChange: (id, session) => { - if (session) saveSession(id, session); - else deleteSession(id); + if (!session) deleteSession(id); }, }); @@ -416,15 +477,20 @@ function ChatClient({ chatId, initialMessages, initialSessions }) { id: chatId, messages: initialMessages, transport, + resume: initialMessages.length > 0, // Resume if there's an existing conversation }); // ... render UI } ``` + + `resume: true` causes `useChat` to call `reconnectToStream` on the transport when the component mounts. The transport uses the session's `lastEventId` to skip past already-seen stream events, so the frontend only receives new data. Only enable `resume` when there are existing messages — for brand new chats, there's nothing to reconnect to. 
+ + ### Full example -Putting it all together — a complete chat app with server-side message persistence and session reconnection: +Putting it all together — a complete chat app with server-side persistence, session reconnection, and stream resumption: ```ts trigger/chat.ts @@ -440,11 +506,29 @@ export const myChat = chat.task({ data: { id: chatId, title: "New chat", messages: [] }, }); }, - onTurnComplete: async ({ chatId, uiMessages }) => { + onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { + // Persist messages + session before streaming await db.chat.update({ where: { id: chatId }, data: { messages: uiMessages }, }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { + // Persist assistant response + stream position + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: chatAccessToken, lastEventId }, + }); }, run: async ({ messages, signal }) => { return streamText({ @@ -471,19 +555,21 @@ export async function getChatMessages(chatId: string) { return found?.messages ?? 
[]; } -export async function getSession(chatId: string) { - return await db.chatSession.findUnique({ where: { id: chatId } }); -} - -export async function saveSession( - chatId: string, - session: { runId: string; publicAccessToken: string; lastEventId?: string } -) { - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, ...session }, - update: session, - }); +export async function getAllSessions() { + const sessions = await db.chatSession.findMany(); + const result: Record = {}; + for (const s of sessions) { + result[s.id] = { + runId: s.runId, + publicAccessToken: s.publicAccessToken, + lastEventId: s.lastEventId ?? undefined, + }; + } + return result; } export async function deleteSession(chatId: string) { @@ -497,11 +583,7 @@ export async function deleteSession(chatId: string) { import { useChat } from "@ai-sdk/react"; import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; import type { myChat } from "@/trigger/chat"; -import { - getChatToken, - saveSession, - deleteSession, -} from "@/app/actions"; +import { getChatToken, deleteSession } from "@/app/actions"; export function Chat({ chatId, initialMessages, initialSessions }) { const transport = useTriggerChatTransport({ @@ -509,8 +591,7 @@ export function Chat({ chatId, initialMessages, initialSessions }) { accessToken: getChatToken, sessions: initialSessions, onSessionChange: (id, session) => { - if (session) saveSession(id, session); - else deleteSession(id); + if (!session) deleteSession(id); }, }); @@ -518,6 +599,7 @@ export function Chat({ chatId, initialMessages, initialSessions }) { id: chatId, messages: initialMessages, transport, + resume: initialMessages.length > 0, }); return ( @@ -726,6 +808,7 @@ const transport = useTriggerChatTransport({ | `id` | `string` | required | Task identifier | | `run` | `(payload: ChatTaskRunPayload) => Promise` | required | Handler for each turn | | `onChatStart` | `(event: ChatStartEvent) => Promise \| void` | — | Fires on 
turn 0 before `run()` | +| `onTurnStart` | `(event: TurnStartEvent) => Promise \| void` | — | Fires every turn before `run()` | | `onTurnComplete` | `(event: TurnCompleteEvent) => Promise \| void` | — | Fires after each turn completes | | `maxTurns` | `number` | `100` | Max conversational turns per run | | `turnTimeout` | `string` | `"1h"` | How long to wait for next message | diff --git a/packages/core/src/v3/realtimeStreams/manager.ts b/packages/core/src/v3/realtimeStreams/manager.ts index 323735df106..beda3535fb4 100644 --- a/packages/core/src/v3/realtimeStreams/manager.ts +++ b/packages/core/src/v3/realtimeStreams/manager.ts @@ -6,6 +6,7 @@ import { RealtimeStreamInstance, RealtimeStreamOperationOptions, RealtimeStreamsManager, + StreamWriteResult, } from "./types.js"; export class StandardRealtimeStreamsManager implements RealtimeStreamsManager { @@ -16,7 +17,7 @@ export class StandardRealtimeStreamsManager implements RealtimeStreamsManager { ) {} // Track active streams - using a Set allows multiple streams for the same key to coexist private activeStreams = new Set<{ - wait: () => Promise; + wait: () => Promise; abortController: AbortController; }>(); diff --git a/packages/core/src/v3/realtimeStreams/noopManager.ts b/packages/core/src/v3/realtimeStreams/noopManager.ts index 542e66fd53a..881a82294e2 100644 --- a/packages/core/src/v3/realtimeStreams/noopManager.ts +++ b/packages/core/src/v3/realtimeStreams/noopManager.ts @@ -15,7 +15,7 @@ export class NoopRealtimeStreamsManager implements RealtimeStreamsManager { options?: RealtimeStreamOperationOptions ): RealtimeStreamInstance { return { - wait: () => Promise.resolve(), + wait: () => Promise.resolve({}), get stream(): AsyncIterableStream { return createAsyncIterableStreamFromAsyncIterable(source); }, diff --git a/packages/core/src/v3/realtimeStreams/streamInstance.ts b/packages/core/src/v3/realtimeStreams/streamInstance.ts index 6d8106ffe6c..07ee0158bfb 100644 --- 
a/packages/core/src/v3/realtimeStreams/streamInstance.ts +++ b/packages/core/src/v3/realtimeStreams/streamInstance.ts @@ -3,7 +3,7 @@ import { AsyncIterableStream } from "../streams/asyncIterableStream.js"; import { AnyZodFetchOptions } from "../zodfetch.js"; import { StreamsWriterV1 } from "./streamsWriterV1.js"; import { StreamsWriterV2 } from "./streamsWriterV2.js"; -import { StreamsWriter } from "./types.js"; +import { StreamsWriter, StreamWriteResult } from "./types.js"; export type StreamInstanceOptions = { apiClient: ApiClient; @@ -63,8 +63,9 @@ export class StreamInstance implements StreamsWriter { return streamWriter; } - public async wait(): Promise { - return this.streamPromise.then((writer) => writer.wait()); + public async wait(): Promise { + const writer = await this.streamPromise; + return writer.wait(); } public get stream(): AsyncIterableStream { diff --git a/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts b/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts index 2f2b4af1682..c19faf6c2f8 100644 --- a/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts +++ b/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts @@ -2,7 +2,7 @@ import { request as httpsRequest } from "node:https"; import { request as httpRequest } from "node:http"; import { URL } from "node:url"; import { randomBytes } from "node:crypto"; -import { StreamsWriter } from "./types.js"; +import { StreamsWriter, StreamWriteResult } from "./types.js"; export type StreamsWriterV1Options = { baseUrl: string; @@ -258,8 +258,9 @@ export class StreamsWriterV1 implements StreamsWriter { await this.makeRequest(0); } - public async wait(): Promise { - return this.streamPromise; + public async wait(): Promise { + await this.streamPromise; + return {}; } public [Symbol.asyncIterator]() { diff --git a/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts b/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts index 91713630dbe..4d01c8267d8 100644 --- 
a/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts +++ b/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts @@ -1,5 +1,5 @@ import { S2, AppendRecord, BatchTransform } from "@s2-dev/streamstore"; -import { StreamsWriter } from "./types.js"; +import { StreamsWriter, StreamWriteResult } from "./types.js"; import { nanoid } from "nanoid"; export type StreamsWriterV2Options = { @@ -54,6 +54,7 @@ export class StreamsWriterV2 implements StreamsWriter { private readonly maxInflightBytes: number; private aborted = false; private sessionWritable: WritableStream | null = null; + private lastSeqNum: number | undefined; constructor(private options: StreamsWriterV2Options) { this.debug = options.debug ?? false; @@ -169,9 +170,9 @@ export class StreamsWriterV2 implements StreamsWriter { const lastAcked = session.lastAckedPosition(); if (lastAcked?.end) { - const recordsWritten = lastAcked.end.seqNum; + this.lastSeqNum = lastAcked.end.seqNum; this.log( - `[S2MetadataStream] Written ${recordsWritten} records, ending at seqNum=${lastAcked.end.seqNum}` + `[S2MetadataStream] Written ${this.lastSeqNum} records, ending at seqNum=${this.lastSeqNum}` ); } } catch (error) { @@ -184,8 +185,9 @@ export class StreamsWriterV2 implements StreamsWriter { } } - public async wait(): Promise { + public async wait(): Promise { await this.streamPromise; + return { lastEventId: this.lastSeqNum?.toString() }; } public [Symbol.asyncIterator]() { diff --git a/packages/core/src/v3/realtimeStreams/types.ts b/packages/core/src/v3/realtimeStreams/types.ts index cd420b1dce4..1b7455ebd25 100644 --- a/packages/core/src/v3/realtimeStreams/types.ts +++ b/packages/core/src/v3/realtimeStreams/types.ts @@ -26,13 +26,17 @@ export interface RealtimeStreamsManager { ): Promise; } +export type StreamWriteResult = { + lastEventId?: string; +}; + export interface RealtimeStreamInstance { - wait(): Promise; + wait(): Promise; get stream(): AsyncIterableStream; } export interface StreamsWriter { - wait(): 
Promise; + wait(): Promise; } export type RealtimeDefinedStream = { @@ -93,7 +97,7 @@ export type PipeStreamResult = { * to the realtime stream. Use this to wait for the stream to complete before * finishing your task. */ - waitUntilComplete: () => Promise; + waitUntilComplete: () => Promise; }; /** diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 3bb0db9dcd8..16c6667da36 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -4,6 +4,7 @@ import { isSchemaZodEsque, SemanticInternalAttributes, Task, + taskContext, type inferSchemaIn, type PipeStreamOptions, type TaskIdentifier, @@ -12,7 +13,8 @@ import { type TaskWithSchema, } from "@trigger.dev/core/v3"; import type { ModelMessage, UIMessage } from "ai"; -import { convertToModelMessages, dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; +import type { StreamWriteResult } from "@trigger.dev/core/v3"; +import { convertToModelMessages, dynamicTool, generateId as generateMessageId, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; import { type Attributes, trace } from "@opentelemetry/api"; import { auth } from "./auth.js"; import { metadata } from "./metadata.js"; @@ -153,7 +155,7 @@ export const ai = { function createChatAccessToken( taskId: TaskIdentifier ): Promise { - return auth.createTriggerPublicToken(taskId as string, { multipleUse: true }); + return auth.createTriggerPublicToken(taskId as string, { expirationTime: "24h" }); } // --------------------------------------------------------------------------- @@ -389,6 +391,28 @@ export type ChatStartEvent = { messages: ModelMessage[]; /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ clientData: unknown; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A scoped access token for this chat run. Persist this for frontend reconnection. 
*/ + chatAccessToken: string; +}; + +/** + * Event passed to the `onTurnStart` callback. + */ +export type TurnStartEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The accumulated model-ready messages (all turns so far, including new user message). */ + messages: ModelMessage[]; + /** The accumulated UI messages (all turns so far, including new user message). */ + uiMessages: UIMessage[]; + /** The turn number (0-indexed). */ + turn: number; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A scoped access token for this chat run. */ + chatAccessToken: string; }; /** @@ -418,6 +442,12 @@ export type TurnCompleteEvent = { responseMessage: UIMessage | undefined; /** The turn number (0-indexed). */ turn: number; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A fresh scoped access token for this chat run (renewed each turn). Persist this for frontend reconnection. */ + chatAccessToken: string; + /** The last event ID from the stream writer. Use this with `resume: true` to avoid replaying events after refresh. */ + lastEventId?: string; }; export type ChatTaskOptions = Omit< @@ -449,6 +479,22 @@ export type ChatTaskOptions = Omit< */ onChatStart?: (event: ChatStartEvent) => Promise | void; + /** + * Called at the start of every turn, after message accumulation and `onChatStart` (turn 0), + * but before the `run` function executes. + * + * Use this to persist messages before streaming begins, so a mid-stream page refresh + * still shows the user's message. + * + * @example + * ```ts + * onTurnStart: async ({ chatId, uiMessages }) => { + * await db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } }); + * } + * ``` + */ + onTurnStart?: (event: TurnStartEvent) => Promise | void; + /** * Called after each turn completes (after the response is captured, before waiting * for the next message). Also fires on the final turn. 
@@ -492,6 +538,17 @@ export type ChatTaskOptions = Omit< * @default 30 */ warmTimeoutInSeconds?: number; + + /** + * How long the `chatAccessToken` (scoped to this run) remains valid. + * A fresh token is minted after each turn, so this only needs to cover + * the gap between turns. + * + * Accepts a duration string (e.g. `"1h"`, `"30m"`, `"2h"`). + * + * @default "1h" + */ + chatAccessTokenTTL?: string; }; /** @@ -527,10 +584,12 @@ function chatTask( const { run: userRun, onChatStart, + onTurnStart, onTurnComplete, maxTurns = 100, turnTimeout = "1h", warmTimeoutInSeconds = 30, + chatAccessTokenTTL = "1h", ...restOptions } = options; @@ -653,6 +712,24 @@ function chatTask( turnNewUIMessages.push(...uiMessages); } + // Mint a scoped public access token once per turn, reused for + // onChatStart, onTurnStart, onTurnComplete, and the turn-complete chunk. + const currentRunId = taskContext.ctx?.run.id ?? ""; + let turnAccessToken = ""; + if (currentRunId) { + try { + turnAccessToken = await auth.createPublicToken({ + scopes: { + read: { runs: currentRunId }, + write: { inputStreams: currentRunId }, + }, + expirationTime: chatAccessTokenTTL, + }); + } catch { + // Token creation failed + } + } + // Fire onChatStart on the first turn if (turn === 0 && onChatStart) { await tracer.startActiveSpan( @@ -662,6 +739,32 @@ function chatTask( chatId: currentWirePayload.chatId, messages: accumulatedMessages, clientData: wireMetadata, + runId: currentRunId, + chatAccessToken: turnAccessToken, + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + }, + } + ); + } + + // Fire onTurnStart before running user code — persist messages + // so a mid-stream page refresh still shows the user's message. 
+ if (onTurnStart) { + await tracer.startActiveSpan( + "onTurnStart()", + async () => { + await onTurnStart({ + chatId: currentWirePayload.chatId, + messages: accumulatedMessages, + uiMessages: accumulatedUIMessages, + turn, + runId: currentRunId, + chatAccessToken: turnAccessToken, }); }, { @@ -715,6 +818,12 @@ function chatTask( // The onFinish callback fires even on abort/stop, so partial responses // from stopped generation are captured correctly. if (capturedResponseMessage) { + // Ensure the response message has an ID (the stream's onFinish + // may produce a message with an empty ID since IDs are normally + // assigned by the frontend's useChat). + if (!capturedResponseMessage.id) { + capturedResponseMessage = { ...capturedResponseMessage, id: generateMessageId() }; + } accumulatedUIMessages.push(capturedResponseMessage); turnNewUIMessages.push(capturedResponseMessage); try { @@ -734,6 +843,13 @@ function chatTask( if (runSignal.aborted) return "exit"; + // Write turn-complete control chunk so frontend closes its stream. + // Capture the lastEventId from the stream writer for resume support. 
+ const turnCompleteResult = await writeTurnCompleteChunk( + currentWirePayload.chatId, + turnAccessToken + ); + // Fire onTurnComplete after response capture if (onTurnComplete) { await tracer.startActiveSpan( @@ -747,6 +863,9 @@ function chatTask( newUIMessages: turnNewUIMessages, responseMessage: capturedResponseMessage, turn, + runId: currentRunId, + chatAccessToken: turnAccessToken, + lastEventId: turnCompleteResult.lastEventId, }); }, { @@ -758,9 +877,6 @@ function chatTask( ); } - // Write turn-complete control chunk so frontend closes its stream - await writeTurnCompleteChunk(currentWirePayload.chatId); - // If messages arrived during streaming, use the first one immediately if (pendingMessages.length > 0) { currentWirePayload = pendingMessages[0]!; @@ -927,15 +1043,18 @@ export const chat = { * The frontend transport intercepts this to close the ReadableStream for the current turn. * @internal */ -async function writeTurnCompleteChunk(chatId?: string): Promise { +async function writeTurnCompleteChunk(chatId?: string, publicAccessToken?: string): Promise { const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { spanName: "turn complete", collapsed: true, execute: ({ write }) => { - write({ type: "__trigger_turn_complete" }); + write({ + type: "__trigger_turn_complete", + ...(publicAccessToken ? 
{ publicAccessToken } : {}), + }); }, }); - await waitUntilComplete(); + return await waitUntilComplete(); } /** diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index eb0aae7ccde..8cbdb486de9 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -210,6 +210,7 @@ export class TriggerChatTransport implements ChatTransport { | undefined; private sessions: Map = new Map(); + private activeReconnects: Map = new Map(); constructor(options: TriggerChatTransportOptions) { this.taskId = options.task; @@ -330,7 +331,21 @@ export class TriggerChatTransport implements ChatTransport { return null; } - return this.subscribeToStream(session.runId, session.publicAccessToken, undefined, options.chatId); + // Abort any previous reconnect for this chatId (e.g. React strict mode + // double-firing the effect) to avoid duplicate SSE connections. + const prev = this.activeReconnects.get(options.chatId); + if (prev) { + prev.abort(); + } + const reconnectAbort = new AbortController(); + this.activeReconnects.set(options.chatId, reconnectAbort); + + return this.subscribeToStream( + session.runId, + session.publicAccessToken, + reconnectAbort.signal, + options.chatId + ); }; /** @@ -493,7 +508,11 @@ export class TriggerChatTransport implements ChatTransport { } if (chunk.type === "__trigger_turn_complete" && chatId) { - // Notify with updated lastEventId before closing + // Update token if a refreshed one was provided in the chunk + if (session && typeof chunk.publicAccessToken === "string") { + session.publicAccessToken = chunk.publicAccessToken; + } + // Notify with updated session (including refreshed token) if (session) { this.notifySessionChange(chatId, session); } diff --git a/packages/trigger-sdk/src/v3/streams.ts b/packages/trigger-sdk/src/v3/streams.ts index eafaea88fbc..6bdf862ebdc 100644 --- a/packages/trigger-sdk/src/v3/streams.ts +++ b/packages/trigger-sdk/src/v3/streams.ts @@ -27,6 +27,7 @@ import 
{ type InputStreamWaitOptions, type SendInputStreamOptions, type InferInputStreamType, + type StreamWriteResult, } from "@trigger.dev/core/v3"; import { conditionallyImportAndParsePacket } from "@trigger.dev/core/v3/utils/ioSerialization"; import { tracer } from "./tracer.js"; @@ -195,7 +196,9 @@ function pipeInternal( return { stream: instance.stream, - waitUntilComplete: () => instance.wait(), + waitUntilComplete: async () => { + return instance.wait(); + }, }; } catch (error) { // if the error is a signal abort error, we need to end the span but not record an exception diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts index ccf6319f63c..3b6c55e7146 100644 --- a/references/ai-chat/src/app/actions.ts +++ b/references/ai-chat/src/app/actions.ts @@ -25,13 +25,6 @@ export async function getChatMessages(chatId: string) { return found.messages as any[]; } -export async function saveChatMessages(chatId: string, messages: unknown[]) { - await prisma.chat.update({ - where: { id: chatId }, - data: { messages: messages as any }, - }).catch(() => {}); -} - export async function deleteChat(chatId: string) { await prisma.chat.delete({ where: { id: chatId } }).catch(() => {}); await prisma.chatSession.delete({ where: { id: chatId } }).catch(() => {}); @@ -41,24 +34,10 @@ export async function updateChatTitle(chatId: string, title: string) { await prisma.chat.update({ where: { id: chatId }, data: { title } }).catch(() => {}); } -export async function saveSessionAction( - chatId: string, - session: { runId: string; publicAccessToken: string; lastEventId?: string } -) { - await prisma.chatSession.upsert({ - where: { id: chatId }, - create: { - id: chatId, - runId: session.runId, - publicAccessToken: session.publicAccessToken, - lastEventId: session.lastEventId, - }, - update: { - runId: session.runId, - publicAccessToken: session.publicAccessToken, - lastEventId: session.lastEventId, - }, - }); +export async function 
updateSessionLastEventId(chatId: string, lastEventId: string) { + await prisma.chatSession + .update({ where: { id: chatId }, data: { lastEventId } }) + .catch(() => {}); } export async function deleteSessionAction(chatId: string) { diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index 9a8f4642a88..1d065146bfc 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -12,9 +12,7 @@ import { getChatMessages, deleteChat as deleteChatAction, updateChatTitle, - saveSessionAction, deleteSessionAction, - saveChatMessages, } from "@/app/actions"; type ChatMeta = { @@ -49,9 +47,9 @@ export function ChatApp({ chatId: string, session: { runId: string; publicAccessToken: string; lastEventId?: string } | null ) => { - if (session) { - saveSessionAction(chatId, session); - } else { + // Session creation and token updates are handled server-side via onChatStart/onTurnComplete. + // We only need to clean up when the run ends (session = null). 
+ if (!session) { deleteSessionAction(chatId); } }, @@ -132,6 +130,7 @@ export function ChatApp({ chatId={activeChatId} initialMessages={messages} transport={transport} + resume={messages.length > 0} onFirstMessage={handleFirstMessage} onMessagesChange={handleMessagesChange} /> diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index e85361b4d69..1ee49e8781c 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -74,6 +74,7 @@ type ChatProps = { chatId: string; initialMessages: UIMessage[]; transport: TriggerChatTransport; + resume?: boolean; onFirstMessage?: (chatId: string, text: string) => void; onMessagesChange?: (chatId: string, messages: UIMessage[]) => void; }; @@ -82,6 +83,7 @@ export function Chat({ chatId, initialMessages, transport, + resume: resumeProp, onFirstMessage, onMessagesChange, }: ChatProps) { @@ -94,6 +96,7 @@ export function Chat({ id: chatId, messages: initialMessages, transport, + resume: resumeProp, }); // Notify parent of first user message (for chat metadata creation) diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 3eb6d4402ce..61c455341d3 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -86,19 +86,43 @@ declare const Deno: unknown; export const aiChat = chat.task({ id: "ai-chat", - warmTimeoutInSeconds: 10, - onChatStart: async ({ chatId }) => { + warmTimeoutInSeconds: 60, + chatAccessTokenTTL: "2h", + onChatStart: async ({ chatId, runId, chatAccessToken }) => { await prisma.chat.upsert({ where: { id: chatId }, create: { id: chatId, title: "New chat" }, update: {}, }); + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); }, - onTurnComplete: async ({ chatId, uiMessages }) => { + onTurnStart: async 
({ chatId, uiMessages, runId, chatAccessToken }) => { + // Persist messages BEFORE streaming so mid-stream refresh has the user message await prisma.chat.update({ where: { id: chatId }, data: { messages: uiMessages as any }, }); + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { + // Persist final messages + assistant response + stream position + await prisma.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages as any }, + }); + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: chatAccessToken, lastEventId }, + }); }, run: async ({ messages, clientData, stopSignal }) => { const { model: modelId } = z From 7f83f4cd751603cd929dd69169e4906e91523442 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 10:47:30 +0000 Subject: [PATCH 33/53] Minor fixes around reconnecting streams --- packages/trigger-sdk/src/v3/chat.ts | 48 ++++++++++++---------- references/ai-chat/README.md | 62 +++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 22 deletions(-) create mode 100644 references/ai-chat/README.md diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 8cbdb486de9..265bce1f4f9 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -210,7 +210,7 @@ export class TriggerChatTransport implements ChatTransport { | undefined; private sessions: Map = new Map(); - private activeReconnects: Map = new Map(); + private activeStreams: Map = new Map(); constructor(options: TriggerChatTransportOptions) { this.taskId = options.task; @@ -277,6 +277,15 @@ export class TriggerChatTransport implements ChatTransport { 
const apiClient = new ApiClient(this.baseURL, session.publicAccessToken); await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, minimalPayload); + + // Cancel any active reconnect stream for this chatId before + // opening a new subscription for the new turn. + const activeStream = this.activeStreams.get(chatId); + if (activeStream) { + activeStream.abort(); + this.activeStreams.delete(chatId); + } + return this.subscribeToStream( session.runId, session.publicAccessToken, @@ -331,20 +340,21 @@ export class TriggerChatTransport implements ChatTransport { return null; } - // Abort any previous reconnect for this chatId (e.g. React strict mode - // double-firing the effect) to avoid duplicate SSE connections. - const prev = this.activeReconnects.get(options.chatId); - if (prev) { - prev.abort(); + // Deduplicate: if there's already an active stream for this chatId, + // return null so the second caller no-ops. + if (this.activeStreams.has(options.chatId)) { + return null; } - const reconnectAbort = new AbortController(); - this.activeReconnects.set(options.chatId, reconnectAbort); + + const abortController = new AbortController(); + this.activeStreams.set(options.chatId, abortController); return this.subscribeToStream( session.runId, session.publicAccessToken, - reconnectAbort.signal, - options.chatId + abortController.signal, + options.chatId, + { sendStopOnAbort: false } ); }; @@ -408,7 +418,8 @@ export class TriggerChatTransport implements ChatTransport { runId: string, accessToken: string, abortSignal: AbortSignal | undefined, - chatId?: string + chatId?: string, + options?: { sendStopOnAbort?: boolean } ): ReadableStream { const headers: Record = { Authorization: `Bearer ${accessToken}`, @@ -427,13 +438,14 @@ export class TriggerChatTransport implements ChatTransport { ? 
AbortSignal.any([abortSignal, internalAbort.signal]) : internalAbort.signal; - // When the caller aborts (user calls stop()), send a stop signal to the - // running task via input streams, then close the SSE connection. + // When the caller aborts (user calls stop()), close the SSE connection. + // Only send a stop signal to the task if this is a user-initiated stop + // (sendStopOnAbort), not an internal stream management abort. if (abortSignal) { abortSignal.addEventListener( "abort", () => { - if (session) { + if (options?.sendStopOnAbort !== false && session) { session.skipToTurnComplete = true; const api = new ApiClient(this.baseURL, session.publicAccessToken); api @@ -468,14 +480,6 @@ export class TriggerChatTransport implements ChatTransport { const { done, value } = await reader.read(); if (done) { - // Only delete session if the stream ended naturally (not aborted by stop). - // When the user clicks stop, the abort closes the SSE reader which - // returns done=true, but the run is still alive and waiting for - // the next message via input streams. - if (chatId && !combinedSignal.aborted) { - this.sessions.delete(chatId); - this.notifySessionChange(chatId, null); - } controller.close(); return; } diff --git a/references/ai-chat/README.md b/references/ai-chat/README.md new file mode 100644 index 00000000000..39a6038f8c8 --- /dev/null +++ b/references/ai-chat/README.md @@ -0,0 +1,62 @@ +# AI Chat Reference App + +A multi-turn chat app built with the AI SDK's `useChat` hook and Trigger.dev's `chat.task`. Conversations run as durable Trigger.dev tasks with realtime streaming, automatic message accumulation, and persistence across page refreshes. + +## Data Models + +### Chat + +The conversation itself — your application data. 
+ +| Column | Description | +| ---------- | ---------------------------------------- | +| `id` | Unique chat ID (generated on the client) | +| `title` | Display title for the sidebar | +| `messages` | Full `UIMessage[]` history (JSON) | + +A Chat lives forever (until the user deletes it). It is independent of any particular Trigger.dev run. + +### ChatSession + +The transport's connection state for a chat — what the frontend needs to reconnect to the same Trigger.dev run after a page refresh. + +| Column | Description | +| ------------------- | --------------------------------------------------------------------------- | +| `id` | Same as the chat ID (1:1 relationship) | +| `runId` | The Trigger.dev run handling this conversation | +| `publicAccessToken` | Scoped token for reading the run's stream and sending input stream messages | +| `lastEventId` | Stream position — used to resume without replaying old events | + +A Chat can outlive many ChatSessions. When the run ends (turn timeout, max turns reached, crash), the ChatSession is gone but the Chat and its messages remain. The next message from the user starts a fresh run and creates a new ChatSession for the same Chat. + +**Think of it as: Chat = the conversation, ChatSession = the live connection to the run handling it.** + +## Lifecycle Hooks + +Persistence is handled server-side in the Trigger.dev task via three hooks: + +- **`onChatStart`** — Creates the Chat and ChatSession records when a new conversation starts (turn 0). +- **`onTurnStart`** — Saves messages and updates the session _before_ streaming begins, so a mid-stream page refresh still shows the user's message. +- **`onTurnComplete`** — Saves the assistant's response and the `lastEventId` for stream resumption. 
+ +## Setup + +```bash +# From the repo root +pnpm run docker # Start PostgreSQL, Redis, Electric +pnpm run db:migrate # Run webapp migrations +pnpm run db:seed # Seed the database + +# Set up the reference app's database +cd references/ai-chat +cp .env.example .env # Edit DATABASE_URL if needed +npx prisma migrate deploy + +# Build and run +pnpm run build --filter trigger.dev --filter @trigger.dev/sdk +pnpm run dev --filter webapp # In one terminal +cd references/ai-chat && pnpm exec trigger dev # In another +cd references/ai-chat && pnpm run dev # In another +``` + +Open http://localhost:3000 to use the chat app. From 9ebf6a8d3d0d35a0b004cb71d552fb895e171acf Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 13:33:01 +0000 Subject: [PATCH 34/53] update pnpm link file --- pnpm-lock.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e58d8fa4898..9827964dc8a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1104,7 +1104,7 @@ importers: version: 18.3.1 react-email: specifier: ^2.1.1 - version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0) + version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0) resend: specifier: ^3.2.0 version: 3.2.0 @@ -40031,7 +40031,7 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0): + react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0): dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 @@ -40068,8 +40068,8 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) shelljs: 0.8.5 - socket.io: 4.7.3(bufferutil@4.0.9) - socket.io-client: 4.7.3(bufferutil@4.0.9) + socket.io: 4.7.3 + socket.io-client: 4.7.3 sonner: 1.3.1(react-dom@18.2.0(react@18.3.1))(react@18.3.1) source-map-js: 1.0.2 stacktrace-parser: 0.1.10 @@ -41321,7 +41321,7 @@ snapshots: - 
supports-color - utf-8-validate - socket.io-client@4.7.3(bufferutil@4.0.9): + socket.io-client@4.7.3: dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.7(supports-color@10.0.0) @@ -41350,7 +41350,7 @@ snapshots: transitivePeerDependencies: - supports-color - socket.io@4.7.3(bufferutil@4.0.9): + socket.io@4.7.3: dependencies: accepts: 1.3.8 base64id: 2.0.0 From 155336b10cd4636c6bbf3abf14843d64c1c9e42d Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 14:34:49 +0000 Subject: [PATCH 35/53] fixed chat tests --- packages/trigger-sdk/src/v3/chat.test.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts index 50138b57287..a615f9668da 100644 --- a/packages/trigger-sdk/src/v3/chat.test.ts +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -1797,7 +1797,7 @@ describe("TriggerChatTransport", () => { expect(lastCall![1].lastEventId).toBeDefined(); }); - it("should fire with null when session is deleted (stream ends naturally)", async () => { + it("should preserve session when stream ends naturally (run stays alive between turns)", async () => { const onSessionChange = vi.fn(); global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { @@ -1846,11 +1846,12 @@ describe("TriggerChatTransport", () => { const reader = stream.getReader(); while (!(await reader.read()).done) {} - // Session should have been created then deleted + // Session should have been created but NOT deleted — the run stays + // alive between turns and the session is needed for reconnection. 
expect(onSessionChange).toHaveBeenCalledWith("chat-end", expect.objectContaining({ runId: "run_end", })); - expect(onSessionChange).toHaveBeenCalledWith("chat-end", null); + expect(onSessionChange).not.toHaveBeenCalledWith("chat-end", null); }); it("should be updatable via setOnSessionChange", async () => { From 016a44634d8af6c6b873bf66a07b49d4355ae296 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 14:52:10 +0000 Subject: [PATCH 36/53] use locals for the chat pipe counter instead of a module global --- packages/trigger-sdk/src/v3/ai.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 16c6667da36..3140402d4ad 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -17,6 +17,7 @@ import type { StreamWriteResult } from "@trigger.dev/core/v3"; import { convertToModelMessages, dynamicTool, generateId as generateMessageId, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; import { type Attributes, trace } from "@opentelemetry/api"; import { auth } from "./auth.js"; +import { locals } from "./locals.js"; import { metadata } from "./metadata.js"; import { streams } from "./streams.js"; import { createTask } from "./shared.js"; @@ -239,12 +240,11 @@ const messagesInput = streams.input({ id: CHAT_MESSAGES_STR const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); /** - * Tracks how many times `pipeChat` has been called in the current `chatTask` run. - * Used to prevent double-piping when a user both calls `pipeChat()` manually - * and returns a streamable from their `run` function. + * Run-scoped pipe counter. Stored in locals so concurrent runs in the + * same worker don't share state. * @internal */ -let _chatPipeCount = 0; +const chatPipeCountKey = locals.create("chat.pipeCount"); /** * Options for `pipeChat`. 
@@ -336,7 +336,7 @@ async function pipeChat( source: UIMessageStreamable | AsyncIterable | ReadableStream, options?: PipeChatOptions ): Promise { - _chatPipeCount++; + locals.set(chatPipeCountKey, (locals.get(chatPipeCountKey) ?? 0) + 1); const streamKey = options?.streamKey ?? CHAT_STREAM_KEY; let stream: AsyncIterable | ReadableStream; @@ -662,7 +662,7 @@ function chatTask( const turnResult = await tracer.startActiveSpan( `chat turn ${turn + 1}`, async () => { - _chatPipeCount = 0; + locals.set(chatPipeCountKey, 0); // Per-turn stop controller (reset each turn) const stopController = new AbortController(); @@ -792,7 +792,7 @@ function chatTask( // Auto-pipe if the run function returned a StreamTextResult or similar, // but only if pipeChat() wasn't already called manually during this turn. // We call toUIMessageStream ourselves to attach onFinish for response capture. - if (_chatPipeCount === 0 && isUIMessageStreamable(result)) { + if ((locals.get(chatPipeCountKey) ?? 0) === 0 && isUIMessageStreamable(result)) { const uiStream = result.toUIMessageStream({ onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { capturedResponseMessage = responseMessage; From 9fe4c8a13a670eaecab942506f9511f1506c4100 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 15:28:22 +0000 Subject: [PATCH 37/53] Add triggerOptions to the transport, auto-tag with the chat ID --- .../plan-graceful-oversized-batch-items.md | 257 ------------------ docs/guides/ai-chat.mdx | 30 ++ packages/trigger-sdk/src/v3/chat.ts | 43 +++ .../ai-chat/src/components/chat-app.tsx | 3 + 4 files changed, 76 insertions(+), 257 deletions(-) delete mode 100644 .scratch/plan-graceful-oversized-batch-items.md diff --git a/.scratch/plan-graceful-oversized-batch-items.md b/.scratch/plan-graceful-oversized-batch-items.md deleted file mode 100644 index cb463b96252..00000000000 --- a/.scratch/plan-graceful-oversized-batch-items.md +++ /dev/null @@ -1,257 +0,0 @@ -# Graceful handling of oversized 
batch items - -## Prerequisites - -This plan builds on top of PR #2980 which provides: -- `TriggerFailedTaskService` at `apps/webapp/app/runEngine/services/triggerFailedTask.server.ts` - creates pre-failed TaskRuns with proper trace events, waitpoint connections, and parent run associations -- `engine.createFailedTaskRun()` on RunEngine - creates a SYSTEM_FAILURE run with associated waitpoints -- Retry support in `processItemCallback` with `attempt` and `isFinalAttempt` params -- The callback already uses `TriggerFailedTaskService` for items that fail after retries - -## Problem - -When the NDJSON parser in `createNdjsonParserStream` detects an oversized line, it throws inside the TransformStream's `transform()` method. This aborts the request body stream (due to `pipeThrough` coupling), causing the client's `fetch()` to see `TypeError: fetch failed` instead of the server's 400 response. The SDK treats this as a connection error and retries with exponential backoff (~25s wasted). - -## Goal - -Instead of throwing, treat oversized items as per-item failures that flow through the existing batch failure pipeline. The batch seals normally, other items process fine, and the user sees a clear failure for the specific oversized item(s). - -## Approach - -The NDJSON parser emits an error marker object instead of throwing. `StreamBatchItemsService` detects these markers and enqueues the item to the FairQueue with error metadata in its options. The `processItemCallback` (already enhanced with `TriggerFailedTaskService` in PR #2980) detects the error metadata and creates a pre-failed run via `TriggerFailedTaskService`, which handles all the waitpoint/trace machinery. - -## Changes - -### 1. 
Byte-level key extractor for oversized lines - -**`apps/webapp/app/runEngine/services/streamBatchItems.server.ts`** - new function - -Add `extractIndexAndTask(bytes: Uint8Array): { index: number; task: string }` - a state machine that extracts top-level `"index"` and `"task"` values from raw bytes without decoding the full line. - -How it works: -- Scan bytes tracking JSON nesting depth (count `{`/`[` vs `}`/`]`) -- At depth 1 (inside the top-level object), look for byte sequences matching `"index"` and `"task"` key patterns -- For `"index"`: after the `:`, parse the digit sequence as a number -- For `"task"`: after the `:`, find opening `"`, read bytes until closing `"`, decode just that slice -- Stop when both found, or after scanning 512 bytes (whichever comes first) -- Fallback: `index = -1`, `task = "unknown"` if not found - -This avoids decoding/allocating the full 3MB line - only the first few hundred bytes are examined. - -### 2. Modify `createNdjsonParserStream` to emit error markers - -**`apps/webapp/app/runEngine/services/streamBatchItems.server.ts`** - -Define a marker type: -```typescript -type OversizedItemMarker = { - __batchItemError: "OVERSIZED"; - index: number; - task: string; - actualSize: number; - maxSize: number; -}; -``` - -**Case 1 - Complete line exceeds limit** (newline found, `newlineIndex > maxItemBytes`): -- Call `extractLine(newlineIndex)` to consume the line from the buffer -- Call `extractIndexAndTask(lineBytes)` on the extracted bytes -- `controller.enqueue(marker)` instead of throwing -- Increment `lineNumber` and continue - -**Case 2 - Incomplete line exceeds limit** (no newline, `totalBytes > maxItemBytes`): -- Call `extractIndexAndTask(concatenateChunks())` on current buffer -- `controller.enqueue(marker)` -- Clear the buffer (`chunks = []; totalBytes = 0`) -- Return from transform (don't throw) - -**Case 3 - Flush with oversized remaining** (`totalBytes > maxItemBytes` in flush): -- Same as case 2 but in `flush()`. - -### 3. 
Handle markers in `StreamBatchItemsService` - -**`apps/webapp/app/runEngine/services/streamBatchItems.server.ts`** - in the `for await` loop - -Before the existing `BatchItemNDJSONSchema.safeParse(rawItem)`, check for the marker: - -```typescript -if (rawItem && typeof rawItem === "object" && (rawItem as any).__batchItemError === "OVERSIZED") { - const marker = rawItem as OversizedItemMarker; - const itemIndex = marker.index >= 0 ? marker.index : lastIndex + 1; - - const errorMessage = `Batch item payload is too large (${(marker.actualSize / 1024).toFixed(1)} KB). Maximum allowed size is ${(marker.maxSize / 1024).toFixed(1)} KB. Reduce the payload size or offload large data to external storage.`; - - // Enqueue the item normally but with error metadata in options. - // The processItemCallback will detect __error and use TriggerFailedTaskService - // to create a pre-failed run with proper waitpoint connections. - const batchItem: BatchItem = { - task: marker.task, - payload: "{}", - payloadType: "application/json", - options: { - __error: errorMessage, - __errorCode: "PAYLOAD_TOO_LARGE", - }, - }; - - const result = await this._engine.enqueueBatchItem( - batchId, environment.id, itemIndex, batchItem - ); - - if (result.enqueued) { - itemsAccepted++; - } else { - itemsDeduplicated++; - } - lastIndex = itemIndex; - continue; -} -``` - -### 4. Handle `__error` items in `processItemCallback` - -**`apps/webapp/app/v3/runEngineHandlers.server.ts`** - in the `setupBatchQueueCallbacks` function - -In the `processItemCallback`, before the `TriggerTaskService.call()`, check for `__error` in `item.options`: - -```typescript -const itemError = item.options?.__error as string | undefined; -if (itemError) { - const errorCode = (item.options?.__errorCode as string) ?? "ITEM_ERROR"; - - // Use TriggerFailedTaskService to create a pre-failed run. - // This creates a proper TaskRun with waitpoint connections so the - // parent's batchTriggerAndWait resolves correctly for this item. 
- const failedRunId = await triggerFailedTaskService.call({ - taskId: item.task, - environment, - payload: item.payload ?? "{}", - payloadType: item.payloadType, - errorMessage: itemError, - errorCode: errorCode as TaskRunErrorCodes, - parentRunId: meta.parentRunId, - resumeParentOnCompletion: meta.resumeParentOnCompletion, - batch: { id: batchId, index: itemIndex }, - traceContext: meta.traceContext as Record | undefined, - spanParentAsLink: meta.spanParentAsLink, - }); - - if (failedRunId) { - span.setAttribute("batch.result.pre_failed", true); - span.setAttribute("batch.result.run_id", failedRunId); - span.end(); - return { success: true as const, runId: failedRunId }; - } - - // Fallback if TriggerFailedTaskService fails - span.end(); - return { success: false as const, error: itemError, errorCode }; -} -``` - -Note: this returns `{ success: true, runId }` because the pre-failed run IS a real run. The BatchQueue records it as a success (run was created). The run itself is already in SYSTEM_FAILURE status, so the batch completion flow handles it correctly. - -If `environment` is null (environment not found), fall through to the existing environment-not-found handling which already uses `triggerFailedTaskService.callWithoutTraceEvents()` on `isFinalAttempt`. - -### 5. Handle undefined/null payload in BatchQueue serialization - -**`internal-packages/run-engine/src/batch-queue/index.ts`** - in `#handleMessage` - -Both payload serialization blocks (in the `success: false` branch and the `catch` block) do: -```typescript -const str = typeof item.payload === "string" ? item.payload : JSON.stringify(item.payload); -innerSpan?.setAttribute("batch.payloadSize", str.length); -``` - -`JSON.stringify(undefined)` returns `undefined`, causing `str.length` to crash. Fix both: -```typescript -const str = - item.payload === undefined || item.payload === null - ? "{}" - : typeof item.payload === "string" - ? item.payload - : JSON.stringify(item.payload); -``` - -### 6. 
Remove stale error handling in route - -**`apps/webapp/app/routes/api.v3.batches.$batchId.items.ts`** - -The `error.message.includes("exceeds maximum size")` branch is no longer reachable since oversized items don't throw. Remove that condition, keep the `"Invalid JSON"` check. - -### 7. Remove `BatchItemTooLargeError` and SDK pre-validation - -**`packages/core/src/v3/apiClient/errors.ts`** - remove `BatchItemTooLargeError` class - -**`packages/core/src/v3/apiClient/index.ts`**: -- Remove `BatchItemTooLargeError` import -- Remove `instanceof BatchItemTooLargeError` check in the retry catch block -- Remove `MAX_BATCH_ITEM_BYTES` constant -- Remove size validation from `createNdjsonStream` (revert `encodeAndValidate` to simple encode) - -**`packages/trigger-sdk/src/v3/shared.ts`** - remove `BatchItemTooLargeError` import and handling in `buildBatchErrorMessage` - -**`packages/trigger-sdk/src/v3/index.ts`** - remove `BatchItemTooLargeError` re-export - -### 8. Update tests - -**`apps/webapp/test/engine/streamBatchItems.test.ts`**: -- Update "should reject lines exceeding maxItemBytes" to assert `OversizedItemMarker` emission instead of throw -- Update "should reject unbounded accumulation without newlines" similarly -- Update the emoji byte-size test to assert marker instead of throw - -### 9. 
Update reference project test task - -**`references/hello-world/src/trigger/batches.ts`**: -- Remove `BatchItemTooLargeError` import -- Update `batchSealFailureOversizedPayload` task to test the new behavior: - - Send 2 items: one normal, one oversized (~3.2MB) - - Assert `batchTriggerAndWait` returns (doesn't throw) - - Assert `results.runs[0].ok === true` (normal item succeeded) - - Assert `results.runs[1].ok === false` (oversized item failed) - - Assert error message contains "too large" - -## Data flow - -``` -NDJSON bytes arrive - | -createNdjsonParserStream - |-- Line <= limit --> parse JSON --> enqueue object - `-- Line > limit --> extractIndexAndTask(bytes) --> enqueue OversizedItemMarker - | -StreamBatchItemsService for-await loop - |-- OversizedItemMarker --> engine.enqueueBatchItem() with __error in options - `-- Normal item --> validate --> engine.enqueueBatchItem() - | -FairQueue consumer (#handleMessage) - |-- __error in options --> processItemCallback detects it - | --> TriggerFailedTaskService.call() - | --> Creates pre-failed TaskRun with SYSTEM_FAILURE status - | --> Proper waitpoint + TaskRunWaitpoint connections created - | --> Returns { success: true, runId: failedRunFriendlyId } - `-- Normal item --> TriggerTaskService.call() --> creates normal run - | -Batch sealing: enqueuedCount === runCount (all items go through enqueueBatchItem) -Batch completion: all items have runs (real or pre-failed), waitpoints resolve normally -Parent run: batchTriggerAndWait resolves with per-item results -``` - -## Why this works - -The key insight is that `TriggerFailedTaskService` (from PR #2980) creates a real `TaskRun` in `SYSTEM_FAILURE` status. This means: -1. A RUN waitpoint is created and connected to the parent via `TaskRunWaitpoint` with correct `batchId`/`batchIndex` -2. The run is immediately completed, which completes the waitpoint -3. The SDK's `waitForBatch` resolver for that index fires with the error result -4. 
The batch completion flow counts this as a processed item (it's a real run) -5. No special-casing needed in the batch completion callback - -## Verification - -1. Rebuild `@trigger.dev/core`, `@trigger.dev/sdk`, `@internal/run-engine` -2. Restart webapp + trigger dev -3. Trigger `batch-seal-failure-oversized` task - should complete in ~2-3s with: - - Normal item: `ok: true` - - Oversized item: `ok: false` with "too large" error -4. Run NDJSON parser tests: updated tests assert marker emission instead of throws -5. Run `pnpm run build --filter @internal/run-engine --filter @trigger.dev/core --filter @trigger.dev/sdk` diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index e4b2bc8219c..d4afe34f331 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -488,6 +488,10 @@ function ChatClient({ chatId, initialMessages, initialSessions }) { `resume: true` causes `useChat` to call `reconnectToStream` on the transport when the component mounts. The transport uses the session's `lastEventId` to skip past already-seen stream events, so the frontend only receives new data. Only enable `resume` when there are existing messages — for brand new chats, there's nothing to reconnect to. + + In React strict mode (enabled by default in Next.js dev), you may see a `TypeError: Cannot read properties of undefined (reading 'state')` in the console when using `resume`. This is a [known bug in the AI SDK](https://github.com/vercel/ai/issues/8477) caused by React strict mode double-firing the resume effect. The error is caught internally and **does not affect functionality** — streaming and message display work correctly. It only appears in development and will not occur in production builds. 
+ + ### Full example Putting it all together — a complete chat app with server-side persistence, session reconnection, and stream resumption: @@ -762,6 +766,32 @@ run: async ({ messages, signal }) => { | `metadata` | `Record` | — | Default metadata for every request | | `sessions` | `Record` | — | Restore sessions from storage | | `onSessionChange` | `(chatId, session \| null) => void` | — | Fires when session state changes | +| `triggerOptions` | `{...}` | — | Options for the initial task trigger (see below) | + +#### triggerOptions + +Options forwarded to the Trigger.dev API when starting a new run. Only applies to the first message — subsequent messages reuse the same run. + +A `chat:{chatId}` tag is automatically added to every run. + +| Option | Type | Description | +|--------|------|-------------| +| `tags` | `string[]` | Additional tags for the run (merged with auto-tags, max 5 total) | +| `queue` | `string` | Queue name for the run | +| `maxAttempts` | `number` | Maximum retry attempts | +| `machine` | `"micro" \| "small-1x" \| ...` | Machine preset for the run | +| `priority` | `number` | Priority (lower = higher priority) | + +```ts +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + triggerOptions: { + tags: ["user:123"], + queue: "chat-queue", + }, +}); +``` ### useTriggerChatTransport diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 265bce1f4f9..6c7d39424ba 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -151,6 +151,37 @@ export type TriggerChatTransportOptions = { chatId: string, session: { runId: string; publicAccessToken: string; lastEventId?: string } | null ) => void; + + /** + * Options forwarded to the Trigger.dev API when starting a new run. + * Only applies to the first message — subsequent messages reuse the same run. + * + * A `chat:{chatId}` tag is automatically added to every run. 
+ * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * triggerOptions: { + * tags: ["user:123"], + * queue: "chat-queue", + * }, + * }); + * ``` + */ + triggerOptions?: { + /** Additional tags for the run. A `chat:{chatId}` tag is always added automatically. */ + tags?: string[]; + /** Queue name for the run. */ + queue?: string; + /** Maximum retry attempts. */ + maxAttempts?: number; + /** Machine preset for the run. */ + machine?: "micro" | "small-1x" | "small-2x" | "medium-1x" | "medium-2x" | "large-1x" | "large-2x"; + /** Priority (lower = higher priority). */ + priority?: number; + }; }; /** @@ -202,6 +233,7 @@ export class TriggerChatTransport implements ChatTransport { private readonly extraHeaders: Record; private readonly streamTimeoutSeconds: number; private readonly defaultMetadata: Record | undefined; + private readonly triggerOptions: TriggerChatTransportOptions["triggerOptions"]; private _onSessionChange: | (( chatId: string, @@ -223,6 +255,7 @@ export class TriggerChatTransport implements ChatTransport { this.extraHeaders = options.headers ?? {}; this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; this.defaultMetadata = options.metadata; + this.triggerOptions = options.triggerOptions; this._onSessionChange = options.onSessionChange; // Restore sessions from external storage @@ -303,10 +336,20 @@ export class TriggerChatTransport implements ChatTransport { const currentToken = await this.resolveAccessToken(); const apiClient = new ApiClient(this.baseURL, currentToken); + // Auto-tag with chatId; merge with user-provided tags (API limit: 5 tags) + const autoTags = [`chat:${chatId}`]; + const userTags = this.triggerOptions?.tags ?? []; + const tags = [...autoTags, ...userTags].slice(0, 5); + const triggerResponse = await apiClient.triggerTask(this.taskId, { payload, options: { payloadType: "application/json", + tags, + queue: this.triggerOptions?.queue ? 
{ name: this.triggerOptions.queue } : undefined, + maxAttempts: this.triggerOptions?.maxAttempts, + machine: this.triggerOptions?.machine, + priority: this.triggerOptions?.priority, }, }); diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index 1d065146bfc..8ffc7b41bc8 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -62,6 +62,9 @@ export function ChatApp({ baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, sessions: initialSessions, onSessionChange: handleSessionChange, + triggerOptions: { + tags: ["user:user_123"], + }, }); // Load messages when active chat changes From 6f30a07a6793b37378eb268fd38d555f3bf5f289 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 16:19:25 +0000 Subject: [PATCH 38/53] Make clientData typesafe and pass to all chat.task hooks --- docs/guides/ai-chat.mdx | 64 ++++++++--- packages/core/src/v3/index.ts | 1 + packages/trigger-sdk/src/v3/ai.ts | 108 ++++++++++++++---- packages/trigger-sdk/src/v3/chat-react.ts | 3 +- packages/trigger-sdk/src/v3/chat.ts | 13 ++- .../ai-chat/src/components/chat-app.tsx | 4 +- references/ai-chat/src/trigger/chat.ts | 13 ++- 7 files changed, 150 insertions(+), 56 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index d4afe34f331..ab2d689dbf9 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -256,7 +256,7 @@ export const myChat = chat.task({ ``` - `clientData` contains custom data from the frontend — either the `metadata` option on the transport constructor (sent with every message) or the `metadata` option on `sendMessage()` (per-message). See [Client data and metadata](#client-data-and-metadata). + `clientData` contains custom data from the frontend — either the `clientData` option on the transport constructor (sent with every message) or the `metadata` option on `sendMessage()` (per-message). 
See [Client data and metadata](#client-data-and-metadata). ### onTurnStart @@ -501,13 +501,17 @@ Putting it all together — a complete chat app with server-side persistence, se import { chat } from "@trigger.dev/sdk/ai"; import { streamText } from "ai"; import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; import { db } from "@/lib/db"; export const myChat = chat.task({ id: "my-chat", - onChatStart: async ({ chatId }) => { + clientDataSchema: z.object({ + userId: z.string(), + }), + onChatStart: async ({ chatId, clientData }) => { await db.chat.create({ - data: { id: chatId, title: "New chat", messages: [] }, + data: { id: chatId, userId: clientData.userId, title: "New chat", messages: [] }, }); }, onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { @@ -593,6 +597,7 @@ export function Chat({ chatId, initialMessages, initialSessions }) { const transport = useTriggerChatTransport({ task: "my-chat", accessToken: getChatToken, + clientData: { userId: currentUser.id }, // Type-checked against clientDataSchema sessions: initialSessions, onSessionChange: (id, session) => { if (!session) deleteSession(id); @@ -676,21 +681,21 @@ export const myChat = chat.task({ ## Client data and metadata -### Transport-level metadata +### Transport-level client data -Set default metadata on the transport that's included in every request: +Set default client data on the transport that's included in every request. When the task uses `clientDataSchema`, this is type-checked to match: ```ts -const transport = useTriggerChatTransport({ +const transport = useTriggerChatTransport({ task: "my-chat", accessToken: getChatToken, - metadata: { userId: currentUser.id }, + clientData: { userId: currentUser.id }, }); ``` ### Per-message metadata -Pass metadata with individual messages. Per-message values are merged with transport-level metadata (per-message wins on conflicts): +Pass metadata with individual messages via `sendMessage`. 
Per-message values are merged with transport-level client data (per-message wins on conflicts): ```ts sendMessage( @@ -699,23 +704,32 @@ sendMessage( ); ``` -### Accessing client data in the task +### Typed client data with `clientDataSchema` -Both transport-level and per-message metadata are available as `clientData` in the `run` function and in `onChatStart`: +Instead of manually parsing `clientData` with Zod in every hook, pass a `clientDataSchema` to `chat.task`. The schema validates the data once per turn, and `clientData` is typed in all hooks and `run`: ```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; import { z } from "zod"; export const myChat = chat.task({ id: "my-chat", + clientDataSchema: z.object({ + model: z.string().optional(), + userId: z.string(), + }), + onChatStart: async ({ chatId, clientData }) => { + // clientData is typed as { model?: string; userId: string } + await db.chat.create({ + data: { id: chatId, userId: clientData.userId }, + }); + }, run: async ({ messages, clientData, signal }) => { - const { model, userId } = z.object({ - model: z.string().optional(), - userId: z.string(), - }).parse(clientData); - + // Same typed clientData — no manual parsing needed return streamText({ - model: openai(model ?? "gpt-4o"), + model: openai(clientData?.model ?? "gpt-4o"), messages, abortSignal: signal, }); @@ -723,6 +737,19 @@ export const myChat = chat.task({ }); ``` +The schema also types the `clientData` option on the frontend transport: + +```ts +// TypeScript enforces that clientData matches the schema +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + clientData: { userId: currentUser.id }, +}); +``` + +Supports Zod, ArkType, Valibot, and other schema libraries supported by the SDK. 
+ ## Runtime configuration ### chat.setTurnTimeout() @@ -763,7 +790,7 @@ run: async ({ messages, signal }) => { | `streamKey` | `string` | `"chat"` | Stream key (only change if using custom key) | | `headers` | `Record` | — | Extra headers for API requests | | `streamTimeoutSeconds` | `number` | `120` | How long to wait for stream data | -| `metadata` | `Record` | — | Default metadata for every request | +| `clientData` | Typed by `clientDataSchema` | — | Default client data for every request | | `sessions` | `Record` | — | Restore sessions from storage | | `onSessionChange` | `(chatId, session \| null) => void` | — | Fires when session state changes | | `triggerOptions` | `{...}` | — | Options for the initial task trigger (see below) | @@ -837,6 +864,7 @@ const transport = useTriggerChatTransport({ |--------|------|---------|-------------| | `id` | `string` | required | Task identifier | | `run` | `(payload: ChatTaskRunPayload) => Promise` | required | Handler for each turn | +| `clientDataSchema` | `TaskSchema` | — | Schema for validating and typing `clientData` | | `onChatStart` | `(event: ChatStartEvent) => Promise \| void` | — | Fires on turn 0 before `run()` | | `onTurnStart` | `(event: TurnStartEvent) => Promise \| void` | — | Fires every turn before `run()` | | `onTurnComplete` | `(event: TurnCompleteEvent) => Promise \| void` | — | Fires after each turn completes | @@ -854,7 +882,7 @@ Plus all standard [TaskOptions](/tasks/overview) — `retry`, `queue`, `machine` | `chatId` | `string` | Unique chat session ID | | `trigger` | `"submit-message" \| "regenerate-message"` | What triggered the request | | `messageId` | `string \| undefined` | Message ID (for regenerate) | -| `clientData` | `unknown` | Custom data from frontend metadata | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend (typed when schema is provided) | | `signal` | `AbortSignal` | Combined stop + cancel signal | | `cancelSignal` | `AbortSignal` | Cancel-only signal | 
| `stopSignal` | `AbortSignal` | Stop-only signal (per-turn) | diff --git a/packages/core/src/v3/index.ts b/packages/core/src/v3/index.ts index 2757363f4be..883da288556 100644 --- a/packages/core/src/v3/index.ts +++ b/packages/core/src/v3/index.ts @@ -80,6 +80,7 @@ export { getSchemaParseFn, type AnySchemaParseFn, type SchemaParseFn, + type inferSchemaOut, isSchemaZodEsque, isSchemaValibotEsque, isSchemaArkTypeEsque, diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 3140402d4ad..3fad1565d87 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -1,11 +1,13 @@ import { accessoryAttributes, AnyTask, + getSchemaParseFn, isSchemaZodEsque, SemanticInternalAttributes, Task, taskContext, type inferSchemaIn, + type inferSchemaOut, type PipeStreamOptions, type TaskIdentifier, type TaskOptions, @@ -178,12 +180,12 @@ export { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID }; * Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name. * @internal */ -type ChatTaskWirePayload = { +type ChatTaskWirePayload = { messages: TMessage[]; chatId: string; trigger: "submit-message" | "regenerate-message"; messageId?: string; - metadata?: unknown; + metadata?: TMetadata; }; /** @@ -196,7 +198,7 @@ type ChatTaskWirePayload = { * The backend accumulates the full conversation history across turns, so the frontend * only needs to send new messages after the first turn. */ -export type ChatTaskPayload = { +export type ChatTaskPayload = { /** Model-ready messages — pass directly to `streamText({ messages })`. */ messages: ModelMessage[]; @@ -214,7 +216,7 @@ export type ChatTaskPayload = { messageId?: string; /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ - clientData?: unknown; + clientData?: TClientData; }; /** @@ -233,7 +235,7 @@ export type ChatTaskSignals = { * The full payload passed to a `chatTask` run function. 
* Extends `ChatTaskPayload` (the wire payload) with abort signals. */ -export type ChatTaskRunPayload = ChatTaskPayload & ChatTaskSignals; +export type ChatTaskRunPayload = ChatTaskPayload & ChatTaskSignals; // Input streams for bidirectional chat communication const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); @@ -384,13 +386,13 @@ async function pipeChat( /** * Event passed to the `onChatStart` callback. */ -export type ChatStartEvent = { +export type ChatStartEvent = { /** The unique identifier for the chat session. */ chatId: string; /** The initial model-ready messages for this conversation. */ messages: ModelMessage[]; /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ - clientData: unknown; + clientData: TClientData; /** The Trigger.dev run ID for this conversation. */ runId: string; /** A scoped access token for this chat run. Persist this for frontend reconnection. */ @@ -400,7 +402,7 @@ export type ChatStartEvent = { /** * Event passed to the `onTurnStart` callback. */ -export type TurnStartEvent = { +export type TurnStartEvent = { /** The unique identifier for the chat session. */ chatId: string; /** The accumulated model-ready messages (all turns so far, including new user message). */ @@ -413,12 +415,14 @@ export type TurnStartEvent = { runId: string; /** A scoped access token for this chat run. */ chatAccessToken: string; + /** Custom data from the frontend. */ + clientData?: TClientData; }; /** * Event passed to the `onTurnComplete` callback. */ -export type TurnCompleteEvent = { +export type TurnCompleteEvent = { /** The unique identifier for the chat session. */ chatId: string; /** The full accumulated conversation in model format (all turns so far). */ @@ -448,12 +452,34 @@ export type TurnCompleteEvent = { chatAccessToken: string; /** The last event ID from the stream writer. Use this with `resume: true` to avoid replaying events after refresh. 
*/ lastEventId?: string; + /** Custom data from the frontend. */ + clientData?: TClientData; }; -export type ChatTaskOptions = Omit< - TaskOptions, - "run" -> & { +export type ChatTaskOptions< + TIdentifier extends string, + TClientDataSchema extends TaskSchema | undefined = undefined, +> = Omit, "run"> & { + /** + * Schema for validating `clientData` from the frontend. + * Accepts Zod, ArkType, Valibot, or any supported schema library. + * When provided, `clientData` is parsed and typed in all hooks and `run`. + * + * @example + * ```ts + * import { z } from "zod"; + * + * chat.task({ + * id: "my-chat", + * clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), + * run: async ({ messages, clientData, signal }) => { + * // clientData is typed as { model?: string; userId: string } + * }, + * }); + * ``` + */ + clientDataSchema?: TClientDataSchema; + /** * The run function for the chat task. * @@ -463,7 +489,7 @@ export type ChatTaskOptions = Omit< * **Auto-piping:** If this function returns a value with `.toUIMessageStream()`, * the stream is automatically piped to the frontend. */ - run: (payload: ChatTaskRunPayload) => Promise; + run: (payload: ChatTaskRunPayload>) => Promise; /** * Called on the first turn (turn 0) of a new run, before the `run` function executes. 
@@ -477,7 +503,7 @@ export type ChatTaskOptions = Omit< * } * ``` */ - onChatStart?: (event: ChatStartEvent) => Promise | void; + onChatStart?: (event: ChatStartEvent>) => Promise | void; /** * Called at the start of every turn, after message accumulation and `onChatStart` (turn 0), @@ -493,7 +519,7 @@ export type ChatTaskOptions = Omit< * } * ``` */ - onTurnStart?: (event: TurnStartEvent) => Promise | void; + onTurnStart?: (event: TurnStartEvent>) => Promise | void; /** * Called after each turn completes (after the response is captured, before waiting @@ -508,7 +534,7 @@ export type ChatTaskOptions = Omit< * } * ``` */ - onTurnComplete?: (event: TurnCompleteEvent) => Promise | void; + onTurnComplete?: (event: TurnCompleteEvent>) => Promise | void; /** * Maximum number of conversational turns (message round-trips) a single run @@ -578,11 +604,15 @@ export type ChatTaskOptions = Omit< * }); * ``` */ -function chatTask( - options: ChatTaskOptions -): Task { +function chatTask< + TIdentifier extends string, + TClientDataSchema extends TaskSchema | undefined = undefined, +>( + options: ChatTaskOptions +): Task>, unknown> { const { run: userRun, + clientDataSchema, onChatStart, onTurnStart, onTurnComplete, @@ -593,7 +623,11 @@ function chatTask( ...restOptions } = options; - return createTask({ + const parseClientData = clientDataSchema + ? getSchemaParseFn(clientDataSchema) + : undefined; + + return createTask>, unknown>({ ...restOptions, run: async (payload: ChatTaskWirePayload, { signal: runSignal }) => { // Set gen_ai.conversation.id on the run-level span for dashboard context @@ -626,6 +660,9 @@ function chatTask( for (let turn = 0; turn < maxTurns; turn++) { // Extract turn-level context before entering the span const { metadata: wireMetadata, messages: uiMessages, ...restWire } = currentWirePayload; + const clientData = (parseClientData + ? 
await parseClientData(wireMetadata) + : wireMetadata) as inferSchemaOut; const lastUserMessage = extractLastUserMessageText(uiMessages); const turnAttributes: Attributes = { @@ -738,7 +775,7 @@ function chatTask( await onChatStart({ chatId: currentWirePayload.chatId, messages: accumulatedMessages, - clientData: wireMetadata, + clientData, runId: currentRunId, chatAccessToken: turnAccessToken, }); @@ -765,6 +802,7 @@ function chatTask( turn, runId: currentRunId, chatAccessToken: turnAccessToken, + clientData, }); }, { @@ -783,11 +821,11 @@ function chatTask( const result = await userRun({ ...restWire, messages: accumulatedMessages, - clientData: wireMetadata, + clientData, signal: combinedSignal, cancelSignal, stopSignal, - }); + } as any); // Auto-pipe if the run function returned a StreamTextResult or similar, // but only if pipeChat() wasn't already called manually during this turn. @@ -866,6 +904,7 @@ function chatTask( runId: currentRunId, chatAccessToken: turnAccessToken, lastEventId: turnCompleteResult.lastEventId, + clientData, }); }, { @@ -1023,6 +1062,27 @@ function setWarmTimeoutInSeconds(seconds: number): void { metadata.set(WARM_TIMEOUT_METADATA_KEY, seconds); } +/** + * Extracts the client data (metadata) type from a chat task. + * Use this to type the `metadata` option on the transport. + * + * @example + * ```ts + * import type { InferChatClientData } from "@trigger.dev/sdk/ai"; + * import type { myChat } from "@/trigger/chat"; + * + * type MyClientData = InferChatClientData; + * // { model?: string; userId: string } + * ``` + */ +export type InferChatClientData = TTask extends Task< + string, + ChatTaskWirePayload, + any +> + ? TMetadata + : unknown; + export const chat = { /** Create a chat task. See {@link chatTask}. 
*/ task: chatTask, diff --git a/packages/trigger-sdk/src/v3/chat-react.ts b/packages/trigger-sdk/src/v3/chat-react.ts index 1ee48a4b23b..612f0c184ba 100644 --- a/packages/trigger-sdk/src/v3/chat-react.ts +++ b/packages/trigger-sdk/src/v3/chat-react.ts @@ -29,6 +29,7 @@ import { type TriggerChatTransportOptions, } from "./chat.js"; import type { AnyTask, TaskIdentifier } from "@trigger.dev/core/v3"; +import type { InferChatClientData } from "./ai.js"; /** * Options for `useTriggerChatTransport`, with a type-safe `task` field. @@ -39,7 +40,7 @@ import type { AnyTask, TaskIdentifier } from "@trigger.dev/core/v3"; * ``` */ export type UseTriggerChatTransportOptions = Omit< - TriggerChatTransportOptions, + TriggerChatTransportOptions>, "task" > & { /** The task ID. Strongly typed when a task type parameter is provided. */ diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 6c7d39424ba..366ecaf5230 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -35,7 +35,7 @@ const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; /** * Options for creating a TriggerChatTransport. */ -export type TriggerChatTransportOptions = { +export type TriggerChatTransportOptions = { /** * The Trigger.dev task ID to trigger for chat completions. * This task should be defined using `chatTask()` from `@trigger.dev/sdk/ai`, @@ -84,22 +84,23 @@ export type TriggerChatTransportOptions = { streamTimeoutSeconds?: number; /** - * Default metadata included in every request payload. + * Default client data included in every request payload. * Merged with per-call `metadata` from `sendMessage()` — per-call values * take precedence over transport-level defaults. * - * Useful for data that should accompany every message, like a user ID. + * When the task uses `clientDataSchema`, this is typed to match the schema. 
* * @example * ```ts * new TriggerChatTransport({ * task: "my-chat", * accessToken, - * metadata: { userId: currentUser.id }, + * clientData: { userId: currentUser.id }, * }); * ``` */ - metadata?: Record; + clientData?: TClientData extends Record ? TClientData : Record; + /** * Restore active chat sessions from external storage (e.g. localStorage). @@ -254,7 +255,7 @@ export class TriggerChatTransport implements ChatTransport { this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; this.extraHeaders = options.headers ?? {}; this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; - this.defaultMetadata = options.metadata; + this.defaultMetadata = options.clientData; this.triggerOptions = options.triggerOptions; this._onSessionChange = options.onSessionChange; diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index 8ffc7b41bc8..a00695ec428 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -3,6 +3,7 @@ import type { UIMessage } from "ai"; import { generateId } from "ai"; import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import type { aiChat } from "@/trigger/chat"; import { useCallback, useEffect, useState } from "react"; import { Chat } from "@/components/chat"; import { ChatSidebar } from "@/components/chat-sidebar"; @@ -56,12 +57,13 @@ export function ChatApp({ [] ); - const transport = useTriggerChatTransport({ + const transport = useTriggerChatTransport({ task: "ai-chat", accessToken: getChatToken, baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, sessions: initialSessions, onSessionChange: handleSessionChange, + clientData: { userId: "user_123" }, triggerOptions: { tags: ["user:user_123"], }, diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 61c455341d3..68c65d75055 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ 
b/references/ai-chat/src/trigger/chat.ts @@ -86,6 +86,7 @@ declare const Deno: unknown; export const aiChat = chat.task({ id: "ai-chat", + clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), warmTimeoutInSeconds: 60, chatAccessTokenTTL: "2h", onChatStart: async ({ chatId, runId, chatAccessToken }) => { @@ -125,20 +126,20 @@ export const aiChat = chat.task({ }); }, run: async ({ messages, clientData, stopSignal }) => { - const { model: modelId } = z - .object({ model: z.string().optional() }) - .parse(clientData ?? {}); - return streamText({ - model: getModel(modelId), + model: getModel(clientData?.model), system: "You are a helpful assistant. Be concise and friendly.", messages, tools: { inspectEnvironment }, stopWhen: stepCountIs(10), abortSignal: stopSignal, + providerOptions: { + openai: { user: clientData?.userId }, + anthropic: { metadata: { user_id: clientData?.userId } }, + }, experimental_telemetry: { isEnabled: true, - } + }, }); }, }); From 157421d313fb30ec0786ece1d46d2115f2f60ca0 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 6 Mar 2026 17:27:06 +0000 Subject: [PATCH 39/53] feat: add chat.local for per-run typed data with Proxy access and dirty tracking --- docs/guides/ai-chat.mdx | 100 +++++++++++ packages/trigger-sdk/src/v3/ai.ts | 161 ++++++++++++++++++ .../migration.sql | 18 ++ references/ai-chat/prisma/schema.prisma | 13 ++ references/ai-chat/src/trigger/chat.ts | 55 +++++- 5 files changed, 342 insertions(+), 5 deletions(-) create mode 100644 references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index ab2d689dbf9..8350a1a2d1f 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -778,6 +778,105 @@ run: async ({ messages, signal }) => { Longer warm timeout means faster responses but more compute usage. Set to `0` to suspend immediately after each turn (minimum latency cost, slight delay on next message). 
+## Per-run data with `chat.local` + +Use `chat.local` to create typed, run-scoped data that persists across turns and is accessible from anywhere — the run function, tools, nested helpers. Each run gets its own isolated copy, and locals are automatically cleared between runs. + +### Declaring and initializing + +Declare locals at module level, then initialize them inside a lifecycle hook where you have context (chatId, clientData, etc.): + +```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText, tool } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; +import { db } from "@/lib/db"; + +// Declare at module level — multiple locals can coexist +const userContext = chat.local<{ + name: string; + plan: "free" | "pro"; + messageCount: number; +}>(); + +export const myChat = chat.task({ + id: "my-chat", + clientDataSchema: z.object({ userId: z.string() }), + onChatStart: async ({ clientData }) => { + // Initialize with real data from your database + const user = await db.user.findUnique({ + where: { id: clientData.userId }, + }); + userContext.init({ + name: user.name, + plan: user.plan, + messageCount: user.messageCount, + }); + }, + run: async ({ messages, signal }) => { + userContext.messageCount++; + + return streamText({ + model: openai("gpt-4o"), + system: `Helping ${userContext.name} (${userContext.plan} plan).`, + messages, + abortSignal: signal, + }); + }, +}); +``` + +### Accessing from tools + +Locals are accessible from anywhere during task execution — including AI SDK tools: + +```ts +const userContext = chat.local<{ plan: "free" | "pro" }>(); + +const premiumTool = tool({ + description: "Access premium features", + inputSchema: z.object({ feature: z.string() }), + execute: async ({ feature }) => { + if (userContext.plan !== "pro") { + return { error: "This feature requires a Pro plan." }; + } + // ... 
premium logic + }, +}); +``` + +### Dirty tracking and persistence + +The `hasChanged()` method returns `true` if any property was set since the last check, then resets the flag. Use it in lifecycle hooks to only persist when data actually changed: + +```ts +onTurnComplete: async ({ chatId }) => { + if (userContext.hasChanged()) { + await db.user.update({ + where: { id: userContext.get().userId }, + data: { + messageCount: userContext.messageCount, + }, + }); + } +}, +``` + +### API reference + +| Method | Description | +|--------|-------------| +| `chat.local()` | Create a typed local (declare at module level) | +| `local.init(value)` | Initialize with a value (call in hooks or `run`) | +| `local.hasChanged()` | Returns `true` if modified since last check, resets flag | +| `local.get()` | Returns a plain object copy (for serialization) | +| `local.property` | Direct property access (read/write via Proxy) | + + + Locals use shallow proxying. Nested object mutations like `local.prefs.theme = "dark"` won't trigger the dirty flag. Instead, replace the whole property: `local.prefs = { ...local.prefs, theme: "dark" }`. + + ## Frontend reference ### TriggerChatTransport options @@ -897,6 +996,7 @@ See [onTurnComplete](#onturncomplete) for the full field reference. |--------|-------------| | `chat.task(options)` | Create a chat task | | `chat.pipe(source, options?)` | Pipe a stream to the frontend (from anywhere inside a task) | +| `chat.local()` | Create a per-run typed local (see [Per-run data](#per-run-data-with-chatlocal)) | | `chat.createAccessToken(taskId)` | Create a public access token for a chat task | | `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. 
`"2h"`) | | `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 3fad1565d87..ee75cbd654b 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -1062,6 +1062,165 @@ function setWarmTimeoutInSeconds(seconds: number): void { metadata.set(WARM_TIMEOUT_METADATA_KEY, seconds); } +// --------------------------------------------------------------------------- +// chat.local — per-run typed data with Proxy access +// --------------------------------------------------------------------------- + +/** @internal Symbol for storing the locals key on the proxy target. */ +const CHAT_LOCAL_KEY: unique symbol = Symbol("chatLocalKey"); +/** @internal Symbol for storing the dirty-tracking locals key. */ +const CHAT_LOCAL_DIRTY_KEY: unique symbol = Symbol("chatLocalDirtyKey"); +/** @internal Counter for generating unique locals IDs. */ +let chatLocalCounter = 0; + +/** + * A Proxy-backed, run-scoped data object that appears as `T` to users. + * Includes helper methods for initialization, dirty tracking, and serialization. + * Internal metadata is stored behind Symbols and invisible to + * `Object.keys()`, `JSON.stringify()`, and spread. + */ +export type ChatLocal> = T & { + /** Initialize the local with a value. Call in `onChatStart` or `run()`. */ + init(value: T): void; + /** Returns `true` if any property was set since the last check. Resets the dirty flag. */ + hasChanged(): boolean; + /** Returns a plain object copy of the current value. Useful for persistence. */ + get(): T; + readonly [CHAT_LOCAL_KEY]: ReturnType>; + readonly [CHAT_LOCAL_DIRTY_KEY]: ReturnType>; +}; + +/** + * Creates a per-run typed data object accessible from anywhere during task execution. + * + * Declare at module level, then initialize inside a lifecycle hook (e.g. `onChatStart`) + * using `chat.initLocal()`. 
Properties are accessible directly via the Proxy. + * + * Multiple locals can coexist — each gets its own isolated run-scoped storage. + * + * @example + * ```ts + * import { chat } from "@trigger.dev/sdk/ai"; + * + * const userPrefs = chat.local<{ theme: string; language: string }>(); + * const gameState = chat.local<{ score: number; streak: number }>(); + * + * export const myChat = chat.task({ + * id: "my-chat", + * onChatStart: async ({ clientData }) => { + * const prefs = await db.prefs.findUnique({ where: { userId: clientData.userId } }); + * userPrefs.init(prefs ?? { theme: "dark", language: "en" }); + * gameState.init({ score: 0, streak: 0 }); + * }, + * onTurnComplete: async ({ chatId }) => { + * if (gameState.hasChanged()) { + * await db.save({ where: { chatId }, data: gameState.get() }); + * } + * }, + * run: async ({ messages }) => { + * gameState.score++; + * return streamText({ + * system: `User prefers ${userPrefs.theme} theme. Score: ${gameState.score}`, + * messages, + * }); + * }, + * }); + * ``` + */ +function chatLocal>(): ChatLocal { + const localKey = locals.create(`chat.local.${chatLocalCounter++}`); + const dirtyKey = locals.create(`chat.local.${chatLocalCounter++}.dirty`); + + const target = {} as any; + target[CHAT_LOCAL_KEY] = localKey; + target[CHAT_LOCAL_DIRTY_KEY] = dirtyKey; + + return new Proxy(target, { + get(_target, prop, _receiver) { + // Internal Symbol properties + if (prop === CHAT_LOCAL_KEY) return _target[CHAT_LOCAL_KEY]; + if (prop === CHAT_LOCAL_DIRTY_KEY) return _target[CHAT_LOCAL_DIRTY_KEY]; + + // Instance methods + if (prop === "init") { + return (value: T) => { + locals.set(localKey, value); + locals.set(dirtyKey, false); + }; + } + if (prop === "hasChanged") { + return () => { + const dirty = locals.get(dirtyKey) ?? 
false; + locals.set(dirtyKey, false); + return dirty; + }; + } + if (prop === "get") { + return () => { + const current = locals.get(localKey); + if (current === undefined) { + throw new Error( + "local.get() called before initialization. Call local.init() first." + ); + } + return { ...current }; + }; + } + // toJSON for serialization (JSON.stringify(local)) + if (prop === "toJSON") { + return () => { + const current = locals.get(localKey); + return current ? { ...current } : undefined; + }; + } + + const current = locals.get(localKey); + if (current === undefined) return undefined; + return (current as any)[prop]; + }, + + set(_target, prop, value) { + // Don't allow setting internal Symbols + if (typeof prop === "symbol") return false; + + const current = locals.get(localKey); + if (current === undefined) { + throw new Error( + "chat.local can only be modified after initialization. " + + "Call local.init() in onChatStart or run() first." + ); + } + locals.set(localKey, { ...current, [prop]: value }); + locals.set(dirtyKey, true); + return true; + }, + + has(_target, prop) { + if (typeof prop === "symbol") return prop in _target; + const current = locals.get(localKey); + return current !== undefined && prop in current; + }, + + ownKeys() { + const current = locals.get(localKey); + return current ? Reflect.ownKeys(current) : []; + }, + + getOwnPropertyDescriptor(_target, prop) { + if (typeof prop === "symbol") return undefined; + const current = locals.get(localKey); + if (current === undefined || !(prop in current)) return undefined; + return { + configurable: true, + enumerable: true, + writable: true, + value: (current as any)[prop], + }; + }, + }) as ChatLocal; +} + + /** * Extracts the client data (metadata) type from a chat task. * Use this to type the `metadata` option on the transport. @@ -1088,6 +1247,8 @@ export const chat = { task: chatTask, /** Pipe a stream to the chat transport. See {@link pipeChat}. 
*/ pipe: pipeChat, + /** Create a per-run typed local. See {@link chatLocal}. */ + local: chatLocal, /** Create a public access token for a chat task. See {@link createChatAccessToken}. */ createAccessToken: createChatAccessToken, /** Override the turn timeout at runtime (duration string). See {@link setTurnTimeout}. */ diff --git a/references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql b/references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql new file mode 100644 index 00000000000..4a1bca35872 --- /dev/null +++ b/references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql @@ -0,0 +1,18 @@ +-- AlterTable +ALTER TABLE "Chat" ADD COLUMN "userId" TEXT; + +-- CreateTable +CREATE TABLE "User" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "plan" TEXT NOT NULL DEFAULT 'free', + "preferredModel" TEXT, + "messageCount" INTEGER NOT NULL DEFAULT 0, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "User_pkey" PRIMARY KEY ("id") +); + +-- AddForeignKey +ALTER TABLE "Chat" ADD CONSTRAINT "Chat_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/references/ai-chat/prisma/schema.prisma b/references/ai-chat/prisma/schema.prisma index 4899b46f73e..5b58955c268 100644 --- a/references/ai-chat/prisma/schema.prisma +++ b/references/ai-chat/prisma/schema.prisma @@ -7,10 +7,23 @@ datasource db { provider = "postgresql" } +model User { + id String @id + name String + plan String @default("free") // "free" | "pro" + preferredModel String? + messageCount Int @default(0) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + chats Chat[] +} + model Chat { id String @id title String messages Json @default("[]") + userId String? + user User? 
@relation(fields: [userId], references: [id]) createdAt DateTime @default(now()) updatedAt DateTime @updatedAt } diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 68c65d75055..3469cf9d43f 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -84,15 +84,38 @@ const inspectEnvironment = tool({ declare const Bun: unknown; declare const Deno: unknown; +// Per-run user context — loaded from DB in onChatStart, accessible everywhere +const userContext = chat.local<{ + userId: string; + name: string; + plan: "free" | "pro"; + preferredModel: string | null; + messageCount: number; +}>(); + export const aiChat = chat.task({ id: "ai-chat", clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), warmTimeoutInSeconds: 60, chatAccessTokenTTL: "2h", - onChatStart: async ({ chatId, runId, chatAccessToken }) => { + onChatStart: async ({ chatId, runId, chatAccessToken, clientData }) => { + // Load user context from DB — available for the entire run + const user = await prisma.user.upsert({ + where: { id: clientData.userId }, + create: { id: clientData.userId, name: "User" }, + update: {}, + }); + userContext.init({ + userId: user.id, + name: user.name, + plan: user.plan as "free" | "pro", + preferredModel: user.preferredModel, + messageCount: user.messageCount, + }); + await prisma.chat.upsert({ where: { id: chatId }, - create: { id: chatId, title: "New chat" }, + create: { id: chatId, title: "New chat", userId: user.id }, update: {}, }); await prisma.chatSession.upsert({ @@ -113,7 +136,7 @@ export const aiChat = chat.task({ update: { runId, publicAccessToken: chatAccessToken }, }); }, - onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId, clientData }) => { // Persist final messages + assistant response + stream position await 
prisma.chat.update({ where: { id: chatId }, @@ -124,11 +147,33 @@ export const aiChat = chat.task({ create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, update: { runId, publicAccessToken: chatAccessToken, lastEventId }, }); + + // Persist user context changes (message count, preferred model) if anything changed + if (userContext.hasChanged()) { + await prisma.user.update({ + where: { id: userContext.userId }, + data: { + messageCount: userContext.messageCount, + preferredModel: userContext.preferredModel, + }, + }); + } }, run: async ({ messages, clientData, stopSignal }) => { + // Track usage + userContext.messageCount++; + + // Remember their model choice + if (clientData?.model) { + userContext.preferredModel = clientData.model; + } + + // Use preferred model if none specified + const modelId = clientData?.model ?? userContext.preferredModel ?? undefined; + return streamText({ - model: getModel(clientData?.model), - system: "You are a helpful assistant. Be concise and friendly.", + model: getModel(modelId), + system: `You are a helpful assistant for ${userContext.name} (${userContext.plan} plan). 
Be concise and friendly.`, messages, tools: { inspectEnvironment }, stopWhen: stepCountIs(10), From 4d6668d90c42aeba58a5125ab94f7125c9329a2b Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sat, 7 Mar 2026 10:28:42 +0000 Subject: [PATCH 40/53] feat(chat): add stop handling, abort cleanup, continuation support, and reference project enhancements - Fix onFinish race condition: await onFinishPromise so capturedResponseMessage is set before accumulation - Add chat.isStopped() helper accessible from anywhere during a turn - Add chat.cleanupAbortedParts() to remove incomplete tool/reasoning/text parts on stop - Auto-cleanup aborted parts before passing to onTurnComplete - Clean incoming messages from frontend to prevent tool_use without tool_result API errors - Add stopped and rawResponseMessage fields to TurnCompleteEvent - Add continuation and previousRunId fields to all lifecycle hooks and run payload - Add span attributes (chat.id, chat.turn, chat.stopped, chat.continuation, chat.previous_run_id, etc.) - Add webFetch tool and reasoning model support to ai-chat reference project - Render reasoning parts in frontend chat component - Document all new fields in ai-chat guide --- docs/guides/ai-chat.mdx | 76 +++++++- packages/trigger-sdk/src/v3/ai.ts | 211 ++++++++++++++++++++- packages/trigger-sdk/src/v3/chat.ts | 11 +- references/ai-chat/src/components/chat.tsx | 13 ++ references/ai-chat/src/lib/models.ts | 2 + references/ai-chat/src/trigger/chat.ts | 71 ++++++- 6 files changed, 364 insertions(+), 20 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index 8350a1a2d1f..39a93c196e9 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -240,10 +240,17 @@ export const manualChat = task({ Fires once on the first turn (turn 0) before `run()` executes. Use it to create a chat record in your database. 
+The `continuation` field tells you whether this is a brand new chat or a continuation of an existing one (where the previous run timed out or was cancelled): + ```ts export const myChat = chat.task({ id: "my-chat", - onChatStart: async ({ chatId, clientData }) => { + onChatStart: async ({ chatId, clientData, continuation }) => { + if (continuation) { + // Previous run ended — chat record already exists, just update session + return; + } + // Brand new chat — create the record const { userId } = clientData as { userId: string }; await db.chat.create({ data: { id: chatId, userId, title: "New chat" }, @@ -271,6 +278,7 @@ Fires at the start of every turn, after message accumulation and `onChatStart` ( | `turn` | `number` | Turn number (0-indexed) | | `runId` | `string` | The Trigger.dev run ID | | `chatAccessToken` | `string` | Scoped access token for this run | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | ```ts export const myChat = chat.task({ @@ -312,6 +320,9 @@ Fires after each turn completes — after the response is captured, before waiti | `runId` | `string` | The Trigger.dev run ID | | `chatAccessToken` | `string` | Scoped access token for this run | | `lastEventId` | `string \| undefined` | Stream position for resumption. Persist this with the session. | +| `stopped` | `boolean` | Whether the user stopped generation during this turn | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | +| `rawResponseMessage` | `UIMessage \| undefined` | The raw assistant response before abort cleanup (same as `responseMessage` when not stopped) | ```ts export const myChat = chat.task({ @@ -679,6 +690,66 @@ export const myChat = chat.task({ Use `signal` (the combined signal) in most cases. The separate `stopSignal` and `cancelSignal` are only needed if you want different behavior for stop vs cancel. 
+### Detecting stop in callbacks + +The `onTurnComplete` event includes a `stopped` boolean that indicates whether the user stopped generation during that turn: + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnComplete: async ({ chatId, uiMessages, stopped }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages, lastStoppedAt: stopped ? new Date() : undefined }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +You can also check stop status from **anywhere** during a turn using `chat.isStopped()`. This is useful inside `streamText`'s `onFinish` callback where the AI SDK's `isAborted` flag can be unreliable (e.g. when using `createUIMessageStream` + `writer.merge()`): + +```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; + +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + onFinish: ({ isAborted }) => { + // isAborted may be false even after stop when using createUIMessageStream + const wasStopped = isAborted || chat.isStopped(); + if (wasStopped) { + // handle stop — e.g. log analytics + } + }, + }); + }, +}); +``` + +### Cleaning up aborted messages + +When stop happens mid-stream, the captured response message can contain parts in an incomplete state — tool calls stuck in `partial-call`, reasoning blocks still marked as `streaming`, etc. These can cause UI issues like permanent spinners. + +`chat.task` automatically cleans up the `responseMessage` when stop is detected before passing it to `onTurnComplete`. 
If you use `chat.pipe()` manually and capture response messages yourself, use `chat.cleanupAbortedParts()`: + +```ts +const cleaned = chat.cleanupAbortedParts(rawResponseMessage); +``` + +This removes tool invocation parts stuck in `partial-call` state and marks any `streaming` text or reasoning parts as `done`. + + + Stop signal delivery is best-effort. There is a small race window where the model may finish before the stop signal arrives, in which case the turn completes normally with `stopped: false`. This is expected and does not require special handling. + + ## Client data and metadata ### Transport-level client data @@ -982,6 +1053,7 @@ Plus all standard [TaskOptions](/tasks/overview) — `retry`, `queue`, `machine` | `trigger` | `"submit-message" \| "regenerate-message"` | What triggered the request | | `messageId` | `string \| undefined` | Message ID (for regenerate) | | `clientData` | Typed by `clientDataSchema` | Custom data from the frontend (typed when schema is provided) | +| `continuation` | `boolean` | Whether this run is continuing an existing chat (previous run ended) | | `signal` | `AbortSignal` | Combined stop + cancel signal | | `cancelSignal` | `AbortSignal` | Cancel-only signal | | `stopSignal` | `AbortSignal` | Stop-only signal (per-turn) | @@ -1001,6 +1073,8 @@ See [onTurnComplete](#onturncomplete) for the full field reference. | `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. 
`"2h"`) | | `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | | `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | +| `chat.isStopped()` | Check if the current turn was stopped by the user (works anywhere during a turn) | +| `chat.cleanupAbortedParts(message)` | Remove incomplete parts from a stopped response message | ## Self-hosting diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index ee75cbd654b..544bdd7a5b3 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -186,6 +186,10 @@ type ChatTaskWirePayload = { /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ clientData?: TClientData; + + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; }; /** @@ -247,6 +256,7 @@ const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STO * @internal */ const chatPipeCountKey = locals.create("chat.pipeCount"); +const chatStopControllerKey = locals.create("chat.stopController"); /** * Options for `pipeChat`. @@ -397,6 +407,10 @@ export type ChatStartEvent = { runId: string; /** A scoped access token for this chat run. Persist this for frontend reconnection. */ chatAccessToken: string; + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; }; /** @@ -417,6 +431,10 @@ export type TurnStartEvent = { chatAccessToken: string; /** Custom data from the frontend. 
*/ clientData?: TClientData; + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; }; /** @@ -442,8 +460,14 @@ export type TurnCompleteEvent = { * Useful for inserting individual message records instead of overwriting the full history. */ newUIMessages: UIMessage[]; - /** The assistant's response for this turn (undefined if `pipeChat` was used manually). */ + /** The assistant's response for this turn, with aborted parts cleaned up when `stopped` is true. Undefined if `pipeChat` was used manually. */ responseMessage: UIMessage | undefined; + /** + * The raw assistant response before abort cleanup. Includes incomplete tool parts + * (`input-available`, `partial-call`) and streaming reasoning/text parts. + * Use this if you need custom cleanup logic. Same as `responseMessage` when not stopped. + */ + rawResponseMessage: UIMessage | undefined; /** The turn number (0-indexed). */ turn: number; /** The Trigger.dev run ID for this conversation. */ @@ -454,6 +478,12 @@ export type TurnCompleteEvent = { lastEventId?: string; /** Custom data from the frontend. */ clientData?: TClientData; + /** Whether the user stopped generation during this turn. */ + stopped: boolean; + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; }; export type ChatTaskOptions< @@ -637,6 +667,8 @@ function chatTask< } let currentWirePayload = payload; + const continuation = payload.continuation ?? false; + const previousRunId = payload.previousRunId; // Accumulated model messages across turns. 
Turn 1 initialises from the // full history the frontend sends; subsequent turns append only the new @@ -704,6 +736,7 @@ function chatTask< // Per-turn stop controller (reset each turn) const stopController = new AbortController(); currentStopController = stopController; + locals.set(chatStopControllerKey, stopController); // Three signals for the user's run function const stopSignal = stopController.signal; @@ -716,10 +749,19 @@ function chatTask< pendingMessages.push(msg); }); + // Clean up any incomplete tool parts in the incoming history. + // When a previous run was stopped mid-tool-call, the frontend's + // useChat state may still contain assistant messages with tool parts + // in partial/input-available state. These cause API errors (e.g. + // Anthropic requires every tool_use to have a matching tool_result). + const cleanedUIMessages = uiMessages.map((msg) => + msg.role === "assistant" ? cleanupAbortedParts(msg) : msg + ); + // Convert the incoming UIMessages to model messages and update the accumulator. // Turn 1: full history from the frontend → replaces the accumulator. // Turn 2+: only the new message(s) → appended to the accumulator. - const incomingModelMessages = await convertToModelMessages(uiMessages); + const incomingModelMessages = await convertToModelMessages(cleanedUIMessages); // Track new messages for this turn (user input + assistant response). const turnNewModelMessages: ModelMessage[] = []; @@ -727,11 +769,11 @@ function chatTask< if (turn === 0) { accumulatedMessages = incomingModelMessages; - accumulatedUIMessages = [...uiMessages]; + accumulatedUIMessages = [...cleanedUIMessages]; // On first turn, the "new" messages are just the last user message // (the rest is history). We'll add the response after streaming. 
- if (uiMessages.length > 0) { - turnNewUIMessages.push(uiMessages[uiMessages.length - 1]!); + if (cleanedUIMessages.length > 0) { + turnNewUIMessages.push(cleanedUIMessages[cleanedUIMessages.length - 1]!); const lastModel = incomingModelMessages[incomingModelMessages.length - 1]; if (lastModel) turnNewModelMessages.push(lastModel); } @@ -739,14 +781,14 @@ function chatTask< // Regenerate: frontend sent full history with last assistant message // removed. Reset the accumulator to match. accumulatedMessages = incomingModelMessages; - accumulatedUIMessages = [...uiMessages]; + accumulatedUIMessages = [...cleanedUIMessages]; // No new user messages for regenerate — just the response (added below) } else { // Submit: frontend sent only the new user message(s). Append to accumulator. accumulatedMessages.push(...incomingModelMessages); - accumulatedUIMessages.push(...uiMessages); + accumulatedUIMessages.push(...cleanedUIMessages); turnNewModelMessages.push(...incomingModelMessages); - turnNewUIMessages.push(...uiMessages); + turnNewUIMessages.push(...cleanedUIMessages); } // Mint a scoped public access token once per turn, reused for @@ -778,12 +820,18 @@ function chatTask< clientData, runId: currentRunId, chatAccessToken: turnAccessToken, + continuation, + previousRunId, }); }, { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.messages.count": accumulatedMessages.length, + "chat.continuation": continuation, + ...(previousRunId ? 
{ "chat.previous_run_id": previousRunId } : {}), }, } ); @@ -803,12 +851,20 @@ function chatTask< runId: currentRunId, chatAccessToken: turnAccessToken, clientData, + continuation, + previousRunId, }); }, { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.turn": turn + 1, + "chat.messages.count": accumulatedMessages.length, + "chat.trigger": currentWirePayload.trigger, + "chat.continuation": continuation, + ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), }, } ); @@ -817,11 +873,22 @@ function chatTask< // Captured by the onFinish callback below — works even on abort/stop. let capturedResponseMessage: UIMessage | undefined; + // Promise that resolves when the AI SDK's onFinish fires. + // On abort, the stream's cancel() handler calls onFinish + // asynchronously AFTER pipeChat resolves, so we must await + // this to avoid a race where we check capturedResponseMessage + // before it's been set. + let resolveOnFinish: () => void; + const onFinishPromise = new Promise((r) => { resolveOnFinish = r; }); + let onFinishAttached = false; + try { const result = await userRun({ ...restWire, messages: accumulatedMessages, clientData, + continuation, + previousRunId, signal: combinedSignal, cancelSignal, stopSignal, @@ -831,9 +898,11 @@ function chatTask< // but only if pipeChat() wasn't already called manually during this turn. // We call toUIMessageStream ourselves to attach onFinish for response capture. if ((locals.get(chatPipeCountKey) ?? 
0) === 0 && isUIMessageStreamable(result)) { + onFinishAttached = true; const uiStream = result.toUIMessageStream({ onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { capturedResponseMessage = responseMessage; + resolveOnFinish!(); }, }); await pipeChat(uiStream, { signal: combinedSignal, spanName: "stream response" }); @@ -852,10 +921,26 @@ function chatTask< msgSub.off(); } + // Wait for onFinish to fire — on abort this may resolve slightly + // after pipeChat, since the stream's cancel() handler is async. + if (onFinishAttached) { + await onFinishPromise; + } + + // Determine if the user stopped generation this turn (not a full run cancel). + const wasStopped = stopController.signal.aborted && !runSignal.aborted; + // Append the assistant's response (partial or complete) to the accumulator. // The onFinish callback fires even on abort/stop, so partial responses // from stopped generation are captured correctly. + let rawResponseMessage: UIMessage | undefined; if (capturedResponseMessage) { + // Keep the raw message before cleanup for users who want custom handling + rawResponseMessage = capturedResponseMessage; + // Clean up aborted parts (streaming tool calls, reasoning) when stopped + if (wasStopped) { + capturedResponseMessage = cleanupAbortedParts(capturedResponseMessage); + } // Ensure the response message has an ID (the stream's onFinish // may produce a message with an empty ID since IDs are normally // assigned by the frontend's useChat). 
@@ -900,17 +985,29 @@ function chatTask< newMessages: turnNewModelMessages, newUIMessages: turnNewUIMessages, responseMessage: capturedResponseMessage, + rawResponseMessage, turn, runId: currentRunId, chatAccessToken: turnAccessToken, lastEventId: turnCompleteResult.lastEventId, clientData, + stopped: wasStopped, + continuation, + previousRunId, }); }, { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onComplete", [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.turn": turn + 1, + "chat.stopped": wasStopped, + "chat.continuation": continuation, + ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), + "chat.messages.count": accumulatedMessages.length, + "chat.response.parts.count": capturedResponseMessage?.parts?.length ?? 0, + "chat.new_messages.count": turnNewUIMessages.length, }, } ); @@ -1062,6 +1159,100 @@ function setWarmTimeoutInSeconds(seconds: number): void { metadata.set(WARM_TIMEOUT_METADATA_KEY, seconds); } +// --------------------------------------------------------------------------- +// Stop detection +// --------------------------------------------------------------------------- + +/** + * Check whether the user stopped generation during the current turn. + * + * Works from **anywhere** inside a `chat.task` run — including inside + * `streamText`'s `onFinish` callback — without needing to thread the + * `stopSignal` through closures. + * + * This is especially useful when the AI SDK's `isAborted` flag is unreliable + * (e.g. when using `createUIMessageStream` + `writer.merge()`). + * + * @example + * ```ts + * onFinish: ({ isAborted }) => { + * const wasStopped = isAborted || chat.isStopped(); + * if (wasStopped) { + * // handle stop + * } + * } + * ``` + */ +function isStopped(): boolean { + const controller = locals.get(chatStopControllerKey); + return controller?.signal.aborted ?? 
false; +} + +// --------------------------------------------------------------------------- +// Aborted message cleanup +// --------------------------------------------------------------------------- + +/** + * Clean up a UIMessage that was captured during an aborted/stopped turn. + * + * When generation is stopped mid-stream, the captured message may contain: + * - Tool parts stuck in incomplete states (`partial-call`, `input-available`, + * `input-streaming`) that cause permanent UI spinners + * - Reasoning parts with `state: "streaming"` instead of `"done"` + * - Text parts with `state: "streaming"` instead of `"done"` + * + * This function returns a cleaned copy with: + * - Incomplete tool parts removed entirely + * - Reasoning and text parts marked as `"done"` + * + * `chat.task` calls this automatically when stop is detected before passing + * the response to `onTurnComplete`. Use this manually when calling `pipeChat` + * directly and capturing response messages yourself. + * + * @example + * ```ts + * onTurnComplete: async ({ responseMessage, stopped }) => { + * // Already cleaned automatically by chat.task — but if you captured + * // your own message via pipeChat, clean it manually: + * const cleaned = chat.cleanupAbortedParts(myMessage); + * await db.messages.save(cleaned); + * } + * ``` + */ +function cleanupAbortedParts(message: UIMessage): UIMessage { + if (!message.parts) return message; + + const isToolPart = (part: any) => + part.type === "tool-invocation" || + part.type?.startsWith("tool-") || + part.type === "dynamic-tool"; + + return { + ...message, + parts: message.parts + .filter((part: any) => { + if (!isToolPart(part)) return true; + // Remove tool parts that never completed execution. + // partial-call: input was still streaming when aborted. + // input-available: input was complete but tool never ran. + // input-streaming: input was mid-stream. + const state = part.toolInvocation?.state ?? 
part.state; + return state !== "partial-call" && state !== "input-available" && state !== "input-streaming"; + }) + .map((part: any) => { + // Mark streaming reasoning as done + if (part.type === "reasoning" && part.state === "streaming") { + return { ...part, state: "done" }; + } + // Mark streaming text as done + if (part.type === "text" && part.state === "streaming") { + return { ...part, state: "done" }; + } + return part; + }), + }; +} + // --------------------------------------------------------------------------- // chat.local — per-run typed data with Proxy access // --------------------------------------------------------------------------- @@ -1257,6 +1448,10 @@ export const chat = { setTurnTimeoutInSeconds, /** Override the warm timeout at runtime. See {@link setWarmTimeoutInSeconds}. */ setWarmTimeoutInSeconds, + /** Check if the current turn was stopped by the user. See {@link isStopped}. */ + isStopped, + /** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. */ + cleanupAbortedParts, }; /** diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index 366ecaf5230..f8aff0726ec 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -297,6 +297,8 @@ export class TriggerChatTransport implements ChatTransport { }; const session = this.sessions.get(chatId); + let isContinuation = false; + let previousRunId: string | undefined; // If we have an existing run, send the message via input stream // to resume the conversation in the same run. if (session?.runId) { @@ -328,8 +330,11 @@ export class TriggerChatTransport implements ChatTransport { ); } catch { // If sending fails (run died, etc.), fall through to trigger a new run. + // Mark as continuation so the task knows this chat already existed. 
+ previousRunId = session.runId; this.sessions.delete(chatId); this.notifySessionChange(chatId, null); + isContinuation = true; } } @@ -343,7 +348,11 @@ export class TriggerChatTransport implements ChatTransport { const tags = [...autoTags, ...userTags].slice(0, 5); const triggerResponse = await apiClient.triggerTask(this.taskId, { - payload, + payload: { + ...payload, + continuation: isContinuation, + ...(previousRunId ? { previousRunId } : {}), + }, options: { payloadType: "application/json", tags, diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 1ee49e8781c..13abe0df6e0 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -210,6 +210,19 @@ export function Chat({ return {part.text}; } + if (part.type === "reasoning") { + return ( +
+ + Thinking... + +
+ {part.text} +
+
+ ); + } + if (part.type.startsWith("tool-") || part.type === "dynamic-tool") { return ; } diff --git a/references/ai-chat/src/lib/models.ts b/references/ai-chat/src/lib/models.ts index 5261a800ab8..72173ebc030 100644 --- a/references/ai-chat/src/lib/models.ts +++ b/references/ai-chat/src/lib/models.ts @@ -6,3 +6,5 @@ export const MODEL_OPTIONS = [ ]; export const DEFAULT_MODEL = "gpt-4o-mini"; + +export const REASONING_MODELS = new Set(["claude-opus-4-6"]); diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 3469cf9d43f..d8e171c31b6 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -11,7 +11,7 @@ import { PrismaClient } from "../../lib/generated/prisma/client"; const adapter = new PrismaPg({ connectionString: process.env.DATABASE_URL! }); const prisma = new PrismaClient({ adapter }); -import { DEFAULT_MODEL } from "@/lib/models"; +import { DEFAULT_MODEL, REASONING_MODELS } from "@/lib/models"; const MODELS: Record LanguageModel> = { "gpt-4o-mini": () => openai("gpt-4o-mini"), @@ -80,6 +80,48 @@ const inspectEnvironment = tool({ }, }); +const webFetch = tool({ + description: + "Fetch a URL and return the response as text. " + + "Use this to retrieve web pages, APIs, or any HTTP resource.", + inputSchema: z.object({ + url: z.string().url().describe("The URL to fetch"), + }), + execute: async ({ url }) => { + const latency = Number(process.env.WEBFETCH_LATENCY_MS); + if (latency > 0) { + await new Promise((r) => setTimeout(r, latency)); + } + + const response = await fetch(url); + let text = await response.text(); + const contentType = response.headers.get("content-type") ?? 
""; + + // Strip HTML to plain text for readability + if (contentType.includes("html")) { + text = text + .replace(//gi, "") + .replace(//gi, "") + .replace(/<[^>]+>/g, " ") + .replace(/ /g, " ") + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/"/g, '"') + .replace(/'/g, "'") + .replace(/\s+/g, " ") + .trim(); + } + + return { + status: response.status, + contentType, + body: text.slice(0, 2000), + truncated: text.length > 2000, + }; + }, +}); + // Silence TS errors for Bun/Deno global checks declare const Bun: unknown; declare const Deno: unknown; @@ -98,7 +140,7 @@ export const aiChat = chat.task({ clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), warmTimeoutInSeconds: 60, chatAccessTokenTTL: "2h", - onChatStart: async ({ chatId, runId, chatAccessToken, clientData }) => { + onChatStart: async ({ chatId, runId, chatAccessToken, clientData, continuation }) => { // Load user context from DB — available for the entire run const user = await prisma.user.upsert({ where: { id: clientData.userId }, @@ -113,11 +155,16 @@ export const aiChat = chat.task({ messageCount: user.messageCount, }); - await prisma.chat.upsert({ - where: { id: chatId }, - create: { id: chatId, title: "New chat", userId: user.id }, - update: {}, - }); + if (!continuation) { + // Brand new chat — create the record + await prisma.chat.upsert({ + where: { id: chatId }, + create: { id: chatId, title: "New chat", userId: user.id }, + update: {}, + }); + } + + // Always update session for the new run await prisma.chatSession.upsert({ where: { id: chatId }, create: { id: chatId, runId, publicAccessToken: chatAccessToken }, @@ -136,7 +183,7 @@ export const aiChat = chat.task({ update: { runId, publicAccessToken: chatAccessToken }, }); }, - onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId, clientData }) => { + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId, clientData, stopped }) => 
{ // Persist final messages + assistant response + stream position await prisma.chat.update({ where: { id: chatId }, @@ -170,17 +217,21 @@ export const aiChat = chat.task({ // Use preferred model if none specified const modelId = clientData?.model ?? userContext.preferredModel ?? undefined; + const useReasoning = REASONING_MODELS.has(modelId ?? DEFAULT_MODEL); return streamText({ model: getModel(modelId), system: `You are a helpful assistant for ${userContext.name} (${userContext.plan} plan). Be concise and friendly.`, messages, - tools: { inspectEnvironment }, + tools: { inspectEnvironment, webFetch }, stopWhen: stepCountIs(10), abortSignal: stopSignal, providerOptions: { openai: { user: clientData?.userId }, - anthropic: { metadata: { user_id: clientData?.userId } }, + anthropic: { + metadata: { user_id: clientData?.userId }, + ...(useReasoning ? { thinking: { type: "enabled", budgetTokens: 10000 } } : {}), + }, }, experimental_telemetry: { isEnabled: true, From 2d6df6155be811390832bf7331361025579d4d38 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sat, 7 Mar 2026 15:37:57 +0000 Subject: [PATCH 41/53] Some improvements to the example ai-chat --- references/ai-chat/prisma/schema.prisma | 1 + references/ai-chat/src/app/actions.ts | 3 +- .../ai-chat/src/components/chat-app.tsx | 47 +++- references/ai-chat/src/components/chat.tsx | 209 +++++++++++++----- references/ai-chat/src/trigger/chat.ts | 9 +- 5 files changed, 198 insertions(+), 71 deletions(-) diff --git a/references/ai-chat/prisma/schema.prisma b/references/ai-chat/prisma/schema.prisma index 5b58955c268..d3941b7508c 100644 --- a/references/ai-chat/prisma/schema.prisma +++ b/references/ai-chat/prisma/schema.prisma @@ -21,6 +21,7 @@ model User { model Chat { id String @id title String + model String @default("gpt-4o-mini") messages Json @default("[]") userId String? user User? 
@relation(fields: [userId], references: [id]) diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts index 3b6c55e7146..56398c9c84f 100644 --- a/references/ai-chat/src/app/actions.ts +++ b/references/ai-chat/src/app/actions.ts @@ -8,12 +8,13 @@ export const getChatToken = async () => chat.createAccessToken("a export async function getChatList() { const chats = await prisma.chat.findMany({ - select: { id: true, title: true, createdAt: true, updatedAt: true }, + select: { id: true, title: true, model: true, createdAt: true, updatedAt: true }, orderBy: { updatedAt: "desc" }, }); return chats.map((c) => ({ id: c.id, title: c.title, + model: c.model, createdAt: c.createdAt.getTime(), updatedAt: c.updatedAt.getTime(), })); diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index a00695ec428..c1008e2fe83 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -7,6 +7,7 @@ import type { aiChat } from "@/trigger/chat"; import { useCallback, useEffect, useState } from "react"; import { Chat } from "@/components/chat"; import { ChatSidebar } from "@/components/chat-sidebar"; +import { DEFAULT_MODEL } from "@/lib/models"; import { getChatToken, getChatList, @@ -19,18 +20,22 @@ import { type ChatMeta = { id: string; title: string; + model: string; createdAt: number; updatedAt: number; }; +type SessionInfo = { + runId: string; + publicAccessToken: string; + lastEventId?: string; +}; + type ChatAppProps = { initialChatList: ChatMeta[]; initialActiveChatId: string | null; initialMessages: UIMessage[]; - initialSessions: Record< - string, - { runId: string; publicAccessToken: string; lastEventId?: string } - >; + initialSessions: Record; }; export function ChatApp({ @@ -42,15 +47,21 @@ export function ChatApp({ const [chatList, setChatList] = useState(initialChatList); const [activeChatId, setActiveChatId] = 
useState(initialActiveChatId); const [messages, setMessages] = useState(initialMessages); + const [sessions, setSessions] = useState>(initialSessions); + + // Model for new chats (before first message is sent) + const [newChatModel, setNewChatModel] = useState(DEFAULT_MODEL); const handleSessionChange = useCallback( - ( - chatId: string, - session: { runId: string; publicAccessToken: string; lastEventId?: string } | null - ) => { - // Session creation and token updates are handled server-side via onChatStart/onTurnComplete. - // We only need to clean up when the run ends (session = null). - if (!session) { + (chatId: string, session: SessionInfo | null) => { + if (session) { + setSessions((prev) => ({ ...prev, [chatId]: session })); + } else { + setSessions((prev) => { + const next = { ...prev }; + delete next[chatId]; + return next; + }); deleteSessionAction(chatId); } }, @@ -86,6 +97,7 @@ export function ChatApp({ const id = generateId(); setActiveChatId(id); setMessages([]); + setNewChatModel(DEFAULT_MODEL); } function handleSelectChat(id: string) { @@ -119,6 +131,14 @@ export function ChatApp({ setChatList(list); }, []); + // Determine the model for the active chat + const activeChatMeta = chatList.find((c) => c.id === activeChatId); + const isNewChat = activeChatId != null && !activeChatMeta; + const activeModel = isNewChat ? newChatModel : (activeChatMeta?.model ?? DEFAULT_MODEL); + + // Get session for the active chat + const activeSession = activeChatId ? sessions[activeChatId] : undefined; + return (
0} + model={activeModel} + isNewChat={isNewChat} + onModelChange={isNewChat ? setNewChatModel : undefined} + session={activeSession} + dashboardUrl={process.env.NEXT_PUBLIC_TRIGGER_DASHBOARD_URL} onFirstMessage={handleFirstMessage} onMessagesChange={handleMessagesChange} /> diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index 13abe0df6e0..f6e1916b566 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -5,7 +5,7 @@ import { useChat } from "@ai-sdk/react"; import type { TriggerChatTransport } from "@trigger.dev/sdk/chat"; import { useEffect, useRef, useState } from "react"; import { Streamdown } from "streamdown"; -import { MODEL_OPTIONS, DEFAULT_MODEL } from "@/lib/models"; +import { MODEL_OPTIONS } from "@/lib/models"; function ToolInvocation({ part }: { part: any }) { const [expanded, setExpanded] = useState(false); @@ -70,11 +70,112 @@ function ToolInvocation({ part }: { part: any }) { ); } +function DebugPanel({ + chatId, + model, + status, + session, + dashboardUrl, + messageCount, +}: { + chatId: string; + model: string; + status: string; + session?: { runId: string; publicAccessToken: string; lastEventId?: string }; + dashboardUrl?: string; + messageCount: number; +}) { + const [open, setOpen] = useState(false); + + const runUrl = + session?.runId && dashboardUrl + ? `${dashboardUrl}/runs/${session.runId}` + : undefined; + + return ( +
+ + + {open && ( +
+ + + + + {session ? ( + <> + + + + ) : ( + + )} +
+ )} +
+ ); +} + +function Row({ + label, + value, + mono, + link, +}: { + label: string; + value: string; + mono?: boolean; + link?: string; +}) { + return ( +
+ {label} + {link ? ( + + {value} + + ) : ( + {value} + )} +
+ ); +} + type ChatProps = { chatId: string; initialMessages: UIMessage[]; transport: TriggerChatTransport; resume?: boolean; + model: string; + isNewChat: boolean; + onModelChange?: (model: string) => void; + session?: { runId: string; publicAccessToken: string; lastEventId?: string }; + dashboardUrl?: string; onFirstMessage?: (chatId: string, text: string) => void; onMessagesChange?: (chatId: string, messages: UIMessage[]) => void; }; @@ -84,12 +185,15 @@ export function Chat({ initialMessages, transport, resume: resumeProp, + model, + isNewChat, + onModelChange, + session, + dashboardUrl, onFirstMessage, onMessagesChange, }: ChatProps) { const [input, setInput] = useState(""); - const [model, setModel] = useState(DEFAULT_MODEL); - const modelByUserMsgId = useRef>(new Map()); const hasCalledFirstMessage = useRef(false); const { messages, sendMessage, stop, status, error } = useChat({ @@ -114,7 +218,7 @@ export function Chat({ }, [messages, chatId, onFirstMessage]); // Pending message to send after the current turn completes - const [pendingMessage, setPendingMessage] = useState<{ text: string; model: string } | null>(null); + const [pendingMessage, setPendingMessage] = useState(null); // Handle turn completion: persist messages and auto-send pending message const prevStatus = useRef(status); @@ -124,47 +228,48 @@ export function Chat({ if (!turnCompleted) return; - // Persist messages when a turn completes — this ensures the final assistant - // message content is saved (not the empty placeholder from mid-stream). 
+ // Persist messages when a turn completes if (messages.length > 0) { onMessagesChange?.(chatId, messages); } // Auto-send the pending message if (pendingMessage) { - const { text, model: pendingMsgModel } = pendingMessage; + const text = pendingMessage; setPendingMessage(null); - pendingModel.current = pendingMsgModel; - sendMessage({ text }, { metadata: { model: pendingMsgModel } }); - } - }, [status, messages, chatId, onMessagesChange, sendMessage, pendingMessage]); - - function getModelForAssistantAt(index: number): string | undefined { - for (let i = index - 1; i >= 0; i--) { - if (messages[i]?.role === "user") { - return modelByUserMsgId.current.get(messages[i].id); - } - } - return undefined; - } - - const originalSendMessage = sendMessage; - function trackedSendMessage(msg: Parameters[0], opts?: Parameters[1]) { - pendingModel.current = model; - originalSendMessage(msg, opts); - } - const pendingModel = useRef(model); - - const trackedUserIds = useRef>(new Set()); - for (const msg of messages) { - if (msg.role === "user" && !trackedUserIds.current.has(msg.id)) { - trackedUserIds.current.add(msg.id); - modelByUserMsgId.current.set(msg.id, pendingModel.current); + sendMessage({ text }, { metadata: { model } }); } - } + }, [status, messages, chatId, onMessagesChange, sendMessage, pendingMessage, model]); return (
+ {/* Model selector for new chats */} + {isNewChat && messages.length === 0 && onModelChange && ( +
+ Model: + +
+ )} + + {/* Model badge for existing chats */} + {(!isNewChat || messages.length > 0) && ( +
+ + {model} + +
+ )} + {/* Messages */}
{messages.length === 0 && ( @@ -177,13 +282,6 @@ export function Chat({ className={`flex ${message.role === "user" ? "justify-end" : "justify-start"}`} >
- {message.role === "assistant" && ( -
- - {getModelForAssistantAt(messageIndex) ?? DEFAULT_MODEL} - -
- )}
- {pendingMessage.text} + {pendingMessage}
Queued — will send when current response finishes @@ -262,14 +360,24 @@ export function Chat({
)} + {/* Debug panel */} + + { e.preventDefault(); if (!input.trim()) return; if (status === "streaming") { - setPendingMessage({ text: input, model }); + setPendingMessage(input); } else { - trackedSendMessage({ text: input }, { metadata: { model } }); + sendMessage({ text: input }, { metadata: { model } }); } setInput(""); }} @@ -300,19 +408,6 @@ export function Chat({ )}
-
- -
); diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index d8e171c31b6..226e6bcadca 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -156,10 +156,15 @@ export const aiChat = chat.task({ }); if (!continuation) { - // Brand new chat — create the record + // Brand new chat — create the record with the selected model await prisma.chat.upsert({ where: { id: chatId }, - create: { id: chatId, title: "New chat", userId: user.id }, + create: { + id: chatId, + title: "New chat", + userId: user.id, + model: clientData.model ?? DEFAULT_MODEL, + }, update: {}, }); } From 76a6b9a44f013c9344e26f78ff90d6ea8576b86a Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sun, 8 Mar 2026 16:33:07 +0000 Subject: [PATCH 42/53] feat(chat): expose typed chat.stream, add deepResearch subtask example, per-chat model persistence, debug panel - Export chat.stream (typed RealtimeDefinedStream) for writing custom data to the chat stream - Add deepResearch subtask using data-* chunks to stream progress back to parent chat via target: root - Use AI SDK data-research-progress chunk protocol with id-based updates for live progress - Add ResearchProgress component and generic data-* fallback renderer in frontend - Persist model per chat in DB (schema + onChatStart), model selector only on new chats - Add collapsible debug panel showing run ID (with dashboard link), chat ID, model, status, session info - Document chat.stream API, data-* chunks, and subtask streaming pattern in docs --- docs/guides/ai-chat.mdx | 135 +++++++++++++++++++++ packages/trigger-sdk/src/v3/ai.ts | 27 ++++- references/ai-chat/src/components/chat.tsx | 55 +++++++++ references/ai-chat/src/trigger/chat.ts | 110 ++++++++++++++++- 4 files changed, 323 insertions(+), 4 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index 39a93c196e9..982c0c57fb0 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ 
-750,6 +750,140 @@ This removes tool invocation parts stuck in `partial-call` state and marks any ` Stop signal delivery is best-effort. There is a small race window where the model may finish before the stop signal arrives, in which case the turn completes normally with `stopped: false`. This is expected and does not require special handling. +## Writing to the chat stream + +### Custom chunks with `chat.stream` + +`chat.stream` is a typed stream bound to the chat output. Use it to write custom `UIMessageChunk` data alongside the AI-generated response — for example, status updates or progress indicators. + +```ts +import { chat } from "@trigger.dev/sdk/ai"; + +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + // Write a custom data part to the chat stream. + // The AI SDK's data-* chunk protocol adds this to message.parts + // on the frontend, where you can render it however you like. + const { waitUntilComplete } = chat.stream.writer({ + execute: ({ write }) => { + write({ + type: "data-status", + id: "search-progress", + data: { message: "Searching the web...", progress: 0.5 }, + }); + }, + }); + await waitUntilComplete(); + + // Then stream the AI response + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + Use `data-*` chunk types (e.g. `data-status`, `data-progress`) for custom data. The AI SDK processes these into `DataUIPart` objects in `message.parts` on the frontend. Writing the same `type` + `id` again updates the existing part instead of creating a new one — useful for live progress. 
+ + +`chat.stream` exposes the full stream API: + +| Method | Description | +|--------|-------------| +| `chat.stream.writer(options)` | Write individual chunks via a callback | +| `chat.stream.pipe(stream, options?)` | Pipe a `ReadableStream` or `AsyncIterable` | +| `chat.stream.append(value, options?)` | Append raw data | +| `chat.stream.read(runId, options?)` | Read the stream by run ID | + +### Streaming from subtasks + +When a tool invokes a subtask via `triggerAndWait`, the subtask can stream directly to the parent chat using `target: "root"`: + +```ts +import { chat, ai } from "@trigger.dev/sdk/ai"; +import { schemaTask } from "@trigger.dev/sdk"; +import { streamText, generateId } from "ai"; +import { z } from "zod"; + +// A subtask that streams progress back to the parent chat +export const researchTask = schemaTask({ + id: "research", + schema: z.object({ query: z.string() }), + run: async ({ query }) => { + const partId = generateId(); + + // Write a data-* chunk to the root run's chat stream. + // The frontend receives this as a DataUIPart in message.parts. + const { waitUntilComplete } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-status", + id: partId, + data: { query, status: "in-progress" }, + }); + }, + }); + await waitUntilComplete(); + + // Do the work... 
+ const result = await doResearch(query); + + // Update the same part with the final status + const { waitUntilComplete: waitDone } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-status", + id: partId, + data: { query, status: "done", resultCount: result.length }, + }); + }, + }); + await waitDone(); + + return result; + }, +}); + +// The chat task uses it as a tool via ai.tool() +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + tools: { + research: ai.tool(researchTask), + }, + }); + }, +}); +``` + +On the frontend, render the custom data part: + +```tsx +{message.parts.map((part, i) => { + if (part.type === "data-research-status") { + const { query, status, resultCount } = part.data; + return ( +
+ {status === "done" ? `Found ${resultCount} results` : `Researching "${query}"...`} +
+ ); + } + // ...other part types +})} +``` + +The `target` option accepts: +- `"self"` — current run (default) +- `"parent"` — parent task's run +- `"root"` — root task's run (the chat task) +- A specific run ID string + ## Client data and metadata ### Transport-level client data @@ -1075,6 +1209,7 @@ See [onTurnComplete](#onturncomplete) for the full field reference. | `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | | `chat.isStopped()` | Check if the current turn was stopped by the user (works anywhere during a turn) | | `chat.cleanupAbortedParts(message)` | Remove incomplete parts from a stopped response message | +| `chat.stream` | Typed chat output stream — use `.writer()`, `.pipe()`, `.append()`, `.read()` | ## Self-hosting diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 544bdd7a5b3..c2ffd55bea5 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -14,7 +14,7 @@ import { type TaskSchema, type TaskWithSchema, } from "@trigger.dev/core/v3"; -import type { ModelMessage, UIMessage } from "ai"; +import type { ModelMessage, UIMessage, UIMessageChunk } from "ai"; import type { StreamWriteResult } from "@trigger.dev/core/v3"; import { convertToModelMessages, dynamicTool, generateId as generateMessageId, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; import { type Attributes, trace } from "@opentelemetry/api"; @@ -175,6 +175,29 @@ export const CHAT_STREAM_KEY = _CHAT_STREAM_KEY; // Re-export input stream IDs for advanced usage export { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID }; +/** + * Typed chat output stream. Provides `.writer()`, `.pipe()`, `.append()`, + * and `.read()` methods pre-bound to the chat stream key and typed to `UIMessageChunk`. 
+ * + * Use from within a `chat.task` run to write custom chunks: + * ```ts + * const { waitUntilComplete } = chat.stream.writer({ + * execute: ({ write }) => { + * write({ type: "text-start", id: "status-1" }); + * write({ type: "text-delta", id: "status-1", delta: "Processing..." }); + * write({ type: "text-end", id: "status-1" }); + * }, + * }); + * await waitUntilComplete(); + * ``` + * + * Use from a subtask to stream back to the parent chat: + * ```ts + * chat.stream.pipe(myStream, { target: "root" }); + * ``` + */ +const chatStream = streams.define({ id: _CHAT_STREAM_KEY }); + /** * The wire payload shape sent by `TriggerChatTransport`. * Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name. @@ -1452,6 +1475,8 @@ export const chat = { isStopped, /** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. */ cleanupAbortedParts, + /** Typed chat output stream for writing custom chunks or piping from subtasks. */ + stream: chatStream, }; /** diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index f6e1916b566..e41b132dbdb 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -70,6 +70,46 @@ function ToolInvocation({ part }: { part: any }) { ); } +function ResearchProgress({ part }: { part: any }) { + const data = part.data as { + status: "fetching" | "done"; + query: string; + current: number; + total: number; + currentUrl?: string; + completedUrls: string[]; + }; + + const isDone = data.status === "done"; + + return ( +
+
+ {isDone ? ( + + ) : ( + + )} + + {isDone + ? `Research complete — ${data.total} sources fetched` + : `Researching "${data.query}" (${data.current}/${data.total})`} + +
+ {data.currentUrl && !isDone && ( +
Fetching {data.currentUrl}
+ )} + {data.completedUrls.length > 0 && ( +
+ {data.completedUrls.map((url, i) => ( +
✓ {url}
+ ))} +
+ )} +
+ ); +} + function DebugPanel({ chatId, model, @@ -321,10 +361,25 @@ export function Chat({ ); } + if (part.type === "data-research-progress") { + return ; + } + if (part.type.startsWith("tool-") || part.type === "dynamic-tool") { return ; } + if (part.type.startsWith("data-")) { + return ( +
+ {part.type} +
+                          {JSON.stringify((part as any).data, null, 2)}
+                        
+
+ ); + } + return null; })}
diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 226e6bcadca..0eed9cf0805 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,5 +1,6 @@ -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText, tool, stepCountIs } from "ai"; +import { chat, ai } from "@trigger.dev/sdk/ai"; +import { schemaTask } from "@trigger.dev/sdk"; +import { streamText, tool, stepCountIs, generateId } from "ai"; import type { LanguageModel } from "ai"; import { openai } from "@ai-sdk/openai"; import { anthropic } from "@ai-sdk/anthropic"; @@ -135,6 +136,105 @@ const userContext = chat.local<{ messageCount: number; }>(); +// -------------------------------------------------------------------------- +// Subtask: deep research — fetches multiple URLs and streams progress +// back to the parent chat via chat.stream using data-* chunks +// -------------------------------------------------------------------------- +export const deepResearch = schemaTask({ + id: "deep-research", + description: + "Research a topic by fetching multiple URLs and synthesizing the results. " + + "Streams progress updates to the chat as it works.", + schema: z.object({ + query: z.string().describe("The research query or topic"), + urls: z.array(z.string().url()).describe("URLs to fetch and analyze"), + }), + run: async ({ query, urls }) => { + const partId = generateId(); + const results: { url: string; status: number; snippet: string }[] = []; + + // Stream progress using data-research-progress chunks. + // Using the same id means each write updates the same part in the message. 
+ function streamProgress(progress: { + status: "fetching" | "done"; + query: string; + current: number; + total: number; + currentUrl?: string; + completedUrls: string[]; + }) { + return chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-progress" as any, + id: partId, + data: progress, + }); + }, + }); + } + + for (let i = 0; i < urls.length; i++) { + const url = urls[i]!; + + // Update progress — fetching + const { waitUntilComplete } = streamProgress({ + status: "fetching", + query, + current: i + 1, + total: urls.length, + currentUrl: url, + completedUrls: results.map((r) => r.url), + }); + await waitUntilComplete(); + + try { + const response = await fetch(url); + let text = await response.text(); + const contentType = response.headers.get("content-type") ?? ""; + + if (contentType.includes("html")) { + text = text + .replace(//gi, "") + .replace(//gi, "") + .replace(/<[^>]+>/g, " ") + .replace(/ /g, " ") + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/\s+/g, " ") + .trim(); + } + + results.push({ + url, + status: response.status, + snippet: text.slice(0, 500), + }); + } catch (err) { + results.push({ + url, + status: 0, + snippet: `Error: ${err instanceof Error ? err.message : String(err)}`, + }); + } + } + + // Final progress update — done + const { waitUntilComplete: waitForDone } = streamProgress({ + status: "done", + query, + current: urls.length, + total: urls.length, + completedUrls: results.map((r) => r.url), + }); + await waitForDone(); + + return { query, results }; + }, +}); + export const aiChat = chat.task({ id: "ai-chat", clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), @@ -228,7 +328,11 @@ export const aiChat = chat.task({ model: getModel(modelId), system: `You are a helpful assistant for ${userContext.name} (${userContext.plan} plan). 
Be concise and friendly.`, messages, - tools: { inspectEnvironment, webFetch }, + tools: { + inspectEnvironment, + webFetch, + deepResearch: ai.tool(deepResearch), + }, stopWhen: stepCountIs(10), abortSignal: stopSignal, providerOptions: { From 8f0e6656ea33c0ae10653fea1b49c3eb8424faf8 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Sun, 8 Mar 2026 18:30:45 +0000 Subject: [PATCH 43/53] feat(ai): pass chat context and toolCallId to subtasks, add typed ai.chatContext helpers - Store chat turn context (chatId, turn, continuation, clientData) in locals for auto-detection - toolFromTask now auto-detects chat context and passes it to subtask metadata - Skip serializing messages array (can be large, rarely needed by subtasks) - Tag subtask runs with toolCallId for dashboard visibility - Add ai.toolCallId() convenience helper - Add ai.chatContext() with typed clientData inference - Add ai.chatContextOrThrow() that throws if not in a chat context - Update deepResearch example to use ai.chatContextOrThrow - Document all helpers in ai-chat guide --- docs/guides/ai-chat.mdx | 44 ++++++++++ packages/trigger-sdk/src/v3/ai.ts | 108 ++++++++++++++++++++++++- references/ai-chat/src/trigger/chat.ts | 4 + 3 files changed, 153 insertions(+), 3 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index 982c0c57fb0..12021036945 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -884,6 +884,50 @@ The `target` option accepts: - `"root"` — root task's run (the chat task) - A specific run ID string +### Accessing tool context in subtasks + +When a subtask runs via `ai.tool()`, it can access the tool call context and chat context from the parent: + +```ts +import { ai, chat } from "@trigger.dev/sdk/ai"; +import type { myChat } from "./chat"; + +export const mySubtask = schemaTask({ + id: "my-subtask", + schema: z.object({ query: z.string() }), + run: async ({ query }) => { + // Get the AI SDK's tool call ID (useful for data-* chunk IDs) + const 
toolCallId = ai.toolCallId(); + + // Get typed chat context — pass typeof yourChatTask for typed clientData + const { chatId, clientData } = ai.chatContextOrThrow(); + // clientData is typed based on myChat's clientDataSchema + + // Write a data chunk using the tool call ID + const { waitUntilComplete } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-progress", + id: toolCallId, + data: { status: "working", query, userId: clientData?.userId }, + }); + }, + }); + await waitUntilComplete(); + + return { result: "done" }; + }, +}); +``` + +| Helper | Returns | Description | +|--------|---------|-------------| +| `ai.toolCallId()` | `string \| undefined` | The AI SDK tool call ID | +| `ai.chatContext()` | `{ chatId, turn, continuation, clientData } \| undefined` | Chat context with typed `clientData`. Returns `undefined` if not in a chat context. | +| `ai.chatContextOrThrow()` | `{ chatId, turn, continuation, clientData }` | Same as above but throws if not in a chat context | +| `ai.currentToolOptions()` | `ToolCallExecutionOptions \| undefined` | Full tool execution options | + ## Client data and metadata ### Transport-level client data diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index c2ffd55bea5..47cad62b524 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -32,7 +32,24 @@ import { const METADATA_KEY = "tool.execute.options"; -export type ToolCallExecutionOptions = Omit; +export type ToolCallExecutionOptions = { + toolCallId: string; + experimental_context?: unknown; + /** Chat context — only present when the tool runs inside a chat.task turn. */ + chatId?: string; + turn?: number; + continuation?: boolean; + clientData?: unknown; +}; + +/** Chat context stored in locals during each chat.task turn for auto-detection. 
*/ +type ChatTurnContext = { + chatId: string; + turn: number; + continuation: boolean; + clientData?: TClientData; +}; +const chatTurnContextKey = locals.create("chat.turnContext"); type ToolResultContent = Array< | { @@ -83,13 +100,33 @@ function toolFromTask< description: task.description, inputSchema: convertTaskSchemaToToolParameters(task), execute: async (input, options) => { - const serializedOptions = options ? JSON.parse(JSON.stringify(options)) : undefined; + // Build tool metadata — skip messages (can be large) and abortSignal (non-serializable) + const toolMeta: ToolCallExecutionOptions = { + toolCallId: options?.toolCallId ?? "", + }; + if (options?.experimental_context !== undefined) { + try { + toolMeta.experimental_context = JSON.parse(JSON.stringify(options.experimental_context)); + } catch { + // Non-serializable context — skip + } + } + + // Auto-detect chat context from the parent turn + const chatCtx = locals.get(chatTurnContextKey); + if (chatCtx) { + toolMeta.chatId = chatCtx.chatId; + toolMeta.turn = chatCtx.turn; + toolMeta.continuation = chatCtx.continuation; + toolMeta.clientData = chatCtx.clientData; + } return await task .triggerAndWait(input as inferSchemaIn, { metadata: { - [METADATA_KEY]: serializedOptions, + [METADATA_KEY]: toolMeta as any, }, + tags: options?.toolCallId ? [`toolCallId:${options.toolCallId}`] : undefined, }) .unwrap(); }, @@ -109,6 +146,57 @@ function getToolOptionsFromMetadata(): ToolCallExecutionOptions | undefined { return tool as ToolCallExecutionOptions; } +/** + * Get the current tool call ID from inside a subtask invoked via `ai.tool()`. + * Returns `undefined` if not running as a tool subtask. + */ +function getToolCallId(): string | undefined { + return getToolOptionsFromMetadata()?.toolCallId; +} + +/** + * Get the chat context from inside a subtask invoked via `ai.tool()` within a `chat.task`. + * Pass `typeof yourChatTask` as the type parameter to get typed `clientData`. 
+ * Returns `undefined` if the parent is not a chat task. + * + * @example + * ```ts + * const ctx = ai.chatContext(); + * // ctx?.clientData is typed based on myChat's clientDataSchema + * ``` + */ +function getToolChatContext(): ChatTurnContext> | undefined { + const opts = getToolOptionsFromMetadata(); + if (!opts?.chatId) return undefined; + return { + chatId: opts.chatId, + turn: opts.turn ?? 0, + continuation: opts.continuation ?? false, + clientData: opts.clientData as InferChatClientData, + }; +} + +/** + * Get the chat context from inside a subtask, throwing if not in a chat context. + * Pass `typeof yourChatTask` as the type parameter to get typed `clientData`. + * + * @example + * ```ts + * const ctx = ai.chatContextOrThrow(); + * // ctx.chatId, ctx.clientData are guaranteed non-null + * ``` + */ +function getToolChatContextOrThrow(): ChatTurnContext> { + const ctx = getToolChatContext(); + if (!ctx) { + throw new Error( + "ai.chatContextOrThrow() called outside of a chat.task context. " + + "This helper can only be used inside a subtask invoked via ai.tool() from a chat.task." + ); + } + return ctx; +} + function convertTaskSchemaToToolParameters( task: AnyTask | TaskWithSchema ): Schema { @@ -136,6 +224,12 @@ function convertTaskSchemaToToolParameters( export const ai = { tool: toolFromTask, currentToolOptions: getToolOptionsFromMetadata, + /** Get the tool call ID from inside a subtask invoked via `ai.tool()`. */ + toolCallId: getToolCallId, + /** Get chat context (chatId, turn, clientData, etc.) from inside a subtask of a `chat.task`. Returns undefined if not in a chat context. */ + chatContext: getToolChatContext, + /** Get chat context or throw if not in a chat context. Pass `typeof yourChatTask` for typed clientData. 
*/ + chatContextOrThrow: getToolChatContextOrThrow, }; /** @@ -756,6 +850,14 @@ function chatTask< async () => { locals.set(chatPipeCountKey, 0); + // Store chat context for auto-detection by ai.tool subtasks + locals.set(chatTurnContextKey, { + chatId: currentWirePayload.chatId, + turn, + continuation, + clientData, + }); + // Per-turn stop controller (reset each turn) const stopController = new AbortController(); currentStopController = stopController; diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 0eed9cf0805..321f279ba12 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -150,6 +150,10 @@ export const deepResearch = schemaTask({ urls: z.array(z.string().url()).describe("URLs to fetch and analyze"), }), run: async ({ query, urls }) => { + // Access chat context from the parent chat.task — typed via typeof aiChat + const { chatId, clientData } = ai.chatContextOrThrow(); + console.log(`Deep research for chat ${chatId}, user ${clientData?.userId}`); + const partId = generateId(); const results: { url: string; status: number; snippet: string }[] = []; From 69d1ceb28a7f68f8a1317ed29e2c489434bcb87b Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 07:51:22 +0000 Subject: [PATCH 44/53] feat(chat): add preload support, dynamic tools, and preload-specific timeouts - Add onPreload hook and preloaded field to all lifecycle events - Add transport.preload(chatId) for eagerly starting runs before first message - Add preloadWarmTimeoutInSeconds and preloadTimeout task options - Add preload:true run tag and chat.preloaded span attributes - Add UserTool model for per-user dynamic tools loaded from DB - Load dynamic tools in onPreload/onChatStart via chat.local - Build dynamicTool() instances in run and spread into streamText tools - Reference project: preload on new chat, dynamic company-info and user-preferences tools --- packages/trigger-sdk/src/v3/ai.ts | 164 
+++++++++++++++++- packages/trigger-sdk/src/v3/chat.ts | 57 ++++++ references/ai-chat/prisma/schema.prisma | 20 ++- .../ai-chat/src/components/chat-app.tsx | 2 + references/ai-chat/src/trigger/chat.ts | 80 ++++++++- 5 files changed, 310 insertions(+), 13 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 47cad62b524..7ce8c4714b5 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -300,7 +300,7 @@ const chatStream = streams.define({ id: _CHAT_STREAM_KEY }); type ChatTaskWirePayload = { messages: TMessage[]; chatId: string; - trigger: "submit-message" | "regenerate-message"; + trigger: "submit-message" | "regenerate-message" | "preload"; messageId?: string; metadata?: TMetadata; /** Whether this run is continuing an existing chat whose previous run ended. */ @@ -330,8 +330,9 @@ export type ChatTaskPayload = { * The trigger type: * - `"submit-message"`: A new user message * - `"regenerate-message"`: Regenerate the last assistant response + * - `"preload"`: Run was preloaded before the first message (only on turn 0) */ - trigger: "submit-message" | "regenerate-message"; + trigger: "submit-message" | "regenerate-message" | "preload"; /** The ID of the message to regenerate (only for `"regenerate-message"`) */ messageId?: string; @@ -343,6 +344,8 @@ export type ChatTaskPayload = { continuation: boolean; /** The run ID of the previous run (only set when `continuation` is true). */ previousRunId?: string; + /** Whether this run was preloaded before the first message. */ + preloaded: boolean; }; /** @@ -510,6 +513,20 @@ async function pipeChat( * emits a control chunk and suspends via `messagesInput.wait()`. The frontend * transport resumes the same run by sending the next message via input streams. */ +/** + * Event passed to the `onPreload` callback. + */ +export type PreloadEvent = { + /** The unique identifier for the chat session. 
*/ + chatId: string; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A scoped access token for this chat run. */ + chatAccessToken: string; + /** Custom data from the frontend. */ + clientData?: TClientData; +}; + /** * Event passed to the `onChatStart` callback. */ @@ -528,6 +545,8 @@ export type ChatStartEvent = { continuation: boolean; /** The run ID of the previous run (only set when `continuation` is true). */ previousRunId?: string; + /** Whether this run was preloaded before the first message. */ + preloaded: boolean; }; /** @@ -552,6 +571,8 @@ export type TurnStartEvent = { continuation: boolean; /** The run ID of the previous run (only set when `continuation` is true). */ previousRunId?: string; + /** Whether this run was preloaded before the first message. */ + preloaded: boolean; }; /** @@ -601,6 +622,8 @@ export type TurnCompleteEvent = { continuation: boolean; /** The run ID of the previous run (only set when `continuation` is true). */ previousRunId?: string; + /** Whether this run was preloaded before the first message. */ + preloaded: boolean; }; export type ChatTaskOptions< @@ -638,6 +661,22 @@ export type ChatTaskOptions< */ run: (payload: ChatTaskRunPayload>) => Promise; + /** + * Called when a preloaded run starts, before the first message arrives. + * + * Use this to initialize state, create DB records, and load context early — + * so everything is ready when the user's first message comes through. + * + * @example + * ```ts + * onPreload: async ({ chatId, clientData }) => { + * await db.chat.create({ data: { id: chatId } }); + * userContext.init(await loadUser(clientData.userId)); + * } + * ``` + */ + onPreload?: (event: PreloadEvent>) => Promise | void; + /** * Called on the first turn (turn 0) of a new run, before the `run` function executes. 
* @@ -722,6 +761,26 @@ export type ChatTaskOptions< * @default "1h" */ chatAccessTokenTTL?: string; + + /** + * How long (in seconds) to keep the run warm after `onPreload` fires, + * waiting for the first message before suspending. + * + * Only applies to preloaded runs (triggered via `transport.preload()`). + * + * @default Same as `warmTimeoutInSeconds` + */ + preloadWarmTimeoutInSeconds?: number; + + /** + * How long to wait (suspended) for the first message after a preloaded run starts. + * If no message arrives within this time, the run ends. + * + * Only applies to preloaded runs. + * + * @default Same as `turnTimeout` + */ + preloadTimeout?: string; }; /** @@ -760,6 +819,7 @@ function chatTask< const { run: userRun, clientDataSchema, + onPreload, onChatStart, onTurnStart, onTurnComplete, @@ -767,6 +827,8 @@ function chatTask< turnTimeout = "1h", warmTimeoutInSeconds = 30, chatAccessTokenTTL = "1h", + preloadWarmTimeoutInSeconds, + preloadTimeout, ...restOptions } = options; @@ -786,6 +848,7 @@ function chatTask< let currentWirePayload = payload; const continuation = payload.continuation ?? false; const previousRunId = payload.previousRunId; + const preloaded = payload.trigger === "preload"; // Accumulated model messages across turns. Turn 1 initialises from the // full history the frontend sends; subsequent turns append only the new @@ -806,6 +869,96 @@ function chatTask< }); try { + // Handle preloaded runs — fire onPreload, then wait for the first real message + if (preloaded) { + if (activeSpan) { + activeSpan.setAttribute("chat.preloaded", true); + } + + const currentRunId = taskContext.ctx?.run.id ?? 
""; + let preloadAccessToken = ""; + if (currentRunId) { + try { + preloadAccessToken = await auth.createPublicToken({ + scopes: { + read: { runs: currentRunId }, + write: { inputStreams: currentRunId }, + }, + expirationTime: chatAccessTokenTTL, + }); + } catch { + // Token creation failed + } + } + + // Parse client data for the preload hook + const preloadClientData = (parseClientData + ? await parseClientData(payload.metadata) + : payload.metadata) as inferSchemaOut; + + // Fire onPreload hook + if (onPreload) { + await tracer.startActiveSpan( + "onPreload()", + async () => { + await onPreload({ + chatId: payload.chatId, + runId: currentRunId, + chatAccessToken: preloadAccessToken, + clientData: preloadClientData, + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": payload.chatId, + "chat.preloaded": true, + }, + } + ); + } + + // Wait for the first real message — use preload-specific timeouts if configured + const effectivePreloadWarmTimeout = + (metadata.get(WARM_TIMEOUT_METADATA_KEY) as number | undefined) + ?? preloadWarmTimeoutInSeconds + ?? warmTimeoutInSeconds; + + let firstMessage: ChatTaskWirePayload | undefined; + + if (effectivePreloadWarmTimeout > 0) { + const warm = await messagesInput.once({ + timeoutMs: effectivePreloadWarmTimeout * 1000, + spanName: "preload wait (warm)", + }); + + if (warm.ok) { + firstMessage = warm.output; + } + } + + if (!firstMessage) { + const effectivePreloadTimeout = + (metadata.get(TURN_TIMEOUT_METADATA_KEY) as string | undefined) + ?? preloadTimeout + ?? 
turnTimeout; + + const suspended = await messagesInput.wait({ + timeout: effectivePreloadTimeout, + spanName: "preload wait (suspended)", + }); + + if (!suspended.ok) { + return; // Timed out waiting for first message — end run + } + + firstMessage = suspended.output; + } + + currentWirePayload = firstMessage; + } + for (let turn = 0; turn < maxTurns; turn++) { // Extract turn-level context before entering the span const { metadata: wireMetadata, messages: uiMessages, ...restWire } = currentWirePayload; @@ -947,6 +1100,7 @@ function chatTask< chatAccessToken: turnAccessToken, continuation, previousRunId, + preloaded, }); }, { @@ -956,6 +1110,7 @@ function chatTask< "chat.id": currentWirePayload.chatId, "chat.messages.count": accumulatedMessages.length, "chat.continuation": continuation, + "chat.preloaded": preloaded, ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), }, } @@ -978,6 +1133,7 @@ function chatTask< clientData, continuation, previousRunId, + preloaded, }); }, { @@ -989,6 +1145,7 @@ function chatTask< "chat.messages.count": accumulatedMessages.length, "chat.trigger": currentWirePayload.trigger, "chat.continuation": continuation, + "chat.preloaded": preloaded, ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), }, } @@ -1014,6 +1171,7 @@ function chatTask< clientData, continuation, previousRunId, + preloaded, signal: combinedSignal, cancelSignal, stopSignal, @@ -1119,6 +1277,7 @@ function chatTask< stopped: wasStopped, continuation, previousRunId, + preloaded, }); }, { @@ -1129,6 +1288,7 @@ function chatTask< "chat.turn": turn + 1, "chat.stopped": wasStopped, "chat.continuation": continuation, + "chat.preloaded": preloaded, ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), "chat.messages.count": accumulatedMessages.length, "chat.response.parts.count": capturedResponseMessage?.parts?.length ?? 
0, diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index f8aff0726ec..bf269c88ebc 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -183,6 +183,7 @@ export type TriggerChatTransportOptions = { /** Priority (lower = higher priority). */ priority?: number; }; + }; /** @@ -451,6 +452,62 @@ export class TriggerChatTransport implements ChatTransport { this._onSessionChange = callback; } + /** + * Eagerly trigger a run for a chat before the first message is sent. + * This allows initialization (DB setup, context loading) to happen + * while the user is still typing, reducing first-response latency. + * + * The task's `onPreload` hook fires immediately. The run then waits + * for the first message via input stream. When `sendMessages` is called + * later, it detects the existing session and sends via input stream + * instead of triggering a new run. + * + * No-op if a session already exists for this chatId. + */ + async preload(chatId: string): Promise { + // Don't preload if session already exists + if (this.sessions.get(chatId)?.runId) return; + + const payload = { + messages: [] as never[], + chatId, + trigger: "preload" as const, + metadata: this.defaultMetadata, + }; + + const currentToken = await this.resolveAccessToken(); + const apiClient = new ApiClient(this.baseURL, currentToken); + + const autoTags = [`chat:${chatId}`, "preload:true"]; + const userTags = this.triggerOptions?.tags ?? []; + const tags = [...autoTags, ...userTags].slice(0, 5); + + const triggerResponse = await apiClient.triggerTask(this.taskId, { + payload, + options: { + payloadType: "application/json", + tags, + queue: this.triggerOptions?.queue ? 
{ name: this.triggerOptions.queue } : undefined, + maxAttempts: this.triggerOptions?.maxAttempts, + machine: this.triggerOptions?.machine, + priority: this.triggerOptions?.priority, + }, + }); + + const runId = triggerResponse.id; + const publicAccessToken = + "publicAccessToken" in triggerResponse + ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken + : undefined; + + const newSession: ChatSessionState = { + runId, + publicAccessToken: publicAccessToken ?? currentToken, + }; + this.sessions.set(chatId, newSession); + this.notifySessionChange(chatId, newSession); + } + private notifySessionChange( chatId: string, session: ChatSessionState | null diff --git a/references/ai-chat/prisma/schema.prisma b/references/ai-chat/prisma/schema.prisma index d3941b7508c..4cecb78bc69 100644 --- a/references/ai-chat/prisma/schema.prisma +++ b/references/ai-chat/prisma/schema.prisma @@ -8,14 +8,24 @@ datasource db { } model User { - id String @id + id String @id name String - plan String @default("free") // "free" | "pro" + plan String @default("free") // "free" | "pro" preferredModel String? 
- messageCount Int @default(0) - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + messageCount Int @default(0) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt chats Chat[] + tools UserTool[] +} + +model UserTool { + id String @id @default(cuid()) + userId String + name String + description String + responseTemplate String @default("") + user User @relation(fields: [userId], references: [id]) } model Chat { diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index c1008e2fe83..e304e2b3f65 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -98,6 +98,8 @@ export function ChatApp({ setActiveChatId(id); setMessages([]); setNewChatModel(DEFAULT_MODEL); + // Eagerly start the run — onPreload fires immediately for initialization + transport.preload(id); } function handleSelectChat(id: string) { diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 321f279ba12..8611266eef9 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,7 +1,7 @@ import { chat, ai } from "@trigger.dev/sdk/ai"; import { schemaTask } from "@trigger.dev/sdk"; -import { streamText, tool, stepCountIs, generateId } from "ai"; -import type { LanguageModel } from "ai"; +import { streamText, tool, dynamicTool, stepCountIs, generateId } from "ai"; +import type { LanguageModel, Tool as AITool } from "ai"; import { openai } from "@ai-sdk/openai"; import { anthropic } from "@ai-sdk/anthropic"; import { z } from "zod"; @@ -136,6 +136,11 @@ const userContext = chat.local<{ messageCount: number; }>(); +// Per-run dynamic tools — loaded from DB in onPreload/onChatStart +const userToolDefs = chat.local< + Array<{ name: string; description: string; responseTemplate: string }> +>(); + // -------------------------------------------------------------------------- // 
Subtask: deep research — fetches multiple URLs and streams progress // back to the parent chat via chat.stream using data-* chunks @@ -244,8 +249,54 @@ export const aiChat = chat.task({ clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), warmTimeoutInSeconds: 60, chatAccessTokenTTL: "2h", - onChatStart: async ({ chatId, runId, chatAccessToken, clientData, continuation }) => { - // Load user context from DB — available for the entire run + onPreload: async ({ chatId, runId, chatAccessToken, clientData }) => { + // Eagerly initialize before the user's first message arrives + const user = await prisma.user.upsert({ + where: { id: clientData.userId }, + create: { id: clientData.userId, name: "User" }, + update: {}, + }); + userContext.init({ + userId: user.id, + name: user.name, + plan: user.plan as "free" | "pro", + preferredModel: user.preferredModel, + messageCount: user.messageCount, + }); + + // Load user-specific dynamic tools + const tools = await prisma.userTool.findMany({ where: { userId: clientData.userId } }); + userToolDefs.init(tools); + + // Create chat record and session + await prisma.chat.upsert({ + where: { id: chatId }, + create: { + id: chatId, + title: "New chat", + userId: user.id, + model: clientData?.model ?? 
DEFAULT_MODEL, + }, + update: {}, + }); + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + onChatStart: async ({ chatId, runId, chatAccessToken, clientData, continuation, preloaded }) => { + if (preloaded) { + // Already initialized in onPreload — just update session + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + return; + } + + // Non-preloaded path: full initialization const user = await prisma.user.upsert({ where: { id: clientData.userId }, create: { id: clientData.userId, name: "User" }, @@ -259,8 +310,11 @@ export const aiChat = chat.task({ messageCount: user.messageCount, }); + // Load user-specific dynamic tools + const tools = await prisma.userTool.findMany({ where: { userId: clientData.userId } }); + userToolDefs.init(tools); + if (!continuation) { - // Brand new chat — create the record with the selected model await prisma.chat.upsert({ where: { id: chatId }, create: { @@ -273,7 +327,6 @@ export const aiChat = chat.task({ }); } - // Always update session for the new run await prisma.chatSession.upsert({ where: { id: chatId }, create: { id: chatId, runId, publicAccessToken: chatAccessToken }, @@ -328,6 +381,20 @@ export const aiChat = chat.task({ const modelId = clientData?.model ?? userContext.preferredModel ?? undefined; const useReasoning = REASONING_MODELS.has(modelId ?? DEFAULT_MODEL); + // Build dynamic tools from user's DB-configured tools (loaded in onPreload/onChatStart) + const dynamicTools: Record> = {}; + for (const t of userToolDefs.value ?? 
[]) { + dynamicTools[t.name] = dynamicTool({ + description: t.description, + inputSchema: z.object({ + query: z.string().describe("The query or topic to look up"), + }), + execute: async (input) => { + return { result: t.responseTemplate.replace("{{query}}", (input as any).query) }; + }, + }); + } + return streamText({ model: getModel(modelId), system: `You are a helpful assistant for ${userContext.name} (${userContext.plan} plan). Be concise and friendly.`, @@ -336,6 +403,7 @@ export const aiChat = chat.task({ inspectEnvironment, webFetch, deepResearch: ai.tool(deepResearch), + ...dynamicTools, }, stopWhen: stepCountIs(10), abortSignal: stopSignal, From 6ccafaa2eacf69071503985c39f5ec075cf6f742 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 08:13:17 +0000 Subject: [PATCH 45/53] docs: add mermaid architecture diagrams for ai-chat system --- references/ai-chat/ARCHITECTURE.md | 311 +++++++++++++++++++++++++++++ 1 file changed, 311 insertions(+) create mode 100644 references/ai-chat/ARCHITECTURE.md diff --git a/references/ai-chat/ARCHITECTURE.md b/references/ai-chat/ARCHITECTURE.md new file mode 100644 index 00000000000..8adbc0c4a1a --- /dev/null +++ b/references/ai-chat/ARCHITECTURE.md @@ -0,0 +1,311 @@ +# AI Chat Architecture + +## System Overview + +```mermaid +graph TB + subgraph Frontend["Frontend (Browser)"] + UC[useChat Hook] + TCT[TriggerChatTransport] + UI[Chat UI Components] + end + + subgraph Platform["Trigger.dev Platform"] + API[REST API] + RS[Realtime Streams] + RE[Run Engine] + end + + subgraph Worker["Task Worker"] + CT[chat.task Turn Loop] + ST[streamText / AI SDK] + LLM[LLM Provider] + SUB[Subtasks via ai.tool] + end + + UI -->|user types| UC + UC -->|sendMessages| TCT + TCT -->|triggerTask / sendInputStream| API + API -->|queue run / deliver input| RE + RE -->|execute| CT + CT -->|call| ST + ST -->|API call| LLM + LLM -->|stream chunks| ST + ST -->|UIMessageChunks| RS + RS -->|SSE| TCT + TCT -->|ReadableStream| UC + UC -->|update| 
UI + CT -->|triggerAndWait| SUB + SUB -->|chat.stream target:root| RS +``` + +## Detailed Flow: New Chat (First Message) + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + User->>useChat: sendMessage("Hello") + useChat->>useChat: No session for chatId → trigger new run + + useChat->>API: triggerTask(payload, tags: [chat:id]) + API-->>useChat: { runId, publicAccessToken } + useChat->>useChat: Store session, subscribe to SSE + + API->>Task: Start run with ChatTaskWirePayload + + Note over Task: Preload phase skipped (trigger ≠ "preload") + + rect rgb(240, 248, 255) + Note over Task: Turn 0 + Task->>Task: convertToModelMessages(uiMessages) + Task->>Task: Mint access token + Task->>Task: onChatStart({ chatId, messages, clientData }) + Task->>Task: onTurnStart({ chatId, messages, uiMessages }) + Task->>LLM: streamText({ model, messages, abortSignal }) + LLM-->>Task: Stream response chunks + Task->>API: streams.pipe("chat", uiStream) + API-->>useChat: SSE: UIMessageChunks + useChat-->>User: Render streaming text + Task->>Task: onFinish → capturedResponseMessage + Task->>Task: Accumulate response in messages + Task->>API: Write __trigger_turn_complete chunk + API-->>useChat: SSE: { type: __trigger_turn_complete, publicAccessToken } + useChat->>useChat: Close stream, update session + Task->>Task: onTurnComplete({ messages, uiMessages, stopped }) + end + + rect rgb(255, 248, 240) + Note over Task: Wait for next message + Task->>Task: messagesInput.once() [warm, 30s] + Note over Task: No message → suspend + Task->>Task: messagesInput.wait() [suspended, 1h] + end +``` + +## Detailed Flow: Multi-Turn (Subsequent Messages) + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM 
Provider + + Note over Task: Suspended, waiting for message + + User->>useChat: sendMessage("Tell me more") + useChat->>useChat: Session exists → send via input stream + useChat->>API: sendInputStream(runId, "chat-messages", payload) + Note right of useChat: Only sends new message
(not full history) + + API->>Task: Deliver to messagesInput + Task->>Task: Wake from suspend + + rect rgb(240, 248, 255) + Note over Task: Turn 1 + Task->>Task: Append new message to accumulators + Task->>Task: Mint fresh access token + Task->>Task: onTurnStart({ turn: 1, messages }) + Task->>LLM: streamText({ messages: [all accumulated] }) + LLM-->>Task: Stream response + Task->>API: streams.pipe("chat", uiStream) + API-->>useChat: SSE: UIMessageChunks + useChat-->>User: Render streaming text + Task->>API: Write __trigger_turn_complete + Task->>Task: onTurnComplete({ turn: 1 }) + end + + Task->>Task: Wait for next message (warm → suspend) +``` + +## Stop Signal Flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + Note over Task: Streaming response... + + User->>useChat: Click "Stop" + useChat->>API: sendInputStream(runId, "chat-stop", { stop: true }) + useChat->>useChat: Set skipToTurnComplete = true + + API->>Task: Deliver to stopInput + Task->>Task: stopController.abort() + Task->>LLM: AbortSignal fires + LLM-->>Task: Stream ends (AbortError) + Task->>Task: Catch AbortError, fall through + Task->>Task: await onFinishPromise (race condition fix) + Task->>Task: cleanupAbortedParts(responseMessage) + Note right of Task: Remove partial tool calls
Mark streaming parts as done + + Task->>API: Write __trigger_turn_complete + API-->>useChat: SSE: __trigger_turn_complete + useChat->>useChat: skipToTurnComplete = false, close stream + + Task->>Task: onTurnComplete({ stopped: true, responseMessage: cleaned }) + Task->>Task: Wait for next message +``` + +## Preload Flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + + User->>useChat: Click "New Chat" + useChat->>API: transport.preload(chatId) + Note right of useChat: payload: { messages: [], trigger: "preload" }
tags: [chat:id, preload:true] + API-->>useChat: { runId, publicAccessToken } + useChat->>useChat: Store session + + API->>Task: Start run (trigger = "preload") + + rect rgb(240, 255, 240) + Note over Task: Preload Phase + Task->>Task: Mint access token + Task->>Task: onPreload({ chatId, clientData }) + Note right of Task: DB setup, load user context,
load dynamic tools + Task->>Task: messagesInput.once() [warm] + Note over Task: Waiting for first message... + end + + Note over User: User is typing... + + User->>useChat: sendMessage("Hello") + useChat->>useChat: Session exists → send via input stream + useChat->>API: sendInputStream(runId, "chat-messages", payload) + API->>Task: Deliver message + + rect rgb(240, 248, 255) + Note over Task: Turn 0 (preloaded = true) + Task->>Task: onChatStart({ preloaded: true }) + Task->>Task: onTurnStart({ preloaded: true }) + Task->>Task: run() with preloaded dynamic tools ready + end +``` + +## Subtask Streaming (Tool as Task) + +```mermaid +sequenceDiagram + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Chat as chat.task + participant LLM as LLM Provider + participant Sub as Subtask (ai.tool) + + Chat->>LLM: streamText({ tools: { research: ai.tool(task) } }) + LLM-->>Chat: Tool call: research({ query, urls }) + + Chat->>API: triggerAndWait(subtask, input) + Note right of Chat: Passes toolCallId, chatId,
clientData via metadata + + API->>Sub: Start subtask + + Sub->>Sub: ai.chatContextOrThrow() → { chatId, clientData } + Sub->>API: chat.stream.writer({ target: "root" }) + Note right of Sub: Write data-research-progress
chunks to parent's stream + API-->>useChat: SSE: data-* chunks + useChat-->>useChat: Render progress UI + + Sub-->>Chat: Return result + Chat->>LLM: Tool result + LLM-->>Chat: Continue response +``` + +## Continuation Flow (Run Timeout / Cancel) + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + + Note over Task: Previous run timed out / was cancelled + + User->>useChat: sendMessage("Continue") + useChat->>API: sendInputStream(runId, payload) + API-->>useChat: Error (run dead) + + useChat->>useChat: Delete session, set isContinuation = true + useChat->>API: triggerTask(payload, continuation: true, previousRunId) + API-->>useChat: New { runId, publicAccessToken } + + API->>Task: Start new run + + rect rgb(255, 245, 238) + Note over Task: Turn 0 (continuation = true) + Task->>Task: cleanupAbortedParts(incoming messages) + Note right of Task: Strip incomplete tool calls
from previous run's response + Task->>Task: onChatStart({ continuation: true, previousRunId }) + Task->>Task: Normal turn flow... + end +``` + +## Hook Lifecycle + +```mermaid +graph TD + START([Run Starts]) --> IS_PRELOAD{trigger = preload?} + + IS_PRELOAD -->|Yes| PRELOAD[onPreload] + PRELOAD --> WAIT_MSG[Wait for first message
warm → suspend] + WAIT_MSG --> TURN0 + + IS_PRELOAD -->|No| TURN0 + + TURN0[Turn 0] --> CHAT_START[onChatStart
continuation, preloaded] + CHAT_START --> TURN_START_0[onTurnStart] + TURN_START_0 --> RUN_0[run → streamText] + RUN_0 --> TURN_COMPLETE_0[onTurnComplete
stopped, responseMessage] + + TURN_COMPLETE_0 --> WAIT{Wait for
next message} + WAIT -->|Message arrives| TURN_N[Turn N] + WAIT -->|Timeout| END_RUN([Run Ends]) + + TURN_N --> TURN_START_N[onTurnStart] + TURN_START_N --> RUN_N[run → streamText] + RUN_N --> TURN_COMPLETE_N[onTurnComplete] + TURN_COMPLETE_N --> WAIT +``` + +## Stream Architecture + +```mermaid +graph LR + subgraph Output["Output Stream (chat)"] + direction TB + O1[UIMessageChunks
text, reasoning, tools] + O2[data-* custom chunks] + O3[__trigger_turn_complete
control chunk] + end + + subgraph Input["Input Streams"] + direction TB + I1[chat-messages
User messages] + I2[chat-stop
Stop signal] + end + + Frontend -->|sendInputStream| I1 + Frontend -->|sendInputStream| I2 + I1 -->|messagesInput.once/wait| Worker + I2 -->|stopInput.on| Worker + Worker -->|streams.pipe / chat.stream| Output + Subtask -->|chat.stream target:root| Output + Output -->|SSE /realtime/v1/streams| Frontend +``` From dac71e2a65dd46f7b7f88d34f74da757844ffe8e Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 10:02:03 +0000 Subject: [PATCH 46/53] docs: add sequence diagrams to ai-chat guide --- docs/guides/ai-chat.mdx | 91 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index 12021036945..8343b840af6 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -17,6 +17,97 @@ The `@trigger.dev/sdk` provides a custom [ChatTransport](https://sdk.vercel.ai/d No custom API routes needed. Your chat backend is a Trigger.dev task. + + +### First message flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + User->>useChat: sendMessage("Hello") + useChat->>useChat: No session for chatId → trigger new run + useChat->>API: triggerTask(payload, tags: [chat:id]) + API-->>useChat: { runId, publicAccessToken } + useChat->>useChat: Store session, subscribe to SSE + + API->>Task: Start run with ChatTaskWirePayload + Task->>Task: onChatStart({ chatId, messages, clientData }) + Task->>Task: onTurnStart({ chatId, messages }) + Task->>LLM: streamText({ model, messages, abortSignal }) + LLM-->>Task: Stream response chunks + Task->>API: streams.pipe("chat", uiStream) + API-->>useChat: SSE: UIMessageChunks + useChat-->>User: Render streaming text + Task->>API: Write __trigger_turn_complete + API-->>useChat: SSE: turn complete + refreshed token + useChat->>useChat: Close stream, update session + Task->>Task: onTurnComplete({ 
messages, stopped: false }) + Task->>Task: Wait for next message (warm → suspend) +``` + +### Multi-turn flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + Note over Task: Suspended, waiting for message + + User->>useChat: sendMessage("Tell me more") + useChat->>useChat: Session exists → send via input stream + useChat->>API: sendInputStream(runId, "chat-messages", payload) + Note right of useChat: Only sends new message (not full history) + + API->>Task: Deliver to messagesInput + Task->>Task: Wake from suspend + Task->>Task: Append to accumulated messages + Task->>Task: onTurnStart({ turn: 1 }) + Task->>LLM: streamText({ messages: [all accumulated] }) + LLM-->>Task: Stream response + Task->>API: streams.pipe("chat", uiStream) + API-->>useChat: SSE: UIMessageChunks + useChat-->>User: Render streaming text + Task->>API: Write __trigger_turn_complete + Task->>Task: onTurnComplete({ turn: 1 }) + Task->>Task: Wait for next message (warm → suspend) +``` + +### Stop signal flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + Note over Task: Streaming response... + + User->>useChat: Click "Stop" + useChat->>API: sendInputStream(runId, "chat-stop", { stop: true }) + API->>Task: Deliver to stopInput + Task->>Task: stopController.abort() + LLM-->>Task: Stream ends (AbortError) + Task->>Task: cleanupAbortedParts(responseMessage) + Note right of Task: Remove partial tool calls,
mark streaming parts as done + Task->>API: Write __trigger_turn_complete + API-->>useChat: SSE: turn complete + Task->>Task: onTurnComplete({ stopped: true }) + Task->>Task: Wait for next message +``` + +
+ Requires `@trigger.dev/sdk` version **4.4.0 or later** and the `ai` package **v5.0.0 or later**. From f22a0b243e25b3a7b1719586ce7d50239324e0c8 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 11:08:48 +0000 Subject: [PATCH 47/53] feat(chat): auto-hydrate chat.local values in ai.tool subtasks --- docs/guides/ai-chat.mdx | 57 ++++++++++++++-- packages/trigger-sdk/src/v3/ai.ts | 95 ++++++++++++++++++++++---- references/ai-chat/src/trigger/chat.ts | 4 +- 3 files changed, 135 insertions(+), 21 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index 8343b840af6..ff19407b4d2 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -1122,9 +1122,11 @@ run: async ({ messages, signal }) => { Use `chat.local` to create typed, run-scoped data that persists across turns and is accessible from anywhere — the run function, tools, nested helpers. Each run gets its own isolated copy, and locals are automatically cleared between runs. +When a subtask is invoked via `ai.tool()`, initialized locals are automatically serialized into the subtask's metadata and hydrated on first access — no extra code needed. Subtask changes to hydrated locals are local to the subtask and don't propagate back to the parent. 
+ ### Declaring and initializing -Declare locals at module level, then initialize them inside a lifecycle hook where you have context (chatId, clientData, etc.): +Declare locals at module level with a unique `id`, then initialize them inside a lifecycle hook where you have context (chatId, clientData, etc.): ```ts import { chat } from "@trigger.dev/sdk/ai"; @@ -1133,12 +1135,12 @@ import { openai } from "@ai-sdk/openai"; import { z } from "zod"; import { db } from "@/lib/db"; -// Declare at module level — multiple locals can coexist +// Declare at module level — each local needs a unique id const userContext = chat.local<{ name: string; plan: "free" | "pro"; messageCount: number; -}>(); +}>({ id: "userContext" }); export const myChat = chat.task({ id: "my-chat", @@ -1172,7 +1174,7 @@ export const myChat = chat.task({ Locals are accessible from anywhere during task execution — including AI SDK tools: ```ts -const userContext = chat.local<{ plan: "free" | "pro" }>(); +const userContext = chat.local<{ plan: "free" | "pro" }>({ id: "userContext" }); const premiumTool = tool({ description: "Access premium features", @@ -1186,6 +1188,49 @@ const premiumTool = tool({ }); ``` +### Accessing from subtasks + +When you use `ai.tool()` to expose a subtask, chat locals are automatically available read-only: + +```ts +import { chat, ai } from "@trigger.dev/sdk/ai"; +import { schemaTask } from "@trigger.dev/sdk"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const userContext = chat.local<{ name: string; plan: "free" | "pro" }>({ id: "userContext" }); + +export const analyzeData = schemaTask({ + id: "analyze-data", + schema: z.object({ query: z.string() }), + run: async ({ query }) => { + // userContext.name just works — auto-hydrated from parent metadata + console.log(`Analyzing for ${userContext.name}`); + // Changes here are local to this subtask and don't propagate back + }, +}); + +export const myChat = chat.task({ + 
id: "my-chat", + onChatStart: async ({ clientData }) => { + userContext.init({ name: "Alice", plan: "pro" }); + }, + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + tools: { analyzeData: ai.tool(analyzeData) }, + abortSignal: signal, + }); + }, +}); +``` + + + Values must be JSON-serializable for subtask access. Non-serializable values (functions, class instances, etc.) will be lost during transfer. + + ### Dirty tracking and persistence The `hasChanged()` method returns `true` if any property was set since the last check, then resets the flag. Use it in lifecycle hooks to only persist when data actually changed: @@ -1207,7 +1252,7 @@ onTurnComplete: async ({ chatId }) => { | Method | Description | |--------|-------------| -| `chat.local()` | Create a typed local (declare at module level) | +| `chat.local({ id })` | Create a typed local with a unique id (declare at module level) | | `local.init(value)` | Initialize with a value (call in hooks or `run`) | | `local.hasChanged()` | Returns `true` if modified since last check, resets flag | | `local.get()` | Returns a plain object copy (for serialization) | @@ -1337,7 +1382,7 @@ See [onTurnComplete](#onturncomplete) for the full field reference. |--------|-------------| | `chat.task(options)` | Create a chat task | | `chat.pipe(source, options?)` | Pipe a stream to the frontend (from anywhere inside a task) | -| `chat.local()` | Create a per-run typed local (see [Per-run data](#per-run-data-with-chatlocal)) | +| `chat.local({ id })` | Create a per-run typed local (see [Per-run data](#per-run-data-with-chatlocal)) | | `chat.createAccessToken(taskId)` | Create a public access token for a chat task | | `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. 
`"2h"`) | | `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 7ce8c4714b5..b4717def0ec 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -40,6 +40,8 @@ export type ToolCallExecutionOptions = { turn?: number; continuation?: boolean; clientData?: unknown; + /** Serialized chat.local values from the parent run. @internal */ + chatLocals?: Record; }; /** Chat context stored in locals during each chat.task turn for auto-detection. */ @@ -121,6 +123,18 @@ function toolFromTask< toolMeta.clientData = chatCtx.clientData; } + // Serialize initialized chat.local values for subtask hydration + const chatLocals: Record = {}; + for (const entry of chatLocalRegistry) { + const value = locals.get(entry.key); + if (value !== undefined) { + chatLocals[entry.id] = value; + } + } + if (Object.keys(chatLocals).length > 0) { + toolMeta.chatLocals = chatLocals; + } + return await task .triggerAndWait(input as inferSchemaIn, { metadata: { @@ -1546,8 +1560,31 @@ function cleanupAbortedParts(message: UIMessage): UIMessage { const CHAT_LOCAL_KEY: unique symbol = Symbol("chatLocalKey"); /** @internal Symbol for storing the dirty-tracking locals key. */ const CHAT_LOCAL_DIRTY_KEY: unique symbol = Symbol("chatLocalDirtyKey"); -/** @internal Counter for generating unique locals IDs. */ -let chatLocalCounter = 0; + +// --------------------------------------------------------------------------- +// chat.local registry — tracks all declared locals for serialization +// --------------------------------------------------------------------------- + +type ChatLocalEntry = { key: ReturnType; id: string }; +const chatLocalRegistry = new Set(); + +/** @internal Run-scoped flag to ensure hydration happens at most once per run. 
*/ +const chatLocalsHydratedKey = locals.create("chat.locals.hydrated"); + +/** + * Hydrate chat.local values from subtask metadata (set by toolFromTask). + * Runs once per run — subsequent calls are no-ops. + * @internal + */ +function hydrateLocalsFromMetadata(): void { + if (locals.get(chatLocalsHydratedKey)) return; + locals.set(chatLocalsHydratedKey, true); + const opts = metadata.get(METADATA_KEY) as ToolCallExecutionOptions | undefined; + if (!opts?.chatLocals) return; + for (const [id, value] of Object.entries(opts.chatLocals)) { + locals.set(locals.create(id), value); + } +} /** * A Proxy-backed, run-scoped data object that appears as `T` to users. @@ -1574,12 +1611,16 @@ export type ChatLocal> = T & { * * Multiple locals can coexist — each gets its own isolated run-scoped storage. * + * The `id` is required and must be unique across all `chat.local()` calls in + * your project. It's used to serialize values into subtask metadata so that + * `ai.tool()` subtasks can auto-hydrate parent locals (read-only). 
+ * * @example * ```ts * import { chat } from "@trigger.dev/sdk/ai"; * - * const userPrefs = chat.local<{ theme: string; language: string }>(); - * const gameState = chat.local<{ score: number; streak: number }>(); + * const userPrefs = chat.local<{ theme: string; language: string }>({ id: "userPrefs" }); + * const gameState = chat.local<{ score: number; streak: number }>({ id: "gameState" }); * * export const myChat = chat.task({ * id: "my-chat", @@ -1603,9 +1644,12 @@ export type ChatLocal> = T & { * }); * ``` */ -function chatLocal>(): ChatLocal { - const localKey = locals.create(`chat.local.${chatLocalCounter++}`); - const dirtyKey = locals.create(`chat.local.${chatLocalCounter++}.dirty`); +function chatLocal>(options: { id: string }): ChatLocal { + const id = `chat.local.${options.id}`; + const localKey = locals.create(id); + const dirtyKey = locals.create(`${id}.dirty`); + + chatLocalRegistry.add({ key: localKey, id }); const target = {} as any; target[CHAT_LOCAL_KEY] = localKey; @@ -1633,7 +1677,11 @@ function chatLocal>(): ChatLocal { } if (prop === "get") { return () => { - const current = locals.get(localKey); + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } if (current === undefined) { throw new Error( "local.get() called before initialization. Call local.init() first." @@ -1645,12 +1693,21 @@ function chatLocal>(): ChatLocal { // toJSON for serialization (JSON.stringify(local)) if (prop === "toJSON") { return () => { - const current = locals.get(localKey); + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } return current ? 
{ ...current } : undefined; }; } - const current = locals.get(localKey); + let current = locals.get(localKey); + if (current === undefined) { + // Auto-hydrate from parent metadata in subtask context + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } if (current === undefined) return undefined; return (current as any)[prop]; }, @@ -1673,18 +1730,30 @@ function chatLocal>(): ChatLocal { has(_target, prop) { if (typeof prop === "symbol") return prop in _target; - const current = locals.get(localKey); + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } return current !== undefined && prop in current; }, ownKeys() { - const current = locals.get(localKey); + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } return current ? Reflect.ownKeys(current) : []; }, getOwnPropertyDescriptor(_target, prop) { if (typeof prop === "symbol") return undefined; - const current = locals.get(localKey); + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } if (current === undefined || !(prop in current)) return undefined; return { configurable: true, diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 8611266eef9..52ba1865da6 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -134,12 +134,12 @@ const userContext = chat.local<{ plan: "free" | "pro"; preferredModel: string | null; messageCount: number; -}>(); +}>({ id: "userContext" }); // Per-run dynamic tools — loaded from DB in onPreload/onChatStart const userToolDefs = chat.local< Array<{ name: string; description: string; responseTemplate: string }> ->(); +>({ id: "userToolDefs" }); // -------------------------------------------------------------------------- // Subtask: 
deep research — fetches multiple URLs and streams progress From 3c4b5e570e6dea358d498c4af1ba17acc2ba3273 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 15:34:50 +0000 Subject: [PATCH 48/53] feat(chat): add chat.defer(), preload toggle, TTFB measurement, and fix ChatTaskWirePayload export --- docs/guides/ai-chat.mdx | 1 + packages/trigger-sdk/src/v3/ai.ts | 49 ++++++++++++++++- references/ai-chat/src/app/page.tsx | 1 + .../ai-chat/src/components/chat-app.tsx | 9 +++- .../ai-chat/src/components/chat-sidebar.tsx | 16 ++++++ references/ai-chat/src/components/chat.tsx | 52 +++++++++++++++++++ references/ai-chat/src/trigger/chat.ts | 34 +++++------- 7 files changed, 136 insertions(+), 26 deletions(-) diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx index ff19407b4d2..44ac2a97381 100644 --- a/docs/guides/ai-chat.mdx +++ b/docs/guides/ai-chat.mdx @@ -1387,6 +1387,7 @@ See [onTurnComplete](#onturncomplete) for the full field reference. | `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. `"2h"`) | | `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | | `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | +| `chat.defer(promise)` | Run background work in parallel with streaming, awaited before `onTurnComplete` | | `chat.isStopped()` | Check if the current turn was stopped by the user (works anywhere during a turn) | | `chat.cleanupAbortedParts(message)` | Remove incomplete parts from a stopped response message | | `chat.stream` | Typed chat output stream — use `.writer()`, `.pipe()`, `.append()`, `.read()` | diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index b4717def0ec..122e1f716c6 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -309,9 +309,8 @@ const chatStream = streams.define({ id: _CHAT_STREAM_KEY }); /** * The wire payload shape sent by `TriggerChatTransport`. 
* Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name. - * @internal */ -type ChatTaskWirePayload = { +export type ChatTaskWirePayload = { messages: TMessage[]; chatId: string; trigger: "submit-message" | "regenerate-message" | "preload"; @@ -384,6 +383,13 @@ export type ChatTaskRunPayload = ChatTaskPayload({ id: CHAT_MESSAGES_STREAM_ID }); const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); +/** + * Per-turn deferred promises. Registered via `chat.defer()`, awaited + * before `onTurnComplete` fires. Reset each turn. + * @internal + */ +const chatDeferKey = locals.create>>("chat.defer"); + /** * Run-scoped pipe counter. Stored in locals so concurrent runs in the * same worker don't share state. @@ -1016,6 +1022,7 @@ function chatTask< `chat turn ${turn + 1}`, async () => { locals.set(chatPipeCountKey, 0); + locals.set(chatDeferKey, new Set()); // Store chat context for auto-detection by ai.tool subtasks locals.set(chatTurnContextKey, { @@ -1270,6 +1277,16 @@ function chatTask< turnAccessToken ); + // Await deferred background work (e.g. DB writes from onTurnStart) + // before firing onTurnComplete so hooks can rely on the work being done. + const deferredWork = locals.get(chatDeferKey); + if (deferredWork && deferredWork.size > 0) { + await Promise.race([ + Promise.allSettled(deferredWork), + new Promise((r) => setTimeout(r, 5_000)), + ]); + } + // Fire onTurnComplete after response capture if (onTurnComplete) { await tracer.startActiveSpan( @@ -1487,6 +1504,32 @@ function isStopped(): boolean { return controller?.signal.aborted ?? false; } +// --------------------------------------------------------------------------- +// Per-turn deferred work +// --------------------------------------------------------------------------- + +/** + * Register a promise that runs in the background during the current turn. + * + * Use this to move non-blocking work (DB writes, analytics, etc.) 
out of + * the critical path. The promise runs in parallel with streaming and is + * awaited (with a 5 s timeout) before `onTurnComplete` fires. + * + * @example + * ```ts + * onTurnStart: async ({ chatId, uiMessages }) => { + * // Persist messages without blocking the LLM call + * chat.defer(db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } })); + * }, + * ``` + */ +function chatDefer(promise: Promise): void { + const promises = locals.get(chatDeferKey); + if (promises) { + promises.add(promise); + } +} + // --------------------------------------------------------------------------- // Aborted message cleanup // --------------------------------------------------------------------------- @@ -1806,6 +1849,8 @@ export const chat = { isStopped, /** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. */ cleanupAbortedParts, + /** Register background work that runs in parallel with streaming. See {@link chatDefer}. */ + defer: chatDefer, /** Typed chat output stream for writing custom chunks or piping from subtasks. 
*/ stream: chatStream, }; diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx index cd2eac83cc5..7b5df23764e 100644 --- a/references/ai-chat/src/app/page.tsx +++ b/references/ai-chat/src/app/page.tsx @@ -12,6 +12,7 @@ import { type ChatMeta = { id: string; title: string; + model: string; createdAt: number; updatedAt: number; }; diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index e304e2b3f65..6077fbbea96 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -51,6 +51,7 @@ export function ChatApp({ // Model for new chats (before first message is sent) const [newChatModel, setNewChatModel] = useState(DEFAULT_MODEL); + const [preloadEnabled, setPreloadEnabled] = useState(true); const handleSessionChange = useCallback( (chatId: string, session: SessionInfo | null) => { @@ -98,8 +99,10 @@ export function ChatApp({ setActiveChatId(id); setMessages([]); setNewChatModel(DEFAULT_MODEL); - // Eagerly start the run — onPreload fires immediately for initialization - transport.preload(id); + if (preloadEnabled) { + // Eagerly start the run — onPreload fires immediately for initialization + transport.preload(id); + } } function handleSelectChat(id: string) { @@ -149,6 +152,8 @@ export function ChatApp({ onSelectChat={handleSelectChat} onNewChat={handleNewChat} onDeleteChat={handleDeleteChat} + preloadEnabled={preloadEnabled} + onPreloadChange={setPreloadEnabled} />
{activeChatId ? ( diff --git a/references/ai-chat/src/components/chat-sidebar.tsx b/references/ai-chat/src/components/chat-sidebar.tsx index bb688e99b14..50861c112d8 100644 --- a/references/ai-chat/src/components/chat-sidebar.tsx +++ b/references/ai-chat/src/components/chat-sidebar.tsx @@ -24,6 +24,8 @@ type ChatSidebarProps = { onSelectChat: (id: string) => void; onNewChat: () => void; onDeleteChat: (id: string) => void; + preloadEnabled: boolean; + onPreloadChange: (enabled: boolean) => void; }; export function ChatSidebar({ @@ -32,6 +34,8 @@ export function ChatSidebar({ onSelectChat, onNewChat, onDeleteChat, + preloadEnabled, + onPreloadChange, }: ChatSidebarProps) { const sorted = [...chats].sort((a, b) => b.updatedAt - a.updatedAt); @@ -77,6 +81,18 @@ export function ChatSidebar({ ))}
+ +
+ +
); } diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx index e41b132dbdb..7f8ab77ec95 100644 --- a/references/ai-chat/src/components/chat.tsx +++ b/references/ai-chat/src/components/chat.tsx @@ -110,6 +110,8 @@ function ResearchProgress({ part }: { part: any }) { ); } +type TtfbEntry = { turn: number; ttfbMs: number }; + function DebugPanel({ chatId, model, @@ -117,6 +119,7 @@ function DebugPanel({ session, dashboardUrl, messageCount, + ttfbHistory, }: { chatId: string; model: string; @@ -124,6 +127,7 @@ function DebugPanel({ session?: { runId: string; publicAccessToken: string; lastEventId?: string }; dashboardUrl?: string; messageCount: number; + ttfbHistory: TtfbEntry[]; }) { const [open, setOpen] = useState(false); @@ -132,6 +136,12 @@ function DebugPanel({ ? `${dashboardUrl}/runs/${session.runId}` : undefined; + const latestTtfb = ttfbHistory.length > 0 ? ttfbHistory[ttfbHistory.length - 1]! : undefined; + const avgTtfb = + ttfbHistory.length > 0 + ? Math.round(ttfbHistory.reduce((sum, e) => sum + e.ttfbMs, 0) / ttfbHistory.length) + : undefined; + return (
)}
@@ -236,6 +265,11 @@ export function Chat({ const [input, setInput] = useState(""); const hasCalledFirstMessage = useRef(false); + // TTFB tracking + const sendTimestamp = useRef(null); + const turnCounter = useRef(0); + const [ttfbHistory, setTtfbHistory] = useState([]); + const { messages, sendMessage, stop, status, error } = useChat({ id: chatId, messages: initialMessages, @@ -257,6 +291,19 @@ export function Chat({ } }, [messages, chatId, onFirstMessage]); + // TTFB detection: record when first assistant content appears after send + useEffect(() => { + if (status !== "streaming") return; + if (sendTimestamp.current === null) return; + const lastMsg = messages[messages.length - 1]; + if (lastMsg?.role === "assistant") { + const ttfbMs = Date.now() - sendTimestamp.current; + const turn = turnCounter.current; + sendTimestamp.current = null; + setTtfbHistory((prev) => [...prev, { turn, ttfbMs }]); + } + }, [status, messages]); + // Pending message to send after the current turn completes const [pendingMessage, setPendingMessage] = useState(null); @@ -277,6 +324,8 @@ export function Chat({ if (pendingMessage) { const text = pendingMessage; setPendingMessage(null); + turnCounter.current++; + sendTimestamp.current = Date.now(); sendMessage({ text }, { metadata: { model } }); } }, [status, messages, chatId, onMessagesChange, sendMessage, pendingMessage, model]); @@ -423,6 +472,7 @@ export function Chat({ session={session} dashboardUrl={dashboardUrl} messageCount={messages.length} + ttfbHistory={ttfbHistory} />
({ id: "userContext" }); // Per-run dynamic tools — loaded from DB in onPreload/onChatStart -const userToolDefs = chat.local< - Array<{ name: string; description: string; responseTemplate: string }> ->({ id: "userToolDefs" }); +const userToolDefs = chat.local<{ + value: Array<{ name: string; description: string; responseTemplate: string }>; +}>({ id: "userToolDefs" }); // -------------------------------------------------------------------------- // Subtask: deep research — fetches multiple URLs and streams progress @@ -250,6 +250,7 @@ export const aiChat = chat.task({ warmTimeoutInSeconds: 60, chatAccessTokenTTL: "2h", onPreload: async ({ chatId, runId, chatAccessToken, clientData }) => { + if (!clientData) return; // Eagerly initialize before the user's first message arrives const user = await prisma.user.upsert({ where: { id: clientData.userId }, @@ -266,7 +267,7 @@ export const aiChat = chat.task({ // Load user-specific dynamic tools const tools = await prisma.userTool.findMany({ where: { userId: clientData.userId } }); - userToolDefs.init(tools); + userToolDefs.init({ value: tools }); // Create chat record and session await prisma.chat.upsert({ @@ -287,12 +288,8 @@ export const aiChat = chat.task({ }, onChatStart: async ({ chatId, runId, chatAccessToken, clientData, continuation, preloaded }) => { if (preloaded) { - // Already initialized in onPreload — just update session - await prisma.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken }, - update: { runId, publicAccessToken: chatAccessToken }, - }); + // Everything was already initialized in onPreload — skip entirely. + // The session, chat record, user context, and tools are all set up. 
return; } @@ -312,7 +309,7 @@ export const aiChat = chat.task({ // Load user-specific dynamic tools const tools = await prisma.userTool.findMany({ where: { userId: clientData.userId } }); - userToolDefs.init(tools); + userToolDefs.init({ value: tools }); if (!continuation) { await prisma.chat.upsert({ @@ -333,17 +330,10 @@ export const aiChat = chat.task({ update: { runId, publicAccessToken: chatAccessToken }, }); }, - onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { - // Persist messages BEFORE streaming so mid-stream refresh has the user message - await prisma.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages as any }, - }); - await prisma.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken }, - update: { runId, publicAccessToken: chatAccessToken }, - }); + onTurnStart: async ({ chatId, uiMessages }) => { + // Persist messages so mid-stream refresh still shows the user message. + // Deferred — runs in parallel with streaming, awaited before onTurnComplete. 
+ chat.defer(prisma.chat.update({ where: { id: chatId }, data: { messages: uiMessages as any } })); }, onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId, clientData, stopped }) => { // Persist final messages + assistant response + stream position From 4ca281e050c44476943a22f96bf1d1f51255a09b Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 15:40:27 +0000 Subject: [PATCH 49/53] fix(reference): replace hand-rolled HTML stripping with turndown --- pnpm-lock.yaml | 23 +++++++++++++++++++++ references/ai-chat/package.json | 2 ++ references/ai-chat/src/trigger/chat.ts | 28 +++++--------------------- 3 files changed, 30 insertions(+), 23 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9827964dc8a..e93b9047d85 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2155,6 +2155,9 @@ importers: streamdown: specifier: ^2.3.0 version: 2.3.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + turndown: + specifier: ^7.2.2 + version: 7.2.2 zod: specifier: 3.25.76 version: 3.25.76 @@ -2174,6 +2177,9 @@ importers: '@types/react-dom': specifier: ^19 version: 19.0.4(@types/react@19.0.12) + '@types/turndown': + specifier: ^5.0.6 + version: 5.0.6 prisma: specifier: ^7.4.2 version: 7.4.2(@types/react@19.0.12)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) @@ -6042,6 +6048,9 @@ packages: '@microsoft/fetch-event-source@2.0.1': resolution: {integrity: sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==} + '@mixmark-io/domino@2.2.0': + resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==} + '@modelcontextprotocol/sdk@1.25.2': resolution: {integrity: sha512-LZFeo4F9M5qOhC/Uc1aQSrBHxMrvxett+9KLHt7OhcExtoiRN9DKgbZffMP/nxjutWDQpfMDfP3nkHI4X9ijww==} engines: {node: '>=18'} @@ -11241,6 +11250,9 @@ packages: '@types/trusted-types@2.0.7': resolution: {integrity: 
sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/turndown@5.0.6': + resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==} + '@types/unist@2.0.6': resolution: {integrity: sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==} @@ -19798,6 +19810,9 @@ packages: resolution: {integrity: sha512-U4gKCWcKgLcCjQd4Pl8KJdfEKumpyWbzRu75A6FCj6Ctea1PIm58W6Ltw1QXKqHrl2pF9e1raAskf/h6dlrPCA==} hasBin: true + turndown@7.2.2: + resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==} + tw-animate-css@1.2.4: resolution: {integrity: sha512-yt+HkJB41NAvOffe4NweJU6fLqAlVx/mBX6XmHRp15kq0JxTtOKaIw8pVSWM1Z+n2nXtyi7cW6C9f0WG/F/QAQ==} @@ -24873,6 +24888,8 @@ snapshots: '@microsoft/fetch-event-source@2.0.1': {} + '@mixmark-io/domino@2.2.0': {} + '@modelcontextprotocol/sdk@1.25.2(hono@4.11.8)(supports-color@10.0.0)(zod@3.25.76)': dependencies: '@hono/node-server': 1.19.9(hono@4.11.8) @@ -31965,6 +31982,8 @@ snapshots: '@types/trusted-types@2.0.7': optional: true + '@types/turndown@5.0.6': {} + '@types/unist@2.0.6': {} '@types/unist@3.0.3': {} @@ -42479,6 +42498,10 @@ snapshots: turbo-windows-64: 1.10.3 turbo-windows-arm64: 1.10.3 + turndown@7.2.2: + dependencies: + '@mixmark-io/domino': 2.2.0 + tw-animate-css@1.2.4: {} tweetnacl@0.14.5: {} diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json index 38df927947d..8c030cac577 100644 --- a/references/ai-chat/package.json +++ b/references/ai-chat/package.json @@ -24,6 +24,7 @@ "react": "^19.0.0", "react-dom": "^19.0.0", "streamdown": "^2.3.0", + "turndown": "^7.2.2", "zod": "3.25.76" }, "devDependencies": { @@ -32,6 +33,7 @@ "@types/node": "^22", "@types/react": "^19", "@types/react-dom": "^19", + "@types/turndown": "^5.0.6", "tailwindcss": "^4", "prisma": "^7.4.2", "trigger.dev": "workspace:*", 
diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 0b48fc1287b..102db28f0d7 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -12,8 +12,11 @@ import { PrismaClient } from "../../lib/generated/prisma/client"; const adapter = new PrismaPg({ connectionString: process.env.DATABASE_URL! }); const prisma = new PrismaClient({ adapter }); +import TurndownService from "turndown"; import { DEFAULT_MODEL, REASONING_MODELS } from "@/lib/models"; +const turndown = new TurndownService(); + const MODELS: Record LanguageModel> = { "gpt-4o-mini": () => openai("gpt-4o-mini"), "gpt-4o": () => openai("gpt-4o"), @@ -98,20 +101,8 @@ const webFetch = tool({ let text = await response.text(); const contentType = response.headers.get("content-type") ?? ""; - // Strip HTML to plain text for readability if (contentType.includes("html")) { - text = text - .replace(//gi, "") - .replace(//gi, "") - .replace(/<[^>]+>/g, " ") - .replace(/ /g, " ") - .replace(/&/g, "&") - .replace(/</g, "<") - .replace(/>/g, ">") - .replace(/"/g, '"') - .replace(/'/g, "'") - .replace(/\s+/g, " ") - .trim(); + text = turndown.turndown(text); } return { @@ -204,16 +195,7 @@ export const deepResearch = schemaTask({ const contentType = response.headers.get("content-type") ?? 
""; if (contentType.includes("html")) { - text = text - .replace(//gi, "") - .replace(//gi, "") - .replace(/<[^>]+>/g, " ") - .replace(/ /g, " ") - .replace(/&/g, "&") - .replace(/</g, "<") - .replace(/>/g, ">") - .replace(/\s+/g, " ") - .trim(); + text = turndown.turndown(text); } results.push({ From 2ab5bf512d781b8b368076d511cb0029e1d2be9d Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Mon, 9 Mar 2026 17:23:37 +0000 Subject: [PATCH 50/53] feat(streams): add inputStream.waitWithWarmup(), warm timeout config in sidebar, preload payload option --- packages/core/src/v3/realtimeStreams/types.ts | 17 +++++ packages/trigger-sdk/src/v3/ai.ts | 66 ++++++------------- packages/trigger-sdk/src/v3/chat.ts | 5 +- packages/trigger-sdk/src/v3/streams.ts | 43 +++++++++++- .../ai-chat/src/components/chat-app.tsx | 5 +- .../ai-chat/src/components/chat-sidebar.tsx | 18 ++++- 6 files changed, 103 insertions(+), 51 deletions(-) diff --git a/packages/core/src/v3/realtimeStreams/types.ts b/packages/core/src/v3/realtimeStreams/types.ts index 1b7455ebd25..b3c8d82709b 100644 --- a/packages/core/src/v3/realtimeStreams/types.ts +++ b/packages/core/src/v3/realtimeStreams/types.ts @@ -193,6 +193,14 @@ export type RealtimeDefinedInputStream = { * Uses a waitpoint token internally. Can only be called inside a task.run(). */ wait: (options?: InputStreamWaitOptions) => ManualWaitpointPromise; + /** + * Wait for data with a warm phase before suspending. + * + * Keeps the task warm (active, using compute) for `warmTimeoutInSeconds`, + * then suspends via `.wait()` if no data arrives. If data arrives during + * the warm phase the task responds instantly without suspending. + */ + waitWithWarmup: (options: InputStreamWaitWithWarmupOptions) => Promise<{ ok: true; output: TData } | { ok: false; error?: any }>; /** * Send data to this input stream on a specific run. * This is used from outside the task (e.g., from your backend or another task). 
@@ -249,6 +257,15 @@ export type InputStreamWaitOptions = { spanName?: string; }; +export type InputStreamWaitWithWarmupOptions = { + /** Seconds to keep the task warm before suspending. */ + warmTimeoutInSeconds: number; + /** Maximum time to wait after suspending (duration string, e.g. "1h"). */ + timeout?: string; + /** Override the default span name for the outer operation. */ + spanName?: string; +}; + export type InferInputStreamType = T extends RealtimeDefinedInputStream ? TData : unknown; diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 122e1f716c6..cdfcae980d3 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -320,6 +320,8 @@ export type ChatTaskWirePayload 0) { - const warm = await messagesInput.once({ - timeoutMs: effectivePreloadWarmTimeout * 1000, - spanName: "preload wait (warm)", - }); + const preloadResult = await messagesInput.waitWithWarmup({ + warmTimeoutInSeconds: effectivePreloadWarmTimeout, + timeout: effectivePreloadTimeout, + spanName: "waiting for first message", + }); - if (warm.ok) { - firstMessage = warm.output; - } + if (!preloadResult.ok) { + return; // Timed out waiting for first message — end run } - if (!firstMessage) { - const effectivePreloadTimeout = - (metadata.get(TURN_TIMEOUT_METADATA_KEY) as string | undefined) - ?? preloadTimeout - ?? turnTimeout; - - const suspended = await messagesInput.wait({ - timeout: effectivePreloadTimeout, - spanName: "preload wait (suspended)", - }); - - if (!suspended.ok) { - return; // Timed out waiting for first message — end run - } - - firstMessage = suspended.output; - } + let firstMessage = preloadResult.output; currentWirePayload = firstMessage; } @@ -1335,35 +1323,19 @@ function chatTask< return "continue"; } - // Phase 1: Keep the run warm for quick response to the next message. - // The run stays active (using compute) during this window. 
+ // Wait for the next message — stay warm briefly, then suspend const effectiveWarmTimeout = (metadata.get(WARM_TIMEOUT_METADATA_KEY) as number | undefined) ?? warmTimeoutInSeconds; - - if (effectiveWarmTimeout > 0) { - const warm = await messagesInput.once({ - timeoutMs: effectiveWarmTimeout * 1000, - spanName: "waiting (warm)", - }); - - if (warm.ok) { - // Message arrived while warm — respond instantly - currentWirePayload = warm.output; - return "continue"; - } - } - - // Phase 2: Suspend the task (frees compute) until the next message arrives const effectiveTurnTimeout = (metadata.get(TURN_TIMEOUT_METADATA_KEY) as string | undefined) ?? turnTimeout; - const next = await messagesInput.wait({ + const next = await messagesInput.waitWithWarmup({ + warmTimeoutInSeconds: effectiveWarmTimeout, timeout: effectiveTurnTimeout, - spanName: "waiting (suspended)", + spanName: "waiting for next message", }); if (!next.ok) { - // Timed out waiting for the next message — end the conversation return "exit"; } diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts index bf269c88ebc..977679430d0 100644 --- a/packages/trigger-sdk/src/v3/chat.ts +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -464,7 +464,7 @@ export class TriggerChatTransport implements ChatTransport { * * No-op if a session already exists for this chatId. */ - async preload(chatId: string): Promise { + async preload(chatId: string, options?: { warmTimeoutInSeconds?: number }): Promise { // Don't preload if session already exists if (this.sessions.get(chatId)?.runId) return; @@ -473,6 +473,9 @@ export class TriggerChatTransport implements ChatTransport { chatId, trigger: "preload" as const, metadata: this.defaultMetadata, + ...(options?.warmTimeoutInSeconds !== undefined + ? 
{ warmTimeoutInSeconds: options.warmTimeoutInSeconds } + : {}), }; const currentToken = await this.resolveAccessToken(); diff --git a/packages/trigger-sdk/src/v3/streams.ts b/packages/trigger-sdk/src/v3/streams.ts index 6bdf862ebdc..13fd9f82220 100644 --- a/packages/trigger-sdk/src/v3/streams.ts +++ b/packages/trigger-sdk/src/v3/streams.ts @@ -25,6 +25,7 @@ import { InputStreamOncePromise, type InputStreamOnceResult, type InputStreamWaitOptions, + type InputStreamWaitWithWarmupOptions, type SendInputStreamOptions, type InferInputStreamType, type StreamWriteResult, @@ -767,6 +768,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { const result = await tracer.startActiveSpan( options?.spanName ?? `inputStream.wait()`, async (span) => { + // 1. Block the run on the waitpoint const waitResponse = await apiClient.waitForWaitpointToken({ runFriendlyId: ctx.run.id, @@ -786,7 +788,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { // 3. Suspend the task const waitResult = await runtime.waitUntil(response.waitpointId); - // 3. Parse the output + // 4. Parse the output const data = waitResult.output !== undefined ? await conditionallyImportAndParsePacket( @@ -840,6 +842,45 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { } }); }, + async waitWithWarmup(options) { + const self = this; + const spanName = options.spanName ?? 
`inputStream.waitWithWarmup()`; + + return tracer.startActiveSpan( + spanName, + async (span) => { + // Warm phase: keep compute alive + if (options.warmTimeoutInSeconds > 0) { + const warm = await inputStreams.once(opts.id, { + timeoutMs: options.warmTimeoutInSeconds * 1000, + }); + if (warm.ok) { + span.setAttribute("wait.resolved", "warm"); + return { ok: true as const, output: warm.output as TData }; + } + } + + // Cold phase: suspend via .wait() — creates a child span + span.setAttribute("wait.resolved", "suspended"); + const waitResult = await self.wait({ + timeout: options.timeout, + spanName: "suspended", + }); + + return waitResult; + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "streams", + streamId: opts.id, + ...accessoryAttributes({ + items: [{ text: opts.id, variant: "normal" }], + style: "codepath", + }), + }, + } + ); + }, async send(runId, data, options) { return tracer.startActiveSpan( `inputStream.send()`, diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx index 6077fbbea96..b7a1d25a511 100644 --- a/references/ai-chat/src/components/chat-app.tsx +++ b/references/ai-chat/src/components/chat-app.tsx @@ -52,6 +52,7 @@ export function ChatApp({ // Model for new chats (before first message is sent) const [newChatModel, setNewChatModel] = useState(DEFAULT_MODEL); const [preloadEnabled, setPreloadEnabled] = useState(true); + const [warmTimeoutInSeconds, setWarmTimeoutInSeconds] = useState(60); const handleSessionChange = useCallback( (chatId: string, session: SessionInfo | null) => { @@ -101,7 +102,7 @@ export function ChatApp({ setNewChatModel(DEFAULT_MODEL); if (preloadEnabled) { // Eagerly start the run — onPreload fires immediately for initialization - transport.preload(id); + transport.preload(id, { warmTimeoutInSeconds }); } } @@ -154,6 +155,8 @@ export function ChatApp({ onDeleteChat={handleDeleteChat} preloadEnabled={preloadEnabled} onPreloadChange={setPreloadEnabled} 
+ warmTimeoutInSeconds={warmTimeoutInSeconds} + onWarmTimeoutChange={setWarmTimeoutInSeconds} />
{activeChatId ? ( diff --git a/references/ai-chat/src/components/chat-sidebar.tsx b/references/ai-chat/src/components/chat-sidebar.tsx index 50861c112d8..73136f31cb1 100644 --- a/references/ai-chat/src/components/chat-sidebar.tsx +++ b/references/ai-chat/src/components/chat-sidebar.tsx @@ -26,6 +26,8 @@ type ChatSidebarProps = { onDeleteChat: (id: string) => void; preloadEnabled: boolean; onPreloadChange: (enabled: boolean) => void; + warmTimeoutInSeconds: number; + onWarmTimeoutChange: (seconds: number) => void; }; export function ChatSidebar({ @@ -36,6 +38,8 @@ export function ChatSidebar({ onDeleteChat, preloadEnabled, onPreloadChange, + warmTimeoutInSeconds, + onWarmTimeoutChange, }: ChatSidebarProps) { const sorted = [...chats].sort((a, b) => b.updatedAt - a.updatedAt); @@ -82,7 +86,7 @@ export function ChatSidebar({ ))}
-
+
+
+ Warm timeout + onWarmTimeoutChange(Number(e.target.value))} + className="w-16 rounded border border-gray-300 px-1.5 py-0.5 text-xs text-gray-600 outline-none focus:border-blue-500" + /> + s +
); From e45533a674064ecfdc8ec0a3f149550d241f19c8 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Tue, 10 Mar 2026 09:59:11 +0000 Subject: [PATCH 51/53] feat(chat): add composable primitives, raw task example, and task mode switcher --- packages/trigger-sdk/src/v3/ai.ts | 160 ++++++++++++++++ references/ai-chat/src/app/actions.ts | 3 +- references/ai-chat/src/app/page.tsx | 4 + .../ai-chat/src/components/chat-app.tsx | 12 +- .../ai-chat/src/components/chat-sidebar.tsx | 15 ++ references/ai-chat/src/trigger/chat.ts | 177 +++++++++++++++++- 6 files changed, 364 insertions(+), 7 deletions(-) diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index cdfcae980d3..bdff3b11db3 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -1567,6 +1567,156 @@ function cleanupAbortedParts(message: UIMessage): UIMessage { }; } +// --------------------------------------------------------------------------- +// Composable primitives for raw task chat +// --------------------------------------------------------------------------- + +/** + * Create a managed stop signal wired to the chat stop input stream. + * + * Call once at the start of your run. Use `signal` as the abort signal for + * `streamText`. Call `reset()` at the start of each turn to get a fresh + * per-turn signal. Call `cleanup()` when the run ends. + * + * @example + * ```ts + * const stop = chat.createStopSignal(); + * for (let turn = 0; turn < 100; turn++) { + * stop.reset(); + * const result = streamText({ model, messages, abortSignal: stop.signal }); + * await chat.pipe(result); + * // ... 
+ * } + * stop.cleanup(); + * ``` + */ +function createStopSignal(): { readonly signal: AbortSignal; reset: () => void; cleanup: () => void } { + let controller = new AbortController(); + const sub = stopInput.on((data) => { + controller.abort(data?.message || "stopped"); + }); + return { + get signal() { return controller.signal; }, + reset() { controller = new AbortController(); }, + cleanup() { sub.off(); }, + }; +} + +/** + * Signal the frontend that the current turn is complete. + * + * The `TriggerChatTransport` intercepts this to close the ReadableStream + * for the current turn. Call after piping the response stream. + * + * @example + * ```ts + * await chat.pipe(result); + * await chat.writeTurnComplete(); + * ``` + */ +async function chatWriteTurnComplete(options?: { publicAccessToken?: string }): Promise { + await writeTurnCompleteChunk(undefined, options?.publicAccessToken); +} + +/** + * Pipe a `StreamTextResult` (or similar) to the chat stream and capture + * the assistant's response message via `onFinish`. + * + * Combines `toUIMessageStream()` + `onFinish` callback + `chat.pipe()`. + * Returns the captured `UIMessage`, or `undefined` if capture failed. + * + * @example + * ```ts + * const result = streamText({ model, messages, abortSignal: signal }); + * const response = await chat.pipeAndCapture(result, { signal }); + * if (response) conversation.addResponse(response); + * ``` + */ +async function pipeChatAndCapture( + source: UIMessageStreamable, + options?: { signal?: AbortSignal; spanName?: string } +): Promise { + let captured: UIMessage | undefined; + let resolveOnFinish: () => void; + const onFinishPromise = new Promise((r) => { resolveOnFinish = r; }); + + const uiStream = source.toUIMessageStream({ + onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { + captured = responseMessage; + resolveOnFinish!(); + }, + }); + + await pipeChat(uiStream, { signal: options?.signal, spanName: options?.spanName ?? 
"stream response" }); + await onFinishPromise; + + return captured; +} + +/** + * Accumulates conversation messages across turns. + * + * Handles the transport protocol: turn 0 sends full history (replace), + * subsequent turns send only new messages (append), regenerate sends + * full history minus last assistant message (replace). + * + * @example + * ```ts + * const conversation = new chat.MessageAccumulator(); + * for (let turn = 0; turn < 100; turn++) { + * const messages = await conversation.addIncoming(payload.messages, payload.trigger, turn); + * const result = streamText({ model, messages }); + * const response = await chat.pipeAndCapture(result); + * if (response) await conversation.addResponse(response); + * } + * ``` + */ +class ChatMessageAccumulator { + modelMessages: ModelMessage[] = []; + uiMessages: UIMessage[] = []; + + /** + * Add incoming messages from the transport payload. + * Returns the full accumulated model messages for `streamText`. + */ + async addIncoming( + messages: UIMessage[], + trigger: string, + turn: number + ): Promise { + const cleaned = messages.map((m) => + m.role === "assistant" ? cleanupAbortedParts(m) : m + ); + const model = await convertToModelMessages(cleaned); + + if (turn === 0 || trigger === "regenerate-message") { + this.modelMessages = model; + this.uiMessages = [...cleaned]; + } else { + this.modelMessages.push(...model); + this.uiMessages.push(...cleaned); + } + return this.modelMessages; + } + + /** + * Add the assistant's response to the accumulator. + * Call after `pipeAndCapture` with the captured response. 
+ */ + async addResponse(response: UIMessage): Promise { + if (!response.id) { + response = { ...response, id: generateMessageId() }; + } + this.uiMessages.push(response); + try { + const msgs = await convertToModelMessages([stripProviderMetadata(response)]); + this.modelMessages.push(...msgs); + } catch { + // Conversion failed — skip model message accumulation for this response + } + } +} + // --------------------------------------------------------------------------- // chat.local — per-run typed data with Proxy access // --------------------------------------------------------------------------- @@ -1825,6 +1975,16 @@ export const chat = { defer: chatDefer, /** Typed chat output stream for writing custom chunks or piping from subtasks. */ stream: chatStream, + /** Pre-built input stream for receiving messages from the transport. */ + messages: messagesInput, + /** Create a managed stop signal wired to the stop input stream. See {@link createStopSignal}. */ + createStopSignal, + /** Signal the frontend that the current turn is complete. See {@link chatWriteTurnComplete}. */ + writeTurnComplete: chatWriteTurnComplete, + /** Pipe a stream and capture the response message. See {@link pipeChatAndCapture}. */ + pipeAndCapture: pipeChatAndCapture, + /** Message accumulator class for raw task chat. See {@link ChatMessageAccumulator}. */ + MessageAccumulator: ChatMessageAccumulator, }; /** diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts index 56398c9c84f..9f21252538f 100644 --- a/references/ai-chat/src/app/actions.ts +++ b/references/ai-chat/src/app/actions.ts @@ -4,7 +4,8 @@ import { chat } from "@trigger.dev/sdk/ai"; import type { aiChat } from "@/trigger/chat"; import { prisma } from "@/lib/prisma"; -export const getChatToken = async () => chat.createAccessToken("ai-chat"); +export const getChatToken = async (taskId?: string) => + chat.createAccessToken((taskId ?? 
"ai-chat") as any); export async function getChatList() { const chats = await prisma.chat.findMany({ diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx index 7b5df23764e..b2559082334 100644 --- a/references/ai-chat/src/app/page.tsx +++ b/references/ai-chat/src/app/page.tsx @@ -25,6 +25,7 @@ export default function Home() { Record >({}); const [loaded, setLoaded] = useState(false); + const [taskMode, setTaskMode] = useState("ai-chat"); useEffect(() => { async function load() { @@ -50,6 +51,9 @@ export default function Home() { return ( void; initialChatList: ChatMeta[]; initialActiveChatId: string | null; initialMessages: UIMessage[]; @@ -39,6 +41,8 @@ type ChatAppProps = { }; export function ChatApp({ + taskMode, + onTaskModeChange, initialChatList, initialActiveChatId, initialMessages, @@ -70,9 +74,9 @@ export function ChatApp({ [] ); - const transport = useTriggerChatTransport({ - task: "ai-chat", - accessToken: getChatToken, + const transport = useTriggerChatTransport({ + task: taskMode, + accessToken: () => getChatToken(taskMode), baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, sessions: initialSessions, onSessionChange: handleSessionChange, @@ -157,6 +161,8 @@ export function ChatApp({ onPreloadChange={setPreloadEnabled} warmTimeoutInSeconds={warmTimeoutInSeconds} onWarmTimeoutChange={setWarmTimeoutInSeconds} + taskMode={taskMode} + onTaskModeChange={onTaskModeChange} />
{activeChatId ? ( diff --git a/references/ai-chat/src/components/chat-sidebar.tsx b/references/ai-chat/src/components/chat-sidebar.tsx index 73136f31cb1..2f6182f3b80 100644 --- a/references/ai-chat/src/components/chat-sidebar.tsx +++ b/references/ai-chat/src/components/chat-sidebar.tsx @@ -28,6 +28,8 @@ type ChatSidebarProps = { onPreloadChange: (enabled: boolean) => void; warmTimeoutInSeconds: number; onWarmTimeoutChange: (seconds: number) => void; + taskMode: string; + onTaskModeChange: (mode: string) => void; }; export function ChatSidebar({ @@ -40,6 +42,8 @@ export function ChatSidebar({ onPreloadChange, warmTimeoutInSeconds, onWarmTimeoutChange, + taskMode, + onTaskModeChange, }: ChatSidebarProps) { const sorted = [...chats].sort((a, b) => b.updatedAt - a.updatedAt); @@ -108,6 +112,17 @@ export function ChatSidebar({ /> s
+
+ Task + +
); diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 102db28f0d7..cc2a8952371 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,7 +1,7 @@ -import { chat, ai } from "@trigger.dev/sdk/ai"; -import { schemaTask } from "@trigger.dev/sdk"; +import { chat, ai, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; +import { schemaTask, task } from "@trigger.dev/sdk"; import { streamText, tool, dynamicTool, stepCountIs, generateId } from "ai"; -import type { LanguageModel, Tool as AITool } from "ai"; +import type { LanguageModel, Tool as AITool, UIMessage } from "ai"; import { openai } from "@ai-sdk/openai"; import { anthropic } from "@ai-sdk/anthropic"; import { z } from "zod"; @@ -392,3 +392,174 @@ export const aiChat = chat.task({ }); }, }); + +// -------------------------------------------------------------------------- +// Raw task version — same functionality using composable primitives +// -------------------------------------------------------------------------- + +async function initUserContext(userId: string, chatId: string, model?: string) { + const user = await prisma.user.upsert({ + where: { id: userId }, + create: { id: userId, name: "User" }, + update: {}, + }); + userContext.init({ + userId: user.id, + name: user.name, + plan: user.plan as "free" | "pro", + preferredModel: user.preferredModel, + messageCount: user.messageCount, + }); + + const tools = await prisma.userTool.findMany({ where: { userId } }); + userToolDefs.init({ value: tools }); + + await prisma.chat.upsert({ + where: { id: chatId }, + create: { id: chatId, title: "New chat", userId: user.id, model: model ?? 
DEFAULT_MODEL }, + update: {}, + }); +} + +export const aiChatRaw = task({ + id: "ai-chat-raw", + run: async (payload: ChatTaskWirePayload, { signal: runSignal }) => { + let currentPayload = payload; + const clientData = payload.metadata as { userId: string; model?: string } | undefined; + + // Handle preload — init early, then wait for first message + if (currentPayload.trigger === "preload") { + if (clientData) { + await initUserContext(clientData.userId, currentPayload.chatId, clientData.model); + } + + const result = await chat.messages.waitWithWarmup({ + warmTimeoutInSeconds: payload.warmTimeoutInSeconds ?? 60, + timeout: "1h", + spanName: "waiting for first message", + }); + if (!result.ok) return; + currentPayload = result.output; + } + + // Non-preloaded: init now + const currentClientData = (currentPayload.metadata ?? clientData) as + | { userId: string; model?: string } + | undefined; + + if (!userContext.userId && currentClientData) { + await initUserContext(currentClientData.userId, currentPayload.chatId, currentClientData.model); + } + + const stop = chat.createStopSignal(); + const conversation = new chat.MessageAccumulator(); + + for (let turn = 0; turn < 100; turn++) { + stop.reset(); + + const messages = await conversation.addIncoming( + currentPayload.messages, + currentPayload.trigger, + turn + ); + + const turnClientData = (currentPayload.metadata ?? currentClientData) as + | { userId: string; model?: string } + | undefined; + + userContext.messageCount++; + if (turnClientData?.model) { + userContext.preferredModel = turnClientData.model; + } + + const modelId = turnClientData?.model ?? userContext.preferredModel ?? undefined; + const useReasoning = REASONING_MODELS.has(modelId ?? DEFAULT_MODEL); + const combinedSignal = AbortSignal.any([runSignal, stop.signal]); + + const dynamicTools: Record> = {}; + for (const t of userToolDefs.value ?? 
[]) { + dynamicTools[t.name] = dynamicTool({ + description: t.description, + inputSchema: z.object({ + query: z.string().describe("The query or topic to look up"), + }), + execute: async (input) => { + return { result: t.responseTemplate.replace("{{query}}", (input as any).query) }; + }, + }); + } + + const result = streamText({ + model: getModel(modelId), + system: `You are a helpful assistant for ${userContext.name} (${userContext.plan} plan). Be concise and friendly.`, + messages, + tools: { + inspectEnvironment, + webFetch, + deepResearch: ai.tool(deepResearch), + ...dynamicTools, + }, + stopWhen: stepCountIs(10), + abortSignal: combinedSignal, + providerOptions: { + openai: { user: turnClientData?.userId }, + anthropic: { + metadata: { user_id: turnClientData?.userId }, + ...(useReasoning ? { thinking: { type: "enabled", budgetTokens: 10000 } } : {}), + }, + }, + experimental_telemetry: { isEnabled: true }, + }); + + let response: UIMessage | undefined; + try { + response = await chat.pipeAndCapture(result, { signal: combinedSignal }); + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) break; + // Stop — fall through + } else { + throw error; + } + } + + if (response) { + if (stop.signal.aborted && !runSignal.aborted) { + await conversation.addResponse(chat.cleanupAbortedParts(response)); + } else { + await conversation.addResponse(response); + } + } + + if (runSignal.aborted) break; + + // Persist messages + await prisma.chat.update({ + where: { id: currentPayload.chatId }, + data: { messages: conversation.uiMessages as any }, + }); + + if (userContext.hasChanged()) { + await prisma.user.update({ + where: { id: userContext.userId }, + data: { + messageCount: userContext.messageCount, + preferredModel: userContext.preferredModel, + }, + }); + } + + await chat.writeTurnComplete(); + + const next = await chat.messages.waitWithWarmup({ + warmTimeoutInSeconds: 60, + timeout: "1h", + spanName: "waiting for 
next message", + }); + if (!next.ok) break; + currentPayload = next.output; + } + + stop.cleanup(); + }, +}); From 97f967e8c8cecbd3215985daa0e695615ddcd12d Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Tue, 10 Mar 2026 11:39:41 +0000 Subject: [PATCH 52/53] Introduce the chat session API and better docs organization --- docs/ai-chat/backend.mdx | 771 +++++++++ docs/ai-chat/features.mdx | 421 +++++ docs/ai-chat/frontend.mdx | 234 +++ docs/ai-chat/overview.mdx | 161 ++ docs/ai-chat/quick-start.mdx | 108 ++ docs/ai-chat/reference.mdx | 240 +++ docs/docs.json | 13 +- docs/guides/ai-chat.mdx | 1412 ----------------- packages/trigger-sdk/src/v3/ai.ts | 215 +++ .../ai-chat/src/components/chat-sidebar.tsx | 1 + references/ai-chat/src/trigger/chat.ts | 73 + 11 files changed, 2236 insertions(+), 1413 deletions(-) create mode 100644 docs/ai-chat/backend.mdx create mode 100644 docs/ai-chat/features.mdx create mode 100644 docs/ai-chat/frontend.mdx create mode 100644 docs/ai-chat/overview.mdx create mode 100644 docs/ai-chat/quick-start.mdx create mode 100644 docs/ai-chat/reference.mdx delete mode 100644 docs/guides/ai-chat.mdx diff --git a/docs/ai-chat/backend.mdx b/docs/ai-chat/backend.mdx new file mode 100644 index 00000000000..4d730b0cbb5 --- /dev/null +++ b/docs/ai-chat/backend.mdx @@ -0,0 +1,771 @@ +--- +title: "Backend" +sidebarTitle: "Backend" +description: "Three approaches to building your chat backend — chat.task(), session iterator, or raw task primitives." +--- + +## chat.task() + +The highest-level approach. Handles message accumulation, stop signals, turn lifecycle, and auto-piping automatically. 
+ +### Simple: return a StreamTextResult + +Return the `streamText` result from `run` and it's automatically piped to the frontend: + +```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const simpleChat = chat.task({ + id: "simple-chat", + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + system: "You are a helpful assistant.", + messages, + abortSignal: signal, + }); + }, +}); +``` + +### Using chat.pipe() for complex flows + +For complex agent flows where `streamText` is called deep inside your code, use `chat.pipe()`. It works from **anywhere inside a task** — even nested function calls. + +```ts trigger/agent-chat.ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import type { ModelMessage } from "ai"; + +export const agentChat = chat.task({ + id: "agent-chat", + run: async ({ messages }) => { + // Don't return anything — chat.pipe is called inside + await runAgentLoop(messages); + }, +}); + +async function runAgentLoop(messages: ModelMessage[]) { + // ... agent logic, tool calls, etc. + + const result = streamText({ + model: openai("gpt-4o"), + messages, + }); + + // Pipe from anywhere — no need to return it + await chat.pipe(result); +} +``` + +### Lifecycle hooks + +#### onPreload + +Fires when a preloaded run starts — before any messages arrive. Use it to eagerly initialize state (DB records, user context) while the user is still typing. + +Preloaded runs are triggered by calling `transport.preload(chatId)` on the frontend. See [Preload](/ai-chat/features#preload) for details. 
+ +```ts +export const myChat = chat.task({ + id: "my-chat", + clientDataSchema: z.object({ userId: z.string() }), + onPreload: async ({ chatId, clientData, runId, chatAccessToken }) => { + // Initialize early — before the first message arrives + const user = await db.user.findUnique({ where: { id: clientData.userId } }); + userContext.init({ name: user.name, plan: user.plan }); + + await db.chat.create({ data: { id: chatId, userId: clientData.userId } }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + onChatStart: async ({ preloaded }) => { + if (preloaded) return; // Already initialized in onPreload + // ... non-preloaded initialization + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend | + +#### onChatStart + +Fires once on the first turn (turn 0) before `run()` executes. Use it to create a chat record in your database. + +The `continuation` field tells you whether this is a brand new chat or a continuation of an existing one (where the previous run timed out or was cancelled). The `preloaded` field tells you whether `onPreload` already ran. 
+ +```ts +export const myChat = chat.task({ + id: "my-chat", + onChatStart: async ({ chatId, clientData, continuation, preloaded }) => { + if (preloaded) return; // Already set up in onPreload + if (continuation) return; // Chat record already exists + + const { userId } = clientData as { userId: string }; + await db.chat.create({ + data: { id: chatId, userId, title: "New chat" }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + `clientData` contains custom data from the frontend — either the `clientData` option on the transport constructor (sent with every message) or the `metadata` option on `sendMessage()` (per-message). See [Client data and metadata](/ai-chat/frontend#client-data-and-metadata). + + +#### onTurnStart + +Fires at the start of every turn, after message accumulation and `onChatStart` (turn 0), but **before** `run()` executes. Use it to persist messages before streaming begins — so a mid-stream page refresh still shows the user's message. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | +| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | +| `turn` | `number` | Turn number (0-indexed) | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | +| `preloaded` | `boolean` | Whether this run was preloaded | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend | + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + By persisting in `onTurnStart`, the user's message is saved to your database before the AI starts streaming. If the user refreshes mid-stream, the message is already there. + + +#### onTurnComplete + +Fires after each turn completes — after the response is captured, before waiting for the next message. This is the primary hook for persisting the assistant's response. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | +| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | +| `newMessages` | `ModelMessage[]` | Only this turn's messages (model format) | +| `newUIMessages` | `UIMessage[]` | Only this turn's messages (UI format) | +| `responseMessage` | `UIMessage \| undefined` | The assistant's response for this turn | +| `turn` | `number` | Turn number (0-indexed) | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `lastEventId` | `string \| undefined` | Stream position for resumption. Persist this with the session. | +| `stopped` | `boolean` | Whether the user stopped generation during this turn | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | +| `rawResponseMessage` | `UIMessage \| undefined` | The raw assistant response before abort cleanup (same as `responseMessage` when not stopped) | + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: chatAccessToken, lastEventId }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + Use `uiMessages` to overwrite the full conversation each turn (simplest). Use `newUIMessages` if you prefer to store messages individually — for example, one database row per message. + + + + Persist `lastEventId` alongside the session. 
When the transport reconnects after a page refresh, it uses this to skip past already-seen events — preventing duplicate messages. + + +### Stop generation + +#### How stop works + +Calling `stop()` from `useChat` sends a stop signal to the running task via input streams. The task's `streamText` call aborts (if you passed `signal` or `stopSignal`), but the **run stays alive** and waits for the next message. The partial response is captured and accumulated normally. + +#### Abort signals + +The `run` function receives three abort signals: + +| Signal | Fires when | Use for | +|--------|-----------|---------| +| `signal` | Stop **or** cancel | Pass to `streamText` — handles both cases. **Use this in most cases.** | +| `stopSignal` | Stop only (per-turn, reset each turn) | Custom logic that should only run on user stop, not cancellation | +| `cancelSignal` | Run cancel, expire, or maxDuration exceeded | Cleanup that should only happen on full cancellation | + +```ts +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal, stopSignal, cancelSignal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, // Handles both stop and cancel + }); + }, +}); +``` + + + Use `signal` (the combined signal) in most cases. The separate `stopSignal` and `cancelSignal` are only needed if you want different behavior for stop vs cancel. + + +#### Detecting stop in callbacks + +The `onTurnComplete` event includes a `stopped` boolean that indicates whether the user stopped generation during that turn: + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnComplete: async ({ chatId, uiMessages, stopped }) => { + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages, lastStoppedAt: stopped ? 
new Date() : undefined }, + }); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +You can also check stop status from **anywhere** during a turn using `chat.isStopped()`. This is useful inside `streamText`'s `onFinish` callback where the AI SDK's `isAborted` flag can be unreliable (e.g. when using `createUIMessageStream` + `writer.merge()`): + +```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; + +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + onFinish: ({ isAborted }) => { + // isAborted may be false even after stop when using createUIMessageStream + const wasStopped = isAborted || chat.isStopped(); + if (wasStopped) { + // handle stop — e.g. log analytics + } + }, + }); + }, +}); +``` + +#### Cleaning up aborted messages + +When stop happens mid-stream, the captured response message can contain parts in an incomplete state — tool calls stuck in `partial-call`, reasoning blocks still marked as `streaming`, etc. These can cause UI issues like permanent spinners. + +`chat.task` automatically cleans up the `responseMessage` when stop is detected before passing it to `onTurnComplete`. If you use `chat.pipe()` manually and capture response messages yourself, use `chat.cleanupAbortedParts()`: + +```ts +const cleaned = chat.cleanupAbortedParts(rawResponseMessage); +``` + +This removes tool invocation parts stuck in `partial-call` state and marks any `streaming` text or reasoning parts as `done`. + + + Stop signal delivery is best-effort. There is a small race window where the model may finish before the stop signal arrives, in which case the turn completes normally with `stopped: false`. This is expected and does not require special handling. 
+ + +### Persistence + +#### What needs to be persisted + +To build a chat app that survives page refreshes, you need to persist two things: + +1. **Messages** — The conversation history. Persisted **server-side** in the task via `onTurnStart` and `onTurnComplete`. +2. **Sessions** — The transport's connection state (`runId`, `publicAccessToken`, `lastEventId`). Persisted **server-side** via `onTurnStart` and `onTurnComplete`. + + + Sessions let the transport reconnect to an existing run after a page refresh. Without them, every page load would start a new run — losing the conversation context that was accumulated in the previous run. + + +#### Full persistence example + + +```ts trigger/chat.ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; +import { db } from "@/lib/db"; + +export const myChat = chat.task({ + id: "my-chat", + clientDataSchema: z.object({ + userId: z.string(), + }), + onChatStart: async ({ chatId, clientData }) => { + await db.chat.create({ + data: { id: chatId, userId: clientData.userId, title: "New chat", messages: [] }, + }); + }, + onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { + // Persist messages + session before streaming + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { + // Persist assistant response + stream position + await db.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages }, + }); + await db.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: 
chatAccessToken, lastEventId },
+    });
+  },
+  run: async ({ messages, signal }) => {
+    return streamText({
+      model: openai("gpt-4o"),
+      messages,
+      abortSignal: signal,
+    });
+  },
+});
+```
+
+```ts app/actions.ts
+"use server";
+
+import { chat } from "@trigger.dev/sdk/ai";
+import type { myChat } from "@/trigger/chat";
+import { db } from "@/lib/db";
+
+export const getChatToken = () =>
+  chat.createAccessToken("my-chat");
+
+export async function getChatMessages(chatId: string) {
+  const found = await db.chat.findUnique({ where: { id: chatId } });
+  return found?.messages ?? [];
+}
+
+export async function getAllSessions() {
+  const sessions = await db.chatSession.findMany();
+  const result: Record<string, { runId: string; publicAccessToken: string; lastEventId?: string }> = {};
+  for (const s of sessions) {
+    result[s.id] = {
+      runId: s.runId,
+      publicAccessToken: s.publicAccessToken,
+      lastEventId: s.lastEventId ?? undefined,
+    };
+  }
+  return result;
+}
+
+export async function deleteSession(chatId: string) {
+  await db.chatSession.delete({ where: { id: chatId } }).catch(() => {});
+}
+```
+
+```tsx app/components/chat.tsx
+"use client";
+
+import { useChat } from "@ai-sdk/react";
+import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react";
+import type { myChat } from "@/trigger/chat";
+import { getChatToken, deleteSession } from "@/app/actions";
+
+export function Chat({ chatId, initialMessages, initialSessions }) {
+  const transport = useTriggerChatTransport({
+    task: "my-chat",
+    accessToken: getChatToken,
+    clientData: { userId: currentUser.id }, // Type-checked against clientDataSchema
+    sessions: initialSessions,
+    onSessionChange: (id, session) => {
+      if (!session) deleteSession(id);
+    },
+  });
+
+  const { messages, sendMessage, stop, status } = useChat({
+    id: chatId,
+    messages: initialMessages,
+    transport,
+    resume: initialMessages.length > 0,
+  });
+
+  return (
+
+ {messages.map((m) => ( +
+ {m.role}: + {m.parts.map((part, i) => + part.type === "text" ? {part.text} : null + )} +
+ ))} + + { + e.preventDefault(); + const input = e.currentTarget.querySelector("input"); + if (input?.value) { + sendMessage({ text: input.value }); + input.value = ""; + } + }} + > + + + {status === "streaming" && ( + + )} + +
+ ); +} +``` +
+ +### Runtime configuration + +#### chat.setTurnTimeout() + +Override how long the run stays suspended waiting for the next message. Call from inside `run()`: + +```ts +run: async ({ messages, signal }) => { + chat.setTurnTimeout("2h"); // Wait longer for this conversation + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); +}, +``` + +#### chat.setWarmTimeoutInSeconds() + +Override how long the run stays warm (active, using compute) after each turn: + +```ts +run: async ({ messages, signal }) => { + chat.setWarmTimeoutInSeconds(60); // Stay warm for 1 minute + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); +}, +``` + + + Longer warm timeout means faster responses but more compute usage. Set to `0` to suspend immediately after each turn (minimum latency cost, slight delay on next message). + + +### Manual mode with task() + +If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `chat.pipe()`: + +```ts +import { task } from "@trigger.dev/sdk"; +import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const manualChat = task({ + id: "manual-chat", + retry: { maxAttempts: 3 }, + queue: { concurrencyLimit: 10 }, + run: async (payload: ChatTaskPayload) => { + const result = streamText({ + model: openai("gpt-4o"), + messages: payload.messages, + }); + + await chat.pipe(result); + }, +}); +``` + + + Manual mode does not get automatic message accumulation or the `onTurnComplete`/`onChatStart` lifecycle hooks. The `responseMessage` field in `onTurnComplete` will be `undefined` when using `chat.pipe()` directly. Use `chat.task()` for the full multi-turn experience. + + +--- + +## chat.createSession() + +A middle ground between `chat.task()` and raw primitives. 
You get an async iterator that yields `ChatTurn` objects — each turn handles stop signals, message accumulation, and turn-complete signaling automatically. You control initialization, model/tool selection, persistence, and any custom per-turn logic. + +Use `chat.createSession()` inside a standard `task()`: + +```ts +import { task } from "@trigger.dev/sdk"; +import { chat, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const myChat = task({ + id: "my-chat", + run: async (payload: ChatTaskWirePayload, { signal }) => { + // One-time initialization — just code, no hooks + const clientData = payload.metadata as { userId: string }; + await db.chat.create({ data: { id: payload.chatId, userId: clientData.userId } }); + + const session = chat.createSession(payload, { + signal, + warmTimeoutInSeconds: 60, + timeout: "1h", + }); + + for await (const turn of session) { + const result = streamText({ + model: openai("gpt-4o"), + messages: turn.messages, + abortSignal: turn.signal, + }); + + // Pipe, capture, accumulate, and signal turn-complete — all in one call + await turn.complete(result); + + // Persist after each turn + await db.chat.update({ + where: { id: turn.chatId }, + data: { messages: turn.uiMessages }, + }); + } + }, +}); +``` + +### ChatSessionOptions + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `signal` | `AbortSignal` | required | Run-level cancel signal (from task context) | +| `warmTimeoutInSeconds` | `number` | `30` | Seconds to stay warm between turns | +| `timeout` | `string` | `"1h"` | Duration string for suspend timeout | +| `maxTurns` | `number` | `100` | Max turns before ending | + +### ChatTurn + +Each turn yielded by the iterator provides: + +| Field | Type | Description | +|-------|------|-------------| +| `number` | `number` | Turn number (0-indexed) | +| `chatId` | `string` | Chat session ID | +| `trigger` | 
`string` | What triggered this turn | +| `clientData` | `unknown` | Client data from the transport | +| `messages` | `ModelMessage[]` | Full accumulated model messages — pass to `streamText` | +| `uiMessages` | `UIMessage[]` | Full accumulated UI messages — use for persistence | +| `signal` | `AbortSignal` | Combined stop+cancel signal (fresh each turn) | +| `stopped` | `boolean` | Whether the user stopped generation this turn | +| `continuation` | `boolean` | Whether this is a continuation run | + +| Method | Description | +|--------|-------------| +| `turn.complete(source)` | Pipe stream, capture response, accumulate, and signal turn-complete | +| `turn.done()` | Just signal turn-complete (when you've piped manually) | +| `turn.addResponse(response)` | Add a response to the accumulator manually | + +### turn.complete() vs manual control + +`turn.complete(result)` is the easy path — it handles piping, capturing the response, accumulating messages, cleaning up aborted parts, and writing the turn-complete chunk. + +For more control, you can do each step manually: + +```ts +for await (const turn of session) { + const result = streamText({ + model: openai("gpt-4o"), + messages: turn.messages, + abortSignal: turn.signal, + }); + + // Manual: pipe and capture separately + const response = await chat.pipeAndCapture(result, { signal: turn.signal }); + + if (response) { + // Custom processing before accumulating + await turn.addResponse(response); + } + + // Custom persistence, analytics, etc. + await db.chat.update({ ... }); + + // Must call done() when not using complete() + await turn.done(); +} +``` + +--- + +## Raw task with primitives + +For full control, use a standard `task()` with the composable primitives from the `chat` namespace. You manage everything: the turn loop, stop signals, message accumulation, and turn-complete signaling. 
+ +### Primitives + +| Primitive | Description | +|-----------|-------------| +| `chat.messages` | Input stream for incoming messages — use `.waitWithWarmup()` to wait for the next turn | +| `chat.createStopSignal()` | Create a managed stop signal wired to the stop input stream | +| `chat.pipeAndCapture(result)` | Pipe a `StreamTextResult` to the chat stream and capture the response | +| `chat.writeTurnComplete()` | Signal the frontend that the current turn is complete | +| `chat.MessageAccumulator` | Accumulates conversation messages across turns | +| `chat.pipe(stream)` | Pipe a stream to the frontend (no response capture) | +| `chat.cleanupAbortedParts(msg)` | Clean up incomplete parts from a stopped response | + +### Example + +```ts +import { task } from "@trigger.dev/sdk"; +import { chat, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export const myChat = task({ + id: "my-chat-raw", + run: async (payload: ChatTaskWirePayload, { signal: runSignal }) => { + let currentPayload = payload; + + // Handle preload — wait for the first real message + if (currentPayload.trigger === "preload") { + const result = await chat.messages.waitWithWarmup({ + warmTimeoutInSeconds: 60, + timeout: "1h", + spanName: "waiting for first message", + }); + if (!result.ok) return; + currentPayload = result.output; + } + + const stop = chat.createStopSignal(); + const conversation = new chat.MessageAccumulator(); + + for (let turn = 0; turn < 100; turn++) { + stop.reset(); + + const messages = await conversation.addIncoming( + currentPayload.messages, + currentPayload.trigger, + turn + ); + + const combinedSignal = AbortSignal.any([runSignal, stop.signal]); + + const result = streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: combinedSignal, + }); + + let response; + try { + response = await chat.pipeAndCapture(result, { signal: combinedSignal }); + } catch (error) { + if (error 
instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) break; + // Stop — fall through to accumulate partial + } else { + throw error; + } + } + + if (response) { + const cleaned = stop.signal.aborted && !runSignal.aborted + ? chat.cleanupAbortedParts(response) + : response; + await conversation.addResponse(cleaned); + } + + if (runSignal.aborted) break; + + // Persist, analytics, etc. + await db.chat.update({ + where: { id: currentPayload.chatId }, + data: { messages: conversation.uiMessages }, + }); + + await chat.writeTurnComplete(); + + // Wait for the next message + const next = await chat.messages.waitWithWarmup({ + warmTimeoutInSeconds: 60, + timeout: "1h", + spanName: "waiting for next message", + }); + if (!next.ok) break; + currentPayload = next.output; + } + + stop.cleanup(); + }, +}); +``` + +### MessageAccumulator + +The `MessageAccumulator` handles the transport protocol automatically: + +- Turn 0: replaces messages (full history from frontend) +- Subsequent turns: appends new messages (frontend only sends the new user message) +- Regenerate: replaces messages (full history minus last assistant message) + +```ts +const conversation = new chat.MessageAccumulator(); + +// Returns full accumulated ModelMessage[] for streamText +const messages = await conversation.addIncoming(payload.messages, payload.trigger, turn); + +// After piping, add the response +const response = await chat.pipeAndCapture(result); +if (response) await conversation.addResponse(response); + +// Access accumulated messages for persistence +conversation.uiMessages; // UIMessage[] +conversation.modelMessages; // ModelMessage[] +``` diff --git a/docs/ai-chat/features.mdx b/docs/ai-chat/features.mdx new file mode 100644 index 00000000000..fd4b63789a1 --- /dev/null +++ b/docs/ai-chat/features.mdx @@ -0,0 +1,421 @@ +--- +title: "Features" +sidebarTitle: "Features" +description: "Per-run data, deferred work, custom streaming, subtask integration, and preload." 
+
+---
+
+## Per-run data with chat.local
+
+Use `chat.local` to create typed, run-scoped data that persists across turns and is accessible from anywhere — the run function, tools, nested helpers. Each run gets its own isolated copy, and locals are automatically cleared between runs.
+
+When a subtask is invoked via `ai.tool()`, initialized locals are automatically serialized into the subtask's metadata and hydrated on first access — no extra code needed. Subtask changes to hydrated locals are local to the subtask and don't propagate back to the parent.
+
+### Declaring and initializing
+
+Declare locals at module level with a unique `id`, then initialize them inside a lifecycle hook where you have context (chatId, clientData, etc.):
+
+```ts
+import { chat } from "@trigger.dev/sdk/ai";
+import { streamText, tool } from "ai";
+import { openai } from "@ai-sdk/openai";
+import { z } from "zod";
+import { db } from "@/lib/db";
+
+// Declare at module level — each local needs a unique id
+const userContext = chat.local<{
+  userId: string;
+  name: string;
+  plan: "free" | "pro"; messageCount: number;
+}>({ id: "userContext" });
+
+export const myChat = chat.task({
+  id: "my-chat",
+  clientDataSchema: z.object({ userId: z.string() }),
+  onChatStart: async ({ clientData }) => {
+    // Initialize with real data from your database
+    const user = await db.user.findUnique({
+      where: { id: clientData.userId },
+    });
+    userContext.init({
+      userId: user.id,
+      name: user.name,
+      plan: user.plan, messageCount: user.messageCount,
+    });
+  },
+  run: async ({ messages, signal }) => {
+    userContext.messageCount++;
+
+    return streamText({
+      model: openai("gpt-4o"),
+      system: `Helping ${userContext.name} (${userContext.plan} plan).`,
+      messages,
+      abortSignal: signal,
+    });
+  },
+});
+```
+
+### Accessing from tools
+
+Locals are accessible from anywhere during task execution — including AI SDK tools:
+
+```ts
+const userContext = chat.local<{ plan: "free" | "pro" }>({ id: "userContext" });
+
+const premiumTool = tool({
+  
description: "Access premium features", + inputSchema: z.object({ feature: z.string() }), + execute: async ({ feature }) => { + if (userContext.plan !== "pro") { + return { error: "This feature requires a Pro plan." }; + } + // ... premium logic + }, +}); +``` + +### Accessing from subtasks + +When you use `ai.tool()` to expose a subtask, chat locals are automatically available read-only: + +```ts +import { chat, ai } from "@trigger.dev/sdk/ai"; +import { schemaTask } from "@trigger.dev/sdk"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const userContext = chat.local<{ name: string; plan: "free" | "pro" }>({ id: "userContext" }); + +export const analyzeData = schemaTask({ + id: "analyze-data", + schema: z.object({ query: z.string() }), + run: async ({ query }) => { + // userContext.name just works — auto-hydrated from parent metadata + console.log(`Analyzing for ${userContext.name}`); + // Changes here are local to this subtask and don't propagate back + }, +}); + +export const myChat = chat.task({ + id: "my-chat", + onChatStart: async ({ clientData }) => { + userContext.init({ name: "Alice", plan: "pro" }); + }, + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + tools: { analyzeData: ai.tool(analyzeData) }, + abortSignal: signal, + }); + }, +}); +``` + + + Values must be JSON-serializable for subtask access. Non-serializable values (functions, class instances, etc.) will be lost during transfer. + + +### Dirty tracking and persistence + +The `hasChanged()` method returns `true` if any property was set since the last check, then resets the flag. 
Use it in lifecycle hooks to only persist when data actually changed: + +```ts +onTurnComplete: async ({ chatId }) => { + if (userContext.hasChanged()) { + await db.user.update({ + where: { id: userContext.get().userId }, + data: { + messageCount: userContext.messageCount, + }, + }); + } +}, +``` + +### chat.local API + +| Method | Description | +|--------|-------------| +| `chat.local({ id })` | Create a typed local with a unique id (declare at module level) | +| `local.init(value)` | Initialize with a value (call in hooks or `run`) | +| `local.hasChanged()` | Returns `true` if modified since last check, resets flag | +| `local.get()` | Returns a plain object copy (for serialization) | +| `local.property` | Direct property access (read/write via Proxy) | + + + Locals use shallow proxying. Nested object mutations like `local.prefs.theme = "dark"` won't trigger the dirty flag. Instead, replace the whole property: `local.prefs = { ...local.prefs, theme: "dark" }`. + + +--- + +## chat.defer() + +Use `chat.defer()` to run background work in parallel with streaming. The deferred promise runs alongside the LLM response and is awaited (with a 5s timeout) before `onTurnComplete` fires. + +This moves non-blocking work (DB writes, analytics, etc.) out of the critical path: + +```ts +export const myChat = chat.task({ + id: "my-chat", + onTurnStart: async ({ chatId, uiMessages }) => { + // Persist messages without blocking the LLM call + chat.defer(db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } })); + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +`chat.defer()` can be called from anywhere during a turn — hooks, `run()`, or nested helpers. All deferred promises are collected and awaited together before `onTurnComplete`. + +--- + +## Custom streaming with chat.stream + +`chat.stream` is a typed stream bound to the chat output. 
Use it to write custom `UIMessageChunk` data alongside the AI-generated response — for example, status updates or progress indicators. + +```ts +import { chat } from "@trigger.dev/sdk/ai"; + +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + // Write a custom data part to the chat stream. + // The AI SDK's data-* chunk protocol adds this to message.parts + // on the frontend, where you can render it however you like. + const { waitUntilComplete } = chat.stream.writer({ + execute: ({ write }) => { + write({ + type: "data-status", + id: "search-progress", + data: { message: "Searching the web...", progress: 0.5 }, + }); + }, + }); + await waitUntilComplete(); + + // Then stream the AI response + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + + + Use `data-*` chunk types (e.g. `data-status`, `data-progress`) for custom data. The AI SDK processes these into `DataUIPart` objects in `message.parts` on the frontend. Writing the same `type` + `id` again updates the existing part instead of creating a new one — useful for live progress. 
+ + +`chat.stream` exposes the full stream API: + +| Method | Description | +|--------|-------------| +| `chat.stream.writer(options)` | Write individual chunks via a callback | +| `chat.stream.pipe(stream, options?)` | Pipe a `ReadableStream` or `AsyncIterable` | +| `chat.stream.append(value, options?)` | Append raw data | +| `chat.stream.read(runId, options?)` | Read the stream by run ID | + +### Streaming from subtasks + +When a tool invokes a subtask via `triggerAndWait`, the subtask can stream directly to the parent chat using `target: "root"`: + +```ts +import { chat, ai } from "@trigger.dev/sdk/ai"; +import { schemaTask } from "@trigger.dev/sdk"; +import { streamText, generateId } from "ai"; +import { z } from "zod"; + +// A subtask that streams progress back to the parent chat +export const researchTask = schemaTask({ + id: "research", + schema: z.object({ query: z.string() }), + run: async ({ query }) => { + const partId = generateId(); + + // Write a data-* chunk to the root run's chat stream. + // The frontend receives this as a DataUIPart in message.parts. + const { waitUntilComplete } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-status", + id: partId, + data: { query, status: "in-progress" }, + }); + }, + }); + await waitUntilComplete(); + + // Do the work... 
+ const result = await doResearch(query); + + // Update the same part with the final status + const { waitUntilComplete: waitDone } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-status", + id: partId, + data: { query, status: "done", resultCount: result.length }, + }); + }, + }); + await waitDone(); + + return result; + }, +}); + +// The chat task uses it as a tool via ai.tool() +export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + tools: { + research: ai.tool(researchTask), + }, + }); + }, +}); +``` + +On the frontend, render the custom data part: + +```tsx +{message.parts.map((part, i) => { + if (part.type === "data-research-status") { + const { query, status, resultCount } = part.data; + return ( +
+ {status === "done" ? `Found ${resultCount} results` : `Researching "${query}"...`} +
+ ); + } + // ...other part types +})} +``` + +The `target` option accepts: +- `"self"` — current run (default) +- `"parent"` — parent task's run +- `"root"` — root task's run (the chat task) +- A specific run ID string + +--- + +## ai.tool() — subtask integration + +When a subtask runs via `ai.tool()`, it can access the tool call context and chat context from the parent: + +```ts +import { ai, chat } from "@trigger.dev/sdk/ai"; +import type { myChat } from "./chat"; + +export const mySubtask = schemaTask({ + id: "my-subtask", + schema: z.object({ query: z.string() }), + run: async ({ query }) => { + // Get the AI SDK's tool call ID (useful for data-* chunk IDs) + const toolCallId = ai.toolCallId(); + + // Get typed chat context — pass typeof yourChatTask for typed clientData + const { chatId, clientData } = ai.chatContextOrThrow(); + // clientData is typed based on myChat's clientDataSchema + + // Write a data chunk using the tool call ID + const { waitUntilComplete } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-progress", + id: toolCallId, + data: { status: "working", query, userId: clientData?.userId }, + }); + }, + }); + await waitUntilComplete(); + + return { result: "done" }; + }, +}); +``` + +| Helper | Returns | Description | +|--------|---------|-------------| +| `ai.toolCallId()` | `string \| undefined` | The AI SDK tool call ID | +| `ai.chatContext()` | `{ chatId, turn, continuation, clientData } \| undefined` | Chat context with typed `clientData`. Returns `undefined` if not in a chat context. | +| `ai.chatContextOrThrow()` | `{ chatId, turn, continuation, clientData }` | Same as above but throws if not in a chat context | +| `ai.currentToolOptions()` | `ToolCallExecutionOptions \| undefined` | Full tool execution options | + +--- + +## Preload + +Preload eagerly triggers a run for a chat before the first message is sent. 
This allows initialization (DB setup, context loading) to happen while the user is still typing, reducing first-response latency. + +### Frontend + +Call `transport.preload(chatId)` to start a run early: + +```tsx +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import { useChat } from "@ai-sdk/react"; + +export function Chat({ chatId }) { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + clientData: { userId: currentUser.id }, + }); + + // Preload on mount — run starts before the user types anything + useEffect(() => { + transport.preload(chatId, { warmTimeoutInSeconds: 60 }); + }, [chatId]); + + const { messages, sendMessage } = useChat({ id: chatId, transport }); + // ... +} +``` + +Preload is a no-op if a session already exists for this chatId. + +### Backend + +On the backend, the `onPreload` hook fires immediately. The run then waits for the first message. When the user sends a message, `onChatStart` fires with `preloaded: true` — you can skip initialization that was already done in `onPreload`: + +```ts +export const myChat = chat.task({ + id: "my-chat", + onPreload: async ({ chatId, clientData }) => { + // Eagerly initialize — runs before the first message + userContext.init(await loadUser(clientData.userId)); + await db.chat.create({ data: { id: chatId } }); + }, + onChatStart: async ({ preloaded }) => { + if (preloaded) return; // Already initialized in onPreload + // ... fallback initialization for non-preloaded runs + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +With `chat.createSession()` or raw tasks, check `payload.trigger === "preload"` and wait for the first message: + +```ts +if (payload.trigger === "preload") { + // Initialize early... 
+ const result = await chat.messages.waitWithWarmup({ + warmTimeoutInSeconds: 60, + timeout: "1h", + }); + if (!result.ok) return; + currentPayload = result.output; +} +``` diff --git a/docs/ai-chat/frontend.mdx b/docs/ai-chat/frontend.mdx new file mode 100644 index 00000000000..0e7854e4d5d --- /dev/null +++ b/docs/ai-chat/frontend.mdx @@ -0,0 +1,234 @@ +--- +title: "Frontend" +sidebarTitle: "Frontend" +description: "Transport setup, session management, client data, and frontend patterns for AI Chat." +--- + +## Transport setup + +Use the `useTriggerChatTransport` hook from `@trigger.dev/sdk/chat/react` to create a memoized transport instance, then pass it to `useChat`: + +```tsx +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import { useChat } from "@ai-sdk/react"; +import type { myChat } from "@/trigger/chat"; +import { getChatToken } from "@/app/actions"; + +export function Chat() { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + }); + + const { messages, sendMessage, stop, status } = useChat({ transport }); + // ... render UI +} +``` + +The transport is created once on first render and reused across re-renders. Pass a type parameter for compile-time validation of the task ID. + + + The hook keeps `onSessionChange` up to date via a ref internally, so you don't need to memoize the callback or worry about stale closures. + + +### Dynamic access tokens + +For token refresh, pass a function instead of a string. 
It's called on each `sendMessage`: + +```ts +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: async () => { + const res = await fetch("/api/chat-token"); + return res.text(); + }, +}); +``` + +## Session management + +### Session cleanup (frontend) + +Since session creation and updates are handled server-side, the frontend only needs to handle session deletion when a run ends: + +```tsx +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + sessions: loadedSessions, // Restored from DB on page load + onSessionChange: (chatId, session) => { + if (!session) { + deleteSession(chatId); // Server action — run ended + } + }, +}); +``` + +### Restoring on page load + +On page load, fetch both the messages and the session from your database, then pass them to `useChat` and the transport. Pass `resume: true` to `useChat` when there's an existing conversation — this tells the AI SDK to reconnect to the stream via the transport. + +```tsx app/page.tsx +"use client"; + +import { useEffect, useState } from "react"; +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import { useChat } from "@ai-sdk/react"; +import { getChatToken, getChatMessages, getSession, deleteSession } from "@/app/actions"; + +export default function ChatPage({ chatId }: { chatId: string }) { + const [initialMessages, setInitialMessages] = useState([]); + const [initialSession, setInitialSession] = useState(undefined); + const [loaded, setLoaded] = useState(false); + + useEffect(() => { + async function load() { + const [messages, session] = await Promise.all([ + getChatMessages(chatId), + getSession(chatId), + ]); + setInitialMessages(messages); + setInitialSession(session ? 
{ [chatId]: session } : undefined); + setLoaded(true); + } + load(); + }, [chatId]); + + if (!loaded) return null; + + return ( + + ); +} + +function ChatClient({ chatId, initialMessages, initialSessions }) { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + sessions: initialSessions, + onSessionChange: (id, session) => { + if (!session) deleteSession(id); + }, + }); + + const { messages, sendMessage, stop, status } = useChat({ + id: chatId, + messages: initialMessages, + transport, + resume: initialMessages.length > 0, // Resume if there's an existing conversation + }); + + // ... render UI +} +``` + + + `resume: true` causes `useChat` to call `reconnectToStream` on the transport when the component mounts. The transport uses the session's `lastEventId` to skip past already-seen stream events, so the frontend only receives new data. Only enable `resume` when there are existing messages — for brand new chats, there's nothing to reconnect to. + + + + In React strict mode (enabled by default in Next.js dev), you may see a `TypeError: Cannot read properties of undefined (reading 'state')` in the console when using `resume`. This is a [known bug in the AI SDK](https://github.com/vercel/ai/issues/8477) caused by React strict mode double-firing the resume effect. The error is caught internally and **does not affect functionality** — streaming and message display work correctly. It only appears in development and will not occur in production builds. + + +## Client data and metadata + +### Transport-level client data + +Set default client data on the transport that's included in every request. When the task uses `clientDataSchema`, this is type-checked to match: + +```ts +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + clientData: { userId: currentUser.id }, +}); +``` + +### Per-message metadata + +Pass metadata with individual messages via `sendMessage`. 
Per-message values are merged with transport-level client data (per-message wins on conflicts): + +```ts +sendMessage( + { text: "Hello" }, + { metadata: { model: "gpt-4o", priority: "high" } } +); +``` + +### Typed client data with clientDataSchema + +Instead of manually parsing `clientData` with Zod in every hook, pass a `clientDataSchema` to `chat.task`. The schema validates the data once per turn, and `clientData` is typed in all hooks and `run`: + +```ts +import { chat } from "@trigger.dev/sdk/ai"; +import { streamText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +export const myChat = chat.task({ + id: "my-chat", + clientDataSchema: z.object({ + model: z.string().optional(), + userId: z.string(), + }), + onChatStart: async ({ chatId, clientData }) => { + // clientData is typed as { model?: string; userId: string } + await db.chat.create({ + data: { id: chatId, userId: clientData.userId }, + }); + }, + run: async ({ messages, clientData, signal }) => { + // Same typed clientData — no manual parsing needed + return streamText({ + model: openai(clientData?.model ?? "gpt-4o"), + messages, + abortSignal: signal, + }); + }, +}); +``` + +The schema also types the `clientData` option on the frontend transport: + +```ts +// TypeScript enforces that clientData matches the schema +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + clientData: { userId: currentUser.id }, +}); +``` + +Supports Zod, ArkType, Valibot, and other schema libraries supported by the SDK. + +## Stop generation + +Calling `stop()` from `useChat` sends a stop signal to the running task via input streams. 
The task aborts the current `streamText` call, but the run stays alive for the next message: + +```tsx +const { messages, sendMessage, stop, status } = useChat({ transport }); + +{status === "streaming" && ( + +)} +``` + +See [Stop generation](/ai-chat/backend#stop-generation) in the backend docs for how to handle stop signals in your task. + +## Self-hosting + +If you're self-hosting Trigger.dev, pass the `baseURL` option: + +```ts +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken, + baseURL: "https://your-trigger-instance.com", +}); +``` diff --git a/docs/ai-chat/overview.mdx b/docs/ai-chat/overview.mdx new file mode 100644 index 00000000000..eb3d1ab23df --- /dev/null +++ b/docs/ai-chat/overview.mdx @@ -0,0 +1,161 @@ +--- +title: "AI Chat" +sidebarTitle: "Overview" +description: "Run AI SDK chat completions as durable Trigger.dev tasks with built-in realtime streaming, multi-turn conversations, and message persistence." +--- + +## Overview + +The `@trigger.dev/sdk` provides a custom [ChatTransport](https://sdk.vercel.ai/docs/ai-sdk-ui/transport) for the Vercel AI SDK's `useChat` hook. This lets you run chat completions as **durable Trigger.dev tasks** instead of fragile API routes — with automatic retries, observability, and realtime streaming built in. + +**How it works:** +1. The frontend sends messages via `useChat` through `TriggerChatTransport` +2. The first message triggers a Trigger.dev task; subsequent messages resume the **same run** via input streams +3. The task streams `UIMessageChunk` events back via Trigger.dev's realtime streams +4. The AI SDK's `useChat` processes the stream natively — text, tool calls, reasoning, etc. +5. Between turns, the run stays warm briefly then suspends (freeing compute) until the next message + +No custom API routes needed. Your chat backend is a Trigger.dev task. 
+ + + +### First message flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + User->>useChat: sendMessage("Hello") + useChat->>useChat: No session for chatId → trigger new run + useChat->>API: triggerTask(payload, tags: [chat:id]) + API-->>useChat: { runId, publicAccessToken } + useChat->>useChat: Store session, subscribe to SSE + + API->>Task: Start run with ChatTaskWirePayload + Task->>Task: onChatStart({ chatId, messages, clientData }) + Task->>Task: onTurnStart({ chatId, messages }) + Task->>LLM: streamText({ model, messages, abortSignal }) + LLM-->>Task: Stream response chunks + Task->>API: streams.pipe("chat", uiStream) + API-->>useChat: SSE: UIMessageChunks + useChat-->>User: Render streaming text + Task->>API: Write __trigger_turn_complete + API-->>useChat: SSE: turn complete + refreshed token + useChat->>useChat: Close stream, update session + Task->>Task: onTurnComplete({ messages, stopped: false }) + Task->>Task: Wait for next message (warm → suspend) +``` + +### Multi-turn flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + Note over Task: Suspended, waiting for message + + User->>useChat: sendMessage("Tell me more") + useChat->>useChat: Session exists → send via input stream + useChat->>API: sendInputStream(runId, "chat-messages", payload) + Note right of useChat: Only sends new message (not full history) + + API->>Task: Deliver to messagesInput + Task->>Task: Wake from suspend + Task->>Task: Append to accumulated messages + Task->>Task: onTurnStart({ turn: 1 }) + Task->>LLM: streamText({ messages: [all accumulated] }) + LLM-->>Task: Stream response + Task->>API: streams.pipe("chat", uiStream) + API-->>useChat: SSE: UIMessageChunks + 
useChat-->>User: Render streaming text + Task->>API: Write __trigger_turn_complete + Task->>Task: onTurnComplete({ turn: 1 }) + Task->>Task: Wait for next message (warm → suspend) +``` + +### Stop signal flow + +```mermaid +sequenceDiagram + participant User + participant useChat as useChat + Transport + participant API as Trigger.dev API + participant Task as chat.task Worker + participant LLM as LLM Provider + + Note over Task: Streaming response... + + User->>useChat: Click "Stop" + useChat->>API: sendInputStream(runId, "chat-stop", { stop: true }) + API->>Task: Deliver to stopInput + Task->>Task: stopController.abort() + LLM-->>Task: Stream ends (AbortError) + Task->>Task: cleanupAbortedParts(responseMessage) + Note right of Task: Remove partial tool calls,
mark streaming parts as done + Task->>API: Write __trigger_turn_complete + API-->>useChat: SSE: turn complete + Task->>Task: onTurnComplete({ stopped: true }) + Task->>Task: Wait for next message +``` + +
+ + + Requires `@trigger.dev/sdk` version **4.4.0 or later** and the `ai` package **v5.0.0 or later**. + + +## How multi-turn works + +### One run, many turns + +The entire conversation lives in a **single Trigger.dev run**. After each AI response, the run waits for the next message via input streams. The frontend transport handles this automatically — it triggers a new run for the first message, and sends subsequent messages to the existing run. + +This means your conversation has full observability in the Trigger.dev dashboard: every turn is a span inside the same run. + +### Warm and suspended states + +After each turn, the run goes through two phases of waiting: + +1. **Warm phase** (default 30s) — The run stays active and responds instantly to the next message. Uses compute. +2. **Suspended phase** (default up to 1h) — The run suspends, freeing compute. It wakes when the next message arrives. There's a brief delay as the run resumes. + +If no message arrives within the turn timeout, the run ends gracefully. The next message from the frontend will automatically start a fresh run. + + + You are not charged for compute during the suspended phase. Only the warm phase uses compute resources. + + +### What the backend accumulates + +The backend automatically accumulates the full conversation history across turns. After the first turn, the frontend transport only sends the new user message — not the entire history. This is handled transparently by the transport and task. 
+ +The accumulated messages are available in: +- `run()` as `messages` (`ModelMessage[]`) — for passing to `streamText` +- `onTurnStart()` as `uiMessages` (`UIMessage[]`) — for persisting before streaming +- `onTurnComplete()` as `uiMessages` (`UIMessage[]`) — for persisting after the response + +## Three approaches + +There are three ways to build the backend, from most opinionated to most flexible: + +| Approach | Use when | What you get | +|----------|----------|--------------| +| [chat.task()](/ai-chat/backend#chattask) | Most apps | Auto-piping, lifecycle hooks, message accumulation, stop handling | +| [chat.createSession()](/ai-chat/backend#chatcreatesession) | Need a loop but not hooks | Async iterator with per-turn helpers, message accumulation, stop handling | +| [Raw task + primitives](/ai-chat/backend#raw-task-with-primitives) | Full control | Manual control of every step — use `chat.messages`, `chat.createStopSignal()`, etc. | + +## Related + +- [Quick Start](/ai-chat/quick-start) — Get a working chat in 3 steps +- [Backend](/ai-chat/backend) — Backend approaches in detail +- [Frontend](/ai-chat/frontend) — Transport setup, sessions, client data +- [Features](/ai-chat/features) — Per-run data, deferred work, streaming, subtasks +- [API Reference](/ai-chat/reference) — Complete reference tables diff --git a/docs/ai-chat/quick-start.mdx b/docs/ai-chat/quick-start.mdx new file mode 100644 index 00000000000..b8245d92372 --- /dev/null +++ b/docs/ai-chat/quick-start.mdx @@ -0,0 +1,108 @@ +--- +title: "Quick Start" +sidebarTitle: "Quick Start" +description: "Get a working AI chat in 3 steps — define a task, generate a token, and wire up the frontend." +--- + + + + Use `chat.task` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The `run` function receives `ModelMessage[]` (already converted from the frontend's `UIMessage[]`) — pass them directly to `streamText`. 
+ + If you return a `StreamTextResult`, it's **automatically piped** to the frontend. + + ```ts trigger/chat.ts + import { chat } from "@trigger.dev/sdk/ai"; + import { streamText } from "ai"; + import { openai } from "@ai-sdk/openai"; + + export const myChat = chat.task({ + id: "my-chat", + run: async ({ messages, signal }) => { + // messages is ModelMessage[] — pass directly to streamText + // signal fires on stop or run cancel + return streamText({ + model: openai("gpt-4o"), + messages, + abortSignal: signal, + }); + }, + }); + ``` + + + + On your server (e.g. a Next.js server action), create a trigger public token scoped to your chat task: + + ```ts app/actions.ts + "use server"; + + import { chat } from "@trigger.dev/sdk/ai"; + import type { myChat } from "@/trigger/chat"; + + export const getChatToken = () => + chat.createAccessToken("my-chat"); + ``` + + + + Use the `useTriggerChatTransport` hook from `@trigger.dev/sdk/chat/react` to create a memoized transport instance, then pass it to `useChat`: + + ```tsx app/components/chat.tsx + "use client"; + + import { useChat } from "@ai-sdk/react"; + import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; + import type { myChat } from "@/trigger/chat"; + import { getChatToken } from "@/app/actions"; + + export function Chat() { + const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + }); + + const { messages, sendMessage, stop, status } = useChat({ transport }); + + return ( +
+      <div>
+        {messages.map((m) => (
+          <div key={m.id}>
+            <strong>{m.role}:</strong>{" "}
+            {m.parts.map((part, i) =>
+              part.type === "text" ? <span key={i}>{part.text}</span> : null
+            )}
+          </div>
+        ))}
+
+        <form
+          onSubmit={(e) => {
+            e.preventDefault();
+            const input = e.currentTarget.querySelector("input");
+            if (input?.value) {
+              sendMessage({ text: input.value });
+              input.value = "";
+            }
+          }}
+        >
+          <input placeholder="Say something..." />
+          <button type="submit">Send</button>
+          {status === "streaming" && (
+            <button type="button" onClick={stop}>
+              Stop
+            </button>
+          )}
+        </form>
+      </div>
+    );
+  }
+  ```
+
+  </Step>
+</Steps>
+ +## Next steps + +- [Backend](/ai-chat/backend) — Lifecycle hooks, persistence, session iterator, raw task primitives +- [Frontend](/ai-chat/frontend) — Session management, client data, reconnection +- [Features](/ai-chat/features) — Per-run data, deferred work, streaming, subtasks diff --git a/docs/ai-chat/reference.mdx b/docs/ai-chat/reference.mdx new file mode 100644 index 00000000000..96c30b3b03b --- /dev/null +++ b/docs/ai-chat/reference.mdx @@ -0,0 +1,240 @@ +--- +title: "API Reference" +sidebarTitle: "API Reference" +description: "Complete API reference for the AI Chat SDK — backend options, events, frontend transport, and hooks." +--- + +## ChatTaskOptions + +Options for `chat.task()`. + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `id` | `string` | required | Task identifier | +| `run` | `(payload: ChatTaskRunPayload) => Promise` | required | Handler for each turn | +| `clientDataSchema` | `TaskSchema` | — | Schema for validating and typing `clientData` | +| `onPreload` | `(event: PreloadEvent) => Promise \| void` | — | Fires on preloaded runs before the first message | +| `onChatStart` | `(event: ChatStartEvent) => Promise \| void` | — | Fires on turn 0 before `run()` | +| `onTurnStart` | `(event: TurnStartEvent) => Promise \| void` | — | Fires every turn before `run()` | +| `onTurnComplete` | `(event: TurnCompleteEvent) => Promise \| void` | — | Fires after each turn completes | +| `maxTurns` | `number` | `100` | Max conversational turns per run | +| `turnTimeout` | `string` | `"1h"` | How long to wait for next message | +| `warmTimeoutInSeconds` | `number` | `30` | Seconds to stay warm before suspending | +| `chatAccessTokenTTL` | `string` | `"1h"` | How long the scoped access token remains valid | +| `preloadWarmTimeoutInSeconds` | `number` | Same as `warmTimeoutInSeconds` | Warm timeout after `onPreload` fires | +| `preloadTimeout` | `string` | Same as `turnTimeout` | Suspend timeout for preloaded runs | + 
+Plus all standard [TaskOptions](/tasks/overview) — `retry`, `queue`, `machine`, `maxDuration`, etc. + +## ChatTaskRunPayload + +The payload passed to the `run` function. + +| Field | Type | Description | +|-------|------|-------------| +| `messages` | `ModelMessage[]` | Model-ready messages — pass directly to `streamText` | +| `chatId` | `string` | Unique chat session ID | +| `trigger` | `"submit-message" \| "regenerate-message"` | What triggered the request | +| `messageId` | `string \| undefined` | Message ID (for regenerate) | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend (typed when schema is provided) | +| `continuation` | `boolean` | Whether this run is continuing an existing chat (previous run ended) | +| `signal` | `AbortSignal` | Combined stop + cancel signal | +| `cancelSignal` | `AbortSignal` | Cancel-only signal | +| `stopSignal` | `AbortSignal` | Stop-only signal (per-turn) | + +## PreloadEvent + +Passed to the `onPreload` callback. + +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend | + +## ChatStartEvent + +Passed to the `onChatStart` callback. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Initial model-ready messages | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | +| `previousRunId` | `string \| undefined` | Previous run ID (only when `continuation` is true) | +| `preloaded` | `boolean` | Whether this run was preloaded before the first message | + +## TurnStartEvent + +Passed to the `onTurnStart` callback. + +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | +| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | +| `turn` | `number` | Turn number (0-indexed) | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | +| `previousRunId` | `string \| undefined` | Previous run ID (only when `continuation` is true) | +| `preloaded` | `boolean` | Whether this run was preloaded | + +## TurnCompleteEvent + +Passed to the `onTurnComplete` callback. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `chatId` | `string` | Chat session ID | +| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | +| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | +| `newMessages` | `ModelMessage[]` | Only this turn's messages (model format) | +| `newUIMessages` | `UIMessage[]` | Only this turn's messages (UI format) | +| `responseMessage` | `UIMessage \| undefined` | The assistant's response for this turn | +| `rawResponseMessage` | `UIMessage \| undefined` | Raw response before abort cleanup | +| `turn` | `number` | Turn number (0-indexed) | +| `runId` | `string` | The Trigger.dev run ID | +| `chatAccessToken` | `string` | Scoped access token for this run | +| `lastEventId` | `string \| undefined` | Stream position for resumption | +| `stopped` | `boolean` | Whether the user stopped generation during this turn | +| `continuation` | `boolean` | Whether this run is continuing an existing chat | + +## ChatSessionOptions + +Options for `chat.createSession()`. + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `signal` | `AbortSignal` | required | Run-level cancel signal | +| `warmTimeoutInSeconds` | `number` | `30` | Seconds to stay warm between turns | +| `timeout` | `string` | `"1h"` | Duration string for suspend timeout | +| `maxTurns` | `number` | `100` | Max turns before ending | + +## ChatTurn + +Each turn yielded by `chat.createSession()`. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `number` | `number` | Turn number (0-indexed) | +| `chatId` | `string` | Chat session ID | +| `trigger` | `string` | What triggered this turn | +| `clientData` | `unknown` | Client data from the transport | +| `messages` | `ModelMessage[]` | Full accumulated model messages | +| `uiMessages` | `UIMessage[]` | Full accumulated UI messages | +| `signal` | `AbortSignal` | Combined stop+cancel signal (fresh each turn) | +| `stopped` | `boolean` | Whether the user stopped generation this turn | +| `continuation` | `boolean` | Whether this is a continuation run | + +| Method | Returns | Description | +|--------|---------|-------------| +| `complete(source)` | `Promise` | Pipe, capture, accumulate, cleanup, and signal turn-complete | +| `done()` | `Promise` | Signal turn-complete (when you've piped manually) | +| `addResponse(response)` | `Promise` | Add response to accumulator manually | + +## chat namespace + +All methods available on the `chat` object from `@trigger.dev/sdk/ai`. + +| Method | Description | +|--------|-------------| +| `chat.task(options)` | Create a chat task | +| `chat.createSession(payload, options)` | Create an async iterator for chat turns | +| `chat.pipe(source, options?)` | Pipe a stream to the frontend (from anywhere inside a task) | +| `chat.pipeAndCapture(source, options?)` | Pipe and capture the response `UIMessage` | +| `chat.writeTurnComplete(options?)` | Signal the frontend that the current turn is complete | +| `chat.createStopSignal()` | Create a managed stop signal wired to the stop input stream | +| `chat.messages` | Input stream for incoming messages — use `.waitWithWarmup()` | +| `chat.local({ id })` | Create a per-run typed local (see [Per-run data](/ai-chat/features#per-run-data-with-chatlocal)) | +| `chat.createAccessToken(taskId)` | Create a public access token for a chat task | +| `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. 
`"2h"`) | +| `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | +| `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | +| `chat.defer(promise)` | Run background work in parallel with streaming, awaited before `onTurnComplete` | +| `chat.isStopped()` | Check if the current turn was stopped by the user | +| `chat.cleanupAbortedParts(message)` | Remove incomplete parts from a stopped response message | +| `chat.stream` | Typed chat output stream — use `.writer()`, `.pipe()`, `.append()`, `.read()` | +| `chat.MessageAccumulator` | Class that accumulates conversation messages across turns | + +## TriggerChatTransport options + +Options for the frontend transport constructor and `useTriggerChatTransport` hook. + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `task` | `string` | required | Task ID to trigger | +| `accessToken` | `string \| () => string \| Promise` | required | Auth token or function that returns one | +| `baseURL` | `string` | `"https://api.trigger.dev"` | API base URL (for self-hosted) | +| `streamKey` | `string` | `"chat"` | Stream key (only change if using custom key) | +| `headers` | `Record` | — | Extra headers for API requests | +| `streamTimeoutSeconds` | `number` | `120` | How long to wait for stream data | +| `clientData` | Typed by `clientDataSchema` | — | Default client data for every request | +| `sessions` | `Record` | — | Restore sessions from storage | +| `onSessionChange` | `(chatId, session \| null) => void` | — | Fires when session state changes | +| `triggerOptions` | `{...}` | — | Options for the initial task trigger (see below) | + +### triggerOptions + +Options forwarded to the Trigger.dev API when starting a new run. Only applies to the first message — subsequent messages reuse the same run. + +A `chat:{chatId}` tag is automatically added to every run. 
+ +| Option | Type | Description | +|--------|------|-------------| +| `tags` | `string[]` | Additional tags for the run (merged with auto-tags, max 5 total) | +| `queue` | `string` | Queue name for the run | +| `maxAttempts` | `number` | Maximum retry attempts | +| `machine` | `"micro" \| "small-1x" \| ...` | Machine preset for the run | +| `priority` | `number` | Priority (lower = higher priority) | + +```ts +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: getChatToken, + triggerOptions: { + tags: ["user:123"], + queue: "chat-queue", + }, +}); +``` + +### transport.preload() + +Eagerly trigger a run before the first message. + +```ts +transport.preload(chatId, { warmTimeoutInSeconds?: number }): Promise +``` + +No-op if a session already exists for this chatId. See [Preload](/ai-chat/features#preload) for full details. + +## useTriggerChatTransport + +React hook that creates and memoizes a `TriggerChatTransport` instance. Import from `@trigger.dev/sdk/chat/react`. + +```tsx +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import type { myChat } from "@/trigger/chat"; + +const transport = useTriggerChatTransport({ + task: "my-chat", + accessToken: () => getChatToken(), + sessions: savedSessions, + onSessionChange: handleSessionChange, +}); +``` + +The transport is created once on first render and reused across re-renders. Pass a type parameter for compile-time validation of the task ID. 
+ +## Related + +- [Realtime Streams](/tasks/streams) — How streams work under the hood +- [Using the Vercel AI SDK](/guides/examples/vercel-ai-sdk) — Basic AI SDK usage with Trigger.dev +- [Realtime React Hooks](/realtime/react-hooks/overview) — Lower-level realtime hooks +- [Authentication](/realtime/auth) — Public access tokens and trigger tokens diff --git a/docs/docs.json b/docs/docs.json index 1591359473e..79a66c3d358 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -82,7 +82,14 @@ }, { "group": "AI Chat", - "pages": ["guides/ai-chat"] + "pages": [ + "ai-chat/overview", + "ai-chat/quick-start", + "ai-chat/backend", + "ai-chat/frontend", + "ai-chat/features", + "ai-chat/reference" + ] }, { "group": "Configuration", @@ -733,6 +740,10 @@ { "source": "/insights/metrics", "destination": "/observability/dashboards" + }, + { + "source": "/guides/ai-chat", + "destination": "/ai-chat/overview" } ] } diff --git a/docs/guides/ai-chat.mdx b/docs/guides/ai-chat.mdx deleted file mode 100644 index 44ac2a97381..00000000000 --- a/docs/guides/ai-chat.mdx +++ /dev/null @@ -1,1412 +0,0 @@ ---- -title: "AI Chat with useChat" -sidebarTitle: "AI Chat (useChat)" -description: "Run AI SDK chat completions as durable Trigger.dev tasks with built-in realtime streaming, multi-turn conversations, and message persistence." ---- - -## Overview - -The `@trigger.dev/sdk` provides a custom [ChatTransport](https://sdk.vercel.ai/docs/ai-sdk-ui/transport) for the Vercel AI SDK's `useChat` hook. This lets you run chat completions as **durable Trigger.dev tasks** instead of fragile API routes — with automatic retries, observability, and realtime streaming built in. - -**How it works:** -1. The frontend sends messages via `useChat` through `TriggerChatTransport` -2. The first message triggers a Trigger.dev task; subsequent messages resume the **same run** via input streams -3. The task streams `UIMessageChunk` events back via Trigger.dev's realtime streams -4. 
The AI SDK's `useChat` processes the stream natively — text, tool calls, reasoning, etc. -5. Between turns, the run stays warm briefly then suspends (freeing compute) until the next message - -No custom API routes needed. Your chat backend is a Trigger.dev task. - - - -### First message flow - -```mermaid -sequenceDiagram - participant User - participant useChat as useChat + Transport - participant API as Trigger.dev API - participant Task as chat.task Worker - participant LLM as LLM Provider - - User->>useChat: sendMessage("Hello") - useChat->>useChat: No session for chatId → trigger new run - useChat->>API: triggerTask(payload, tags: [chat:id]) - API-->>useChat: { runId, publicAccessToken } - useChat->>useChat: Store session, subscribe to SSE - - API->>Task: Start run with ChatTaskWirePayload - Task->>Task: onChatStart({ chatId, messages, clientData }) - Task->>Task: onTurnStart({ chatId, messages }) - Task->>LLM: streamText({ model, messages, abortSignal }) - LLM-->>Task: Stream response chunks - Task->>API: streams.pipe("chat", uiStream) - API-->>useChat: SSE: UIMessageChunks - useChat-->>User: Render streaming text - Task->>API: Write __trigger_turn_complete - API-->>useChat: SSE: turn complete + refreshed token - useChat->>useChat: Close stream, update session - Task->>Task: onTurnComplete({ messages, stopped: false }) - Task->>Task: Wait for next message (warm → suspend) -``` - -### Multi-turn flow - -```mermaid -sequenceDiagram - participant User - participant useChat as useChat + Transport - participant API as Trigger.dev API - participant Task as chat.task Worker - participant LLM as LLM Provider - - Note over Task: Suspended, waiting for message - - User->>useChat: sendMessage("Tell me more") - useChat->>useChat: Session exists → send via input stream - useChat->>API: sendInputStream(runId, "chat-messages", payload) - Note right of useChat: Only sends new message (not full history) - - API->>Task: Deliver to messagesInput - Task->>Task: Wake from suspend 
- Task->>Task: Append to accumulated messages - Task->>Task: onTurnStart({ turn: 1 }) - Task->>LLM: streamText({ messages: [all accumulated] }) - LLM-->>Task: Stream response - Task->>API: streams.pipe("chat", uiStream) - API-->>useChat: SSE: UIMessageChunks - useChat-->>User: Render streaming text - Task->>API: Write __trigger_turn_complete - Task->>Task: onTurnComplete({ turn: 1 }) - Task->>Task: Wait for next message (warm → suspend) -``` - -### Stop signal flow - -```mermaid -sequenceDiagram - participant User - participant useChat as useChat + Transport - participant API as Trigger.dev API - participant Task as chat.task Worker - participant LLM as LLM Provider - - Note over Task: Streaming response... - - User->>useChat: Click "Stop" - useChat->>API: sendInputStream(runId, "chat-stop", { stop: true }) - API->>Task: Deliver to stopInput - Task->>Task: stopController.abort() - LLM-->>Task: Stream ends (AbortError) - Task->>Task: cleanupAbortedParts(responseMessage) - Note right of Task: Remove partial tool calls,
mark streaming parts as done - Task->>API: Write __trigger_turn_complete - API-->>useChat: SSE: turn complete - Task->>Task: onTurnComplete({ stopped: true }) - Task->>Task: Wait for next message -``` - -
- - - Requires `@trigger.dev/sdk` version **4.4.0 or later** and the `ai` package **v5.0.0 or later**. - - -## Quick start - - - - Use `chat.task` from `@trigger.dev/sdk/ai` to define a task that handles chat messages. The `run` function receives `ModelMessage[]` (already converted from the frontend's `UIMessage[]`) — pass them directly to `streamText`. - - If you return a `StreamTextResult`, it's **automatically piped** to the frontend. - - ```ts trigger/chat.ts - import { chat } from "@trigger.dev/sdk/ai"; - import { streamText } from "ai"; - import { openai } from "@ai-sdk/openai"; - - export const myChat = chat.task({ - id: "my-chat", - run: async ({ messages, signal }) => { - // messages is ModelMessage[] — pass directly to streamText - // signal fires on stop or run cancel - return streamText({ - model: openai("gpt-4o"), - messages, - abortSignal: signal, - }); - }, - }); - ``` - - - - On your server (e.g. a Next.js server action), create a trigger public token scoped to your chat task: - - ```ts app/actions.ts - "use server"; - - import { chat } from "@trigger.dev/sdk/ai"; - import type { myChat } from "@/trigger/chat"; - - export const getChatToken = () => - chat.createAccessToken("my-chat"); - ``` - - - - Use the `useTriggerChatTransport` hook from `@trigger.dev/sdk/chat/react` to create a memoized transport instance, then pass it to `useChat`: - - ```tsx app/components/chat.tsx - "use client"; - - import { useChat } from "@ai-sdk/react"; - import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; - import type { myChat } from "@/trigger/chat"; - import { getChatToken } from "@/app/actions"; - - export function Chat() { - const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: getChatToken, - }); - - const { messages, sendMessage, stop, status } = useChat({ transport }); - - return ( -
-    <div>
-      {messages.map((m) => (
-        <div key={m.id}>
-          <strong>{m.role}:</strong>{" "}
-          {m.parts.map((part, i) =>
-            part.type === "text" ? <span key={i}>{part.text}</span> : null
-          )}
-        </div>
-      ))}
-
-      <form
-        onSubmit={(e) => {
-          e.preventDefault();
-          const input = e.currentTarget.querySelector("input");
-          if (input?.value) {
-            sendMessage({ text: input.value });
-            input.value = "";
-          }
-        }}
-      >
-        <input placeholder="Say something..." />
-        <button type="submit">Send</button>
-        {status === "streaming" && (
-          <button type="button" onClick={() => stop()}>
-            Stop
-          </button>
-        )}
-      </form>
-    </div>
-  );
-}
-```
-
-
- -## How multi-turn works - -### One run, many turns - -The entire conversation lives in a **single Trigger.dev run**. After each AI response, the run waits for the next message via input streams. The frontend transport handles this automatically — it triggers a new run for the first message, and sends subsequent messages to the existing run. - -This means your conversation has full observability in the Trigger.dev dashboard: every turn is a span inside the same run. - -### Warm and suspended states - -After each turn, the run goes through two phases of waiting: - -1. **Warm phase** (default 30s) — The run stays active and responds instantly to the next message. Uses compute. -2. **Suspended phase** (default up to 1h) — The run suspends, freeing compute. It wakes when the next message arrives. There's a brief delay as the run resumes. - -If no message arrives within the turn timeout, the run ends gracefully. The next message from the frontend will automatically start a fresh run. - - - You are not charged for compute during the suspended phase. Only the warm phase uses compute resources. - - -### What the backend accumulates - -The backend automatically accumulates the full conversation history across turns. After the first turn, the frontend transport only sends the new user message — not the entire history. This is handled transparently by the transport and task. 
- -The accumulated messages are available in: -- `run()` as `messages` (`ModelMessage[]`) — for passing to `streamText` -- `onTurnStart()` as `uiMessages` (`UIMessage[]`) — for persisting before streaming -- `onTurnComplete()` as `uiMessages` (`UIMessage[]`) — for persisting after the response - -## Backend patterns - -### Simple: return a StreamTextResult - -The easiest approach — return the `streamText` result from `run` and it's automatically piped to the frontend: - -```ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; - -export const simpleChat = chat.task({ - id: "simple-chat", - run: async ({ messages, signal }) => { - return streamText({ - model: openai("gpt-4o"), - system: "You are a helpful assistant.", - messages, - abortSignal: signal, - }); - }, -}); -``` - -### Using chat.pipe() for complex flows - -For complex agent flows where `streamText` is called deep inside your code, use `chat.pipe()`. It works from **anywhere inside a task** — even nested function calls. - -```ts trigger/agent-chat.ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; -import type { ModelMessage } from "ai"; - -export const agentChat = chat.task({ - id: "agent-chat", - run: async ({ messages }) => { - // Don't return anything — chat.pipe is called inside - await runAgentLoop(messages); - }, -}); - -async function runAgentLoop(messages: ModelMessage[]) { - // ... agent logic, tool calls, etc. 
- - const result = streamText({ - model: openai("gpt-4o"), - messages, - }); - - // Pipe from anywhere — no need to return it - await chat.pipe(result); -} -``` - -### Manual mode with task() - -If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `chat.pipe()`: - -```ts -import { task } from "@trigger.dev/sdk"; -import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; - -export const manualChat = task({ - id: "manual-chat", - retry: { maxAttempts: 3 }, - queue: { concurrencyLimit: 10 }, - run: async (payload: ChatTaskPayload) => { - const result = streamText({ - model: openai("gpt-4o"), - messages: payload.messages, - }); - - await chat.pipe(result); - }, -}); -``` - - - Manual mode does not get automatic message accumulation or the `onTurnComplete`/`onChatStart` lifecycle hooks. The `responseMessage` field in `onTurnComplete` will be `undefined` when using `chat.pipe()` directly. Use `chat.task()` for the full multi-turn experience. - - -## Lifecycle hooks - -### onChatStart - -Fires once on the first turn (turn 0) before `run()` executes. Use it to create a chat record in your database. 
- -The `continuation` field tells you whether this is a brand new chat or a continuation of an existing one (where the previous run timed out or was cancelled): - -```ts -export const myChat = chat.task({ - id: "my-chat", - onChatStart: async ({ chatId, clientData, continuation }) => { - if (continuation) { - // Previous run ended — chat record already exists, just update session - return; - } - // Brand new chat — create the record - const { userId } = clientData as { userId: string }; - await db.chat.create({ - data: { id: chatId, userId, title: "New chat" }, - }); - }, - run: async ({ messages, signal }) => { - return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); - }, -}); -``` - - - `clientData` contains custom data from the frontend — either the `clientData` option on the transport constructor (sent with every message) or the `metadata` option on `sendMessage()` (per-message). See [Client data and metadata](#client-data-and-metadata). - - -### onTurnStart - -Fires at the start of every turn, after message accumulation and `onChatStart` (turn 0), but **before** `run()` executes. Use it to persist messages before streaming begins — so a mid-stream page refresh still shows the user's message. 
- -| Field | Type | Description | -|-------|------|-------------| -| `chatId` | `string` | Chat session ID | -| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | -| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | -| `turn` | `number` | Turn number (0-indexed) | -| `runId` | `string` | The Trigger.dev run ID | -| `chatAccessToken` | `string` | Scoped access token for this run | -| `continuation` | `boolean` | Whether this run is continuing an existing chat | - -```ts -export const myChat = chat.task({ - id: "my-chat", - onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages }, - }); - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken }, - update: { runId, publicAccessToken: chatAccessToken }, - }); - }, - run: async ({ messages, signal }) => { - return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); - }, -}); -``` - - - By persisting in `onTurnStart`, the user's message is saved to your database before the AI starts streaming. If the user refreshes mid-stream, the message is already there. - - -### onTurnComplete - -Fires after each turn completes — after the response is captured, before waiting for the next message. This is the primary hook for persisting the assistant's response. 
- -| Field | Type | Description | -|-------|------|-------------| -| `chatId` | `string` | Chat session ID | -| `messages` | `ModelMessage[]` | Full accumulated conversation (model format) | -| `uiMessages` | `UIMessage[]` | Full accumulated conversation (UI format) | -| `newMessages` | `ModelMessage[]` | Only this turn's messages (model format) | -| `newUIMessages` | `UIMessage[]` | Only this turn's messages (UI format) | -| `responseMessage` | `UIMessage \| undefined` | The assistant's response for this turn | -| `turn` | `number` | Turn number (0-indexed) | -| `runId` | `string` | The Trigger.dev run ID | -| `chatAccessToken` | `string` | Scoped access token for this run | -| `lastEventId` | `string \| undefined` | Stream position for resumption. Persist this with the session. | -| `stopped` | `boolean` | Whether the user stopped generation during this turn | -| `continuation` | `boolean` | Whether this run is continuing an existing chat | -| `rawResponseMessage` | `UIMessage \| undefined` | The raw assistant response before abort cleanup (same as `responseMessage` when not stopped) | - -```ts -export const myChat = chat.task({ - id: "my-chat", - onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages }, - }); - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, - update: { runId, publicAccessToken: chatAccessToken, lastEventId }, - }); - }, - run: async ({ messages, signal }) => { - return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); - }, -}); -``` - - - Use `uiMessages` to overwrite the full conversation each turn (simplest). Use `newUIMessages` if you prefer to store messages individually — for example, one database row per message. - - - - Persist `lastEventId` alongside the session. 
When the transport reconnects after a page refresh, it uses this to skip past already-seen events — preventing duplicate messages. - - -## Persistence - -### What needs to be persisted - -To build a chat app that survives page refreshes, you need to persist two things: - -1. **Messages** — The conversation history. Persisted **server-side** in the task via `onTurnStart` and `onTurnComplete`. -2. **Sessions** — The transport's connection state (`runId`, `publicAccessToken`, `lastEventId`). Persisted **server-side** via `onTurnStart` and `onTurnComplete`. - - - Sessions let the transport reconnect to an existing run after a page refresh. Without them, every page load would start a new run — losing the conversation context that was accumulated in the previous run. - - -### Persisting messages and sessions (server-side) - -Both messages and sessions are persisted server-side in the lifecycle hooks. `onTurnStart` saves the user's message before streaming begins, while `onTurnComplete` saves the assistant's response and the `lastEventId` for stream resumption. 
- -```ts trigger/chat.ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; -import { db } from "@/lib/db"; - -export const myChat = chat.task({ - id: "my-chat", - onChatStart: async ({ chatId }) => { - await db.chat.create({ - data: { id: chatId, title: "New chat", messages: [] }, - }); - }, - onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { - // Save user message + session before streaming starts - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages }, - }); - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken }, - update: { runId, publicAccessToken: chatAccessToken }, - }); - }, - onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { - // Save assistant response + stream position after turn completes - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages }, - }); - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, - update: { runId, publicAccessToken: chatAccessToken, lastEventId }, - }); - }, - run: async ({ messages, signal }) => { - return streamText({ - model: openai("gpt-4o"), - messages, - abortSignal: signal, - }); - }, -}); -``` - -### Session cleanup (frontend) - -Since session creation and updates are handled server-side, the frontend only needs to handle session deletion when a run ends: - -```tsx -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: getChatToken, - sessions: loadedSessions, // Restored from DB on page load - onSessionChange: (chatId, session) => { - if (!session) { - deleteSession(chatId); // Server action — run ended - } - }, -}); -``` - -### Restoring on page load - -On page load, fetch both the messages and the session from your database, then pass them to `useChat` and the 
transport. Pass `resume: true` to `useChat` when there's an existing conversation — this tells the AI SDK to reconnect to the stream via the transport.
-
-```tsx app/page.tsx
-"use client";
-
-import { useEffect, useState } from "react";
-import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react";
-import { useChat } from "@ai-sdk/react";
-import { getChatToken, getChatMessages, getSession, deleteSession } from "@/app/actions";
-
-export default function ChatPage({ chatId }: { chatId: string }) {
-  const [initialMessages, setInitialMessages] = useState([]);
-  const [initialSession, setInitialSession] = useState(undefined);
-  const [loaded, setLoaded] = useState(false);
-
-  useEffect(() => {
-    async function load() {
-      const [messages, session] = await Promise.all([
-        getChatMessages(chatId),
-        getSession(chatId),
-      ]);
-      setInitialMessages(messages);
-      setInitialSession(session ? { [chatId]: session } : undefined);
-      setLoaded(true);
-    }
-    load();
-  }, [chatId]);
-
-  if (!loaded) return null;
-
-  return (
-    <ChatClient
-      chatId={chatId}
-      initialMessages={initialMessages}
-      initialSessions={initialSession}
-    />
-  );
-}
-
-function ChatClient({ chatId, initialMessages, initialSessions }) {
-  const transport = useTriggerChatTransport({
-    task: "my-chat",
-    accessToken: getChatToken,
-    sessions: initialSessions,
-    onSessionChange: (id, session) => {
-      if (!session) deleteSession(id);
-    },
-  });
-
-  const { messages, sendMessage, stop, status } = useChat({
-    id: chatId,
-    messages: initialMessages,
-    transport,
-    resume: initialMessages.length > 0, // Resume if there's an existing conversation
-  });
-
-  // ... render UI
-}
-```
-
-
-  `resume: true` causes `useChat` to call `reconnectToStream` on the transport when the component mounts. The transport uses the session's `lastEventId` to skip past already-seen stream events, so the frontend only receives new data. Only enable `resume` when there are existing messages — for brand new chats, there's nothing to reconnect to.
- - - - In React strict mode (enabled by default in Next.js dev), you may see a `TypeError: Cannot read properties of undefined (reading 'state')` in the console when using `resume`. This is a [known bug in the AI SDK](https://github.com/vercel/ai/issues/8477) caused by React strict mode double-firing the resume effect. The error is caught internally and **does not affect functionality** — streaming and message display work correctly. It only appears in development and will not occur in production builds. - - -### Full example - -Putting it all together — a complete chat app with server-side persistence, session reconnection, and stream resumption: - - -```ts trigger/chat.ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; -import { z } from "zod"; -import { db } from "@/lib/db"; - -export const myChat = chat.task({ - id: "my-chat", - clientDataSchema: z.object({ - userId: z.string(), - }), - onChatStart: async ({ chatId, clientData }) => { - await db.chat.create({ - data: { id: chatId, userId: clientData.userId, title: "New chat", messages: [] }, - }); - }, - onTurnStart: async ({ chatId, uiMessages, runId, chatAccessToken }) => { - // Persist messages + session before streaming - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages }, - }); - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken }, - update: { runId, publicAccessToken: chatAccessToken }, - }); - }, - onTurnComplete: async ({ chatId, uiMessages, runId, chatAccessToken, lastEventId }) => { - // Persist assistant response + stream position - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages }, - }); - await db.chatSession.upsert({ - where: { id: chatId }, - create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, - update: { runId, publicAccessToken: chatAccessToken, lastEventId }, - 
}); - }, - run: async ({ messages, signal }) => { - return streamText({ - model: openai("gpt-4o"), - messages, - abortSignal: signal, - }); - }, -}); -``` - -```ts app/actions.ts -"use server"; - -import { chat } from "@trigger.dev/sdk/ai"; -import type { myChat } from "@/trigger/chat"; -import { db } from "@/lib/db"; - -export const getChatToken = () => - chat.createAccessToken("my-chat"); - -export async function getChatMessages(chatId: string) { - const found = await db.chat.findUnique({ where: { id: chatId } }); - return found?.messages ?? []; -} - -export async function getAllSessions() { - const sessions = await db.chatSession.findMany(); - const result: Record = {}; - for (const s of sessions) { - result[s.id] = { - runId: s.runId, - publicAccessToken: s.publicAccessToken, - lastEventId: s.lastEventId ?? undefined, - }; - } - return result; -} - -export async function deleteSession(chatId: string) { - await db.chatSession.delete({ where: { id: chatId } }).catch(() => {}); -} -``` - -```tsx app/components/chat.tsx -"use client"; - -import { useChat } from "@ai-sdk/react"; -import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; -import type { myChat } from "@/trigger/chat"; -import { getChatToken, deleteSession } from "@/app/actions"; - -export function Chat({ chatId, initialMessages, initialSessions }) { - const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: getChatToken, - clientData: { userId: currentUser.id }, // Type-checked against clientDataSchema - sessions: initialSessions, - onSessionChange: (id, session) => { - if (!session) deleteSession(id); - }, - }); - - const { messages, sendMessage, stop, status } = useChat({ - id: chatId, - messages: initialMessages, - transport, - resume: initialMessages.length > 0, - }); - - return ( -
-    <div>
-      {messages.map((m) => (
-        <div key={m.id}>
-          <strong>{m.role}:</strong>{" "}
-          {m.parts.map((part, i) =>
-            part.type === "text" ? <span key={i}>{part.text}</span> : null
-          )}
-        </div>
-      ))}
-
-      <form
-        onSubmit={(e) => {
-          e.preventDefault();
-          const input = e.currentTarget.querySelector("input");
-          if (input?.value) {
-            sendMessage({ text: input.value });
-            input.value = "";
-          }
-        }}
-      >
-        <input placeholder="Say something..." />
-        <button type="submit">Send</button>
-        {status === "streaming" && (
-          <button type="button" onClick={() => stop()}>
-            Stop
-          </button>
-        )}
-      </form>
-    </div>
-  );
-}
-```
-
- -## Stop generation - -### How stop works - -Calling `stop()` from `useChat` sends a stop signal to the running task via input streams. The task's `streamText` call aborts (if you passed `signal` or `stopSignal`), but the **run stays alive** and waits for the next message. The partial response is captured and accumulated normally. - -### Abort signals - -The `run` function receives three abort signals: - -| Signal | Fires when | Use for | -|--------|-----------|---------| -| `signal` | Stop **or** cancel | Pass to `streamText` — handles both cases. **Use this in most cases.** | -| `stopSignal` | Stop only (per-turn, reset each turn) | Custom logic that should only run on user stop, not cancellation | -| `cancelSignal` | Run cancel, expire, or maxDuration exceeded | Cleanup that should only happen on full cancellation | - -```ts -export const myChat = chat.task({ - id: "my-chat", - run: async ({ messages, signal, stopSignal, cancelSignal }) => { - return streamText({ - model: openai("gpt-4o"), - messages, - abortSignal: signal, // Handles both stop and cancel - }); - }, -}); -``` - - - Use `signal` (the combined signal) in most cases. The separate `stopSignal` and `cancelSignal` are only needed if you want different behavior for stop vs cancel. - - -### Detecting stop in callbacks - -The `onTurnComplete` event includes a `stopped` boolean that indicates whether the user stopped generation during that turn: - -```ts -export const myChat = chat.task({ - id: "my-chat", - onTurnComplete: async ({ chatId, uiMessages, stopped }) => { - await db.chat.update({ - where: { id: chatId }, - data: { messages: uiMessages, lastStoppedAt: stopped ? new Date() : undefined }, - }); - }, - run: async ({ messages, signal }) => { - return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); - }, -}); -``` - -You can also check stop status from **anywhere** during a turn using `chat.isStopped()`. 
This is useful inside `streamText`'s `onFinish` callback where the AI SDK's `isAborted` flag can be unreliable (e.g. when using `createUIMessageStream` + `writer.merge()`): - -```ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; - -export const myChat = chat.task({ - id: "my-chat", - run: async ({ messages, signal }) => { - return streamText({ - model: openai("gpt-4o"), - messages, - abortSignal: signal, - onFinish: ({ isAborted }) => { - // isAborted may be false even after stop when using createUIMessageStream - const wasStopped = isAborted || chat.isStopped(); - if (wasStopped) { - // handle stop — e.g. log analytics - } - }, - }); - }, -}); -``` - -### Cleaning up aborted messages - -When stop happens mid-stream, the captured response message can contain parts in an incomplete state — tool calls stuck in `partial-call`, reasoning blocks still marked as `streaming`, etc. These can cause UI issues like permanent spinners. - -`chat.task` automatically cleans up the `responseMessage` when stop is detected before passing it to `onTurnComplete`. If you use `chat.pipe()` manually and capture response messages yourself, use `chat.cleanupAbortedParts()`: - -```ts -const cleaned = chat.cleanupAbortedParts(rawResponseMessage); -``` - -This removes tool invocation parts stuck in `partial-call` state and marks any `streaming` text or reasoning parts as `done`. - - - Stop signal delivery is best-effort. There is a small race window where the model may finish before the stop signal arrives, in which case the turn completes normally with `stopped: false`. This is expected and does not require special handling. - - -## Writing to the chat stream - -### Custom chunks with `chat.stream` - -`chat.stream` is a typed stream bound to the chat output. Use it to write custom `UIMessageChunk` data alongside the AI-generated response — for example, status updates or progress indicators. 
- -```ts -import { chat } from "@trigger.dev/sdk/ai"; - -export const myChat = chat.task({ - id: "my-chat", - run: async ({ messages, signal }) => { - // Write a custom data part to the chat stream. - // The AI SDK's data-* chunk protocol adds this to message.parts - // on the frontend, where you can render it however you like. - const { waitUntilComplete } = chat.stream.writer({ - execute: ({ write }) => { - write({ - type: "data-status", - id: "search-progress", - data: { message: "Searching the web...", progress: 0.5 }, - }); - }, - }); - await waitUntilComplete(); - - // Then stream the AI response - return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); - }, -}); -``` - - - Use `data-*` chunk types (e.g. `data-status`, `data-progress`) for custom data. The AI SDK processes these into `DataUIPart` objects in `message.parts` on the frontend. Writing the same `type` + `id` again updates the existing part instead of creating a new one — useful for live progress. 
- - -`chat.stream` exposes the full stream API: - -| Method | Description | -|--------|-------------| -| `chat.stream.writer(options)` | Write individual chunks via a callback | -| `chat.stream.pipe(stream, options?)` | Pipe a `ReadableStream` or `AsyncIterable` | -| `chat.stream.append(value, options?)` | Append raw data | -| `chat.stream.read(runId, options?)` | Read the stream by run ID | - -### Streaming from subtasks - -When a tool invokes a subtask via `triggerAndWait`, the subtask can stream directly to the parent chat using `target: "root"`: - -```ts -import { chat, ai } from "@trigger.dev/sdk/ai"; -import { schemaTask } from "@trigger.dev/sdk"; -import { streamText, generateId } from "ai"; -import { z } from "zod"; - -// A subtask that streams progress back to the parent chat -export const researchTask = schemaTask({ - id: "research", - schema: z.object({ query: z.string() }), - run: async ({ query }) => { - const partId = generateId(); - - // Write a data-* chunk to the root run's chat stream. - // The frontend receives this as a DataUIPart in message.parts. - const { waitUntilComplete } = chat.stream.writer({ - target: "root", - execute: ({ write }) => { - write({ - type: "data-research-status", - id: partId, - data: { query, status: "in-progress" }, - }); - }, - }); - await waitUntilComplete(); - - // Do the work... 
- const result = await doResearch(query); - - // Update the same part with the final status - const { waitUntilComplete: waitDone } = chat.stream.writer({ - target: "root", - execute: ({ write }) => { - write({ - type: "data-research-status", - id: partId, - data: { query, status: "done", resultCount: result.length }, - }); - }, - }); - await waitDone(); - - return result; - }, -}); - -// The chat task uses it as a tool via ai.tool() -export const myChat = chat.task({ - id: "my-chat", - run: async ({ messages, signal }) => { - return streamText({ - model: openai("gpt-4o"), - messages, - abortSignal: signal, - tools: { - research: ai.tool(researchTask), - }, - }); - }, -}); -``` - -On the frontend, render the custom data part: - -```tsx -{message.parts.map((part, i) => { - if (part.type === "data-research-status") { - const { query, status, resultCount } = part.data; - return ( -
- {status === "done" ? `Found ${resultCount} results` : `Researching "${query}"...`} -
- ); - } - // ...other part types -})} -``` - -The `target` option accepts: -- `"self"` — current run (default) -- `"parent"` — parent task's run -- `"root"` — root task's run (the chat task) -- A specific run ID string - -### Accessing tool context in subtasks - -When a subtask runs via `ai.tool()`, it can access the tool call context and chat context from the parent: - -```ts -import { ai, chat } from "@trigger.dev/sdk/ai"; -import type { myChat } from "./chat"; - -export const mySubtask = schemaTask({ - id: "my-subtask", - schema: z.object({ query: z.string() }), - run: async ({ query }) => { - // Get the AI SDK's tool call ID (useful for data-* chunk IDs) - const toolCallId = ai.toolCallId(); - - // Get typed chat context — pass typeof yourChatTask for typed clientData - const { chatId, clientData } = ai.chatContextOrThrow(); - // clientData is typed based on myChat's clientDataSchema - - // Write a data chunk using the tool call ID - const { waitUntilComplete } = chat.stream.writer({ - target: "root", - execute: ({ write }) => { - write({ - type: "data-progress", - id: toolCallId, - data: { status: "working", query, userId: clientData?.userId }, - }); - }, - }); - await waitUntilComplete(); - - return { result: "done" }; - }, -}); -``` - -| Helper | Returns | Description | -|--------|---------|-------------| -| `ai.toolCallId()` | `string \| undefined` | The AI SDK tool call ID | -| `ai.chatContext()` | `{ chatId, turn, continuation, clientData } \| undefined` | Chat context with typed `clientData`. Returns `undefined` if not in a chat context. | -| `ai.chatContextOrThrow()` | `{ chatId, turn, continuation, clientData }` | Same as above but throws if not in a chat context | -| `ai.currentToolOptions()` | `ToolCallExecutionOptions \| undefined` | Full tool execution options | - -## Client data and metadata - -### Transport-level client data - -Set default client data on the transport that's included in every request. 
When the task uses `clientDataSchema`, this is type-checked to match: - -```ts -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: getChatToken, - clientData: { userId: currentUser.id }, -}); -``` - -### Per-message metadata - -Pass metadata with individual messages via `sendMessage`. Per-message values are merged with transport-level client data (per-message wins on conflicts): - -```ts -sendMessage( - { text: "Hello" }, - { metadata: { model: "gpt-4o", priority: "high" } } -); -``` - -### Typed client data with `clientDataSchema` - -Instead of manually parsing `clientData` with Zod in every hook, pass a `clientDataSchema` to `chat.task`. The schema validates the data once per turn, and `clientData` is typed in all hooks and `run`: - -```ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; -import { z } from "zod"; - -export const myChat = chat.task({ - id: "my-chat", - clientDataSchema: z.object({ - model: z.string().optional(), - userId: z.string(), - }), - onChatStart: async ({ chatId, clientData }) => { - // clientData is typed as { model?: string; userId: string } - await db.chat.create({ - data: { id: chatId, userId: clientData.userId }, - }); - }, - run: async ({ messages, clientData, signal }) => { - // Same typed clientData — no manual parsing needed - return streamText({ - model: openai(clientData?.model ?? "gpt-4o"), - messages, - abortSignal: signal, - }); - }, -}); -``` - -The schema also types the `clientData` option on the frontend transport: - -```ts -// TypeScript enforces that clientData matches the schema -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: getChatToken, - clientData: { userId: currentUser.id }, -}); -``` - -Supports Zod, ArkType, Valibot, and other schema libraries supported by the SDK. 
-
-## Runtime configuration
-
-### chat.setTurnTimeout()
-
-Override how long the run stays suspended waiting for the next message. Call from inside `run()`:
-
-```ts
-run: async ({ messages, signal }) => {
-  chat.setTurnTimeout("2h"); // Wait longer for this conversation
-  return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal });
-},
-```
-
-### chat.setWarmTimeoutInSeconds()
-
-Override how long the run stays warm (active, using compute) after each turn:
-
-```ts
-run: async ({ messages, signal }) => {
-  chat.setWarmTimeoutInSeconds(60); // Stay warm for 1 minute
-  return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal });
-},
-```
-
-
-  Longer warm timeout means faster responses but more compute usage. Set to `0` to suspend immediately after each turn (minimum compute cost, slight delay on next message).
-
-
-## Per-run data with `chat.local`
-
-Use `chat.local` to create typed, run-scoped data that persists across turns and is accessible from anywhere — the run function, tools, nested helpers. Each run gets its own isolated copy, and locals are automatically cleared between runs.
-
-When a subtask is invoked via `ai.tool()`, initialized locals are automatically serialized into the subtask's metadata and hydrated on first access — no extra code needed. Subtask changes to hydrated locals are local to the subtask and don't propagate back to the parent.
- -### Declaring and initializing - -Declare locals at module level with a unique `id`, then initialize them inside a lifecycle hook where you have context (chatId, clientData, etc.): - -```ts -import { chat } from "@trigger.dev/sdk/ai"; -import { streamText, tool } from "ai"; -import { openai } from "@ai-sdk/openai"; -import { z } from "zod"; -import { db } from "@/lib/db"; - -// Declare at module level — each local needs a unique id -const userContext = chat.local<{ - name: string; - plan: "free" | "pro"; - messageCount: number; -}>({ id: "userContext" }); - -export const myChat = chat.task({ - id: "my-chat", - clientDataSchema: z.object({ userId: z.string() }), - onChatStart: async ({ clientData }) => { - // Initialize with real data from your database - const user = await db.user.findUnique({ - where: { id: clientData.userId }, - }); - userContext.init({ - name: user.name, - plan: user.plan, - messageCount: user.messageCount, - }); - }, - run: async ({ messages, signal }) => { - userContext.messageCount++; - - return streamText({ - model: openai("gpt-4o"), - system: `Helping ${userContext.name} (${userContext.plan} plan).`, - messages, - abortSignal: signal, - }); - }, -}); -``` - -### Accessing from tools - -Locals are accessible from anywhere during task execution — including AI SDK tools: - -```ts -const userContext = chat.local<{ plan: "free" | "pro" }>({ id: "userContext" }); - -const premiumTool = tool({ - description: "Access premium features", - inputSchema: z.object({ feature: z.string() }), - execute: async ({ feature }) => { - if (userContext.plan !== "pro") { - return { error: "This feature requires a Pro plan." }; - } - // ... 
premium logic - }, -}); -``` - -### Accessing from subtasks - -When you use `ai.tool()` to expose a subtask, chat locals are automatically available read-only: - -```ts -import { chat, ai } from "@trigger.dev/sdk/ai"; -import { schemaTask } from "@trigger.dev/sdk"; -import { streamText } from "ai"; -import { openai } from "@ai-sdk/openai"; -import { z } from "zod"; - -const userContext = chat.local<{ name: string; plan: "free" | "pro" }>({ id: "userContext" }); - -export const analyzeData = schemaTask({ - id: "analyze-data", - schema: z.object({ query: z.string() }), - run: async ({ query }) => { - // userContext.name just works — auto-hydrated from parent metadata - console.log(`Analyzing for ${userContext.name}`); - // Changes here are local to this subtask and don't propagate back - }, -}); - -export const myChat = chat.task({ - id: "my-chat", - onChatStart: async ({ clientData }) => { - userContext.init({ name: "Alice", plan: "pro" }); - }, - run: async ({ messages, signal }) => { - return streamText({ - model: openai("gpt-4o"), - messages, - tools: { analyzeData: ai.tool(analyzeData) }, - abortSignal: signal, - }); - }, -}); -``` - - - Values must be JSON-serializable for subtask access. Non-serializable values (functions, class instances, etc.) will be lost during transfer. - - -### Dirty tracking and persistence - -The `hasChanged()` method returns `true` if any property was set since the last check, then resets the flag. 
Use it in lifecycle hooks to only persist when data actually changed: - -```ts -onTurnComplete: async ({ chatId }) => { - if (userContext.hasChanged()) { - await db.user.update({ - where: { id: userContext.get().userId }, - data: { - messageCount: userContext.messageCount, - }, - }); - } -}, -``` - -### API reference - -| Method | Description | -|--------|-------------| -| `chat.local({ id })` | Create a typed local with a unique id (declare at module level) | -| `local.init(value)` | Initialize with a value (call in hooks or `run`) | -| `local.hasChanged()` | Returns `true` if modified since last check, resets flag | -| `local.get()` | Returns a plain object copy (for serialization) | -| `local.property` | Direct property access (read/write via Proxy) | - - - Locals use shallow proxying. Nested object mutations like `local.prefs.theme = "dark"` won't trigger the dirty flag. Instead, replace the whole property: `local.prefs = { ...local.prefs, theme: "dark" }`. - - -## Frontend reference - -### TriggerChatTransport options - -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `task` | `string` | required | Task ID to trigger | -| `accessToken` | `string \| () => string \| Promise` | required | Auth token or function that returns one | -| `baseURL` | `string` | `"https://api.trigger.dev"` | API base URL (for self-hosted) | -| `streamKey` | `string` | `"chat"` | Stream key (only change if using custom key) | -| `headers` | `Record` | — | Extra headers for API requests | -| `streamTimeoutSeconds` | `number` | `120` | How long to wait for stream data | -| `clientData` | Typed by `clientDataSchema` | — | Default client data for every request | -| `sessions` | `Record` | — | Restore sessions from storage | -| `onSessionChange` | `(chatId, session \| null) => void` | — | Fires when session state changes | -| `triggerOptions` | `{...}` | — | Options for the initial task trigger (see below) | - -#### triggerOptions - -Options forwarded 
to the Trigger.dev API when starting a new run. Only applies to the first message — subsequent messages reuse the same run. - -A `chat:{chatId}` tag is automatically added to every run. - -| Option | Type | Description | -|--------|------|-------------| -| `tags` | `string[]` | Additional tags for the run (merged with auto-tags, max 5 total) | -| `queue` | `string` | Queue name for the run | -| `maxAttempts` | `number` | Maximum retry attempts | -| `machine` | `"micro" \| "small-1x" \| ...` | Machine preset for the run | -| `priority` | `number` | Priority as a time offset in seconds (higher values are dequeued sooner) | - -```ts -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: getChatToken, - triggerOptions: { - tags: ["user:123"], - queue: "chat-queue", - }, -}); -``` - -### useTriggerChatTransport - -React hook that creates and memoizes a `TriggerChatTransport` instance. Import from `@trigger.dev/sdk/chat/react`. - -```tsx -import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; -import type { myChat } from "@/trigger/chat"; - -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: () => getChatToken(), - sessions: savedSessions, - onSessionChange: handleSessionChange, -}); -``` - -The transport is created once on first render and reused across re-renders. Pass a type parameter for compile-time validation of the task ID. - - - The hook keeps `onSessionChange` up to date via a ref internally, so you don't need to memoize the callback or worry about stale closures. - - -### Dynamic access tokens - -For token refresh, pass a function instead of a string.
It's called on each `sendMessage`: - -```ts -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken: async () => { - const res = await fetch("/api/chat-token"); - return res.text(); - }, -}); -``` - -## Backend reference - -### ChatTaskOptions - -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `id` | `string` | required | Task identifier | -| `run` | `(payload: ChatTaskRunPayload) => Promise` | required | Handler for each turn | -| `clientDataSchema` | `TaskSchema` | — | Schema for validating and typing `clientData` | -| `onChatStart` | `(event: ChatStartEvent) => Promise \| void` | — | Fires on turn 0 before `run()` | -| `onTurnStart` | `(event: TurnStartEvent) => Promise \| void` | — | Fires every turn before `run()` | -| `onTurnComplete` | `(event: TurnCompleteEvent) => Promise \| void` | — | Fires after each turn completes | -| `maxTurns` | `number` | `100` | Max conversational turns per run | -| `turnTimeout` | `string` | `"1h"` | How long to wait for next message | -| `warmTimeoutInSeconds` | `number` | `30` | Seconds to stay warm before suspending | - -Plus all standard [TaskOptions](/tasks/overview) — `retry`, `queue`, `machine`, `maxDuration`, etc. 
- -### ChatTaskRunPayload - -| Field | Type | Description | -|-------|------|-------------| -| `messages` | `ModelMessage[]` | Model-ready messages — pass directly to `streamText` | -| `chatId` | `string` | Unique chat session ID | -| `trigger` | `"submit-message" \| "regenerate-message"` | What triggered the request | -| `messageId` | `string \| undefined` | Message ID (for regenerate) | -| `clientData` | Typed by `clientDataSchema` | Custom data from the frontend (typed when schema is provided) | -| `continuation` | `boolean` | Whether this run is continuing an existing chat (previous run ended) | -| `signal` | `AbortSignal` | Combined stop + cancel signal | -| `cancelSignal` | `AbortSignal` | Cancel-only signal | -| `stopSignal` | `AbortSignal` | Stop-only signal (per-turn) | - -### TurnCompleteEvent - -See [onTurnComplete](#onturncomplete) for the full field reference. - -### chat namespace - -| Method | Description | -|--------|-------------| -| `chat.task(options)` | Create a chat task | -| `chat.pipe(source, options?)` | Pipe a stream to the frontend (from anywhere inside a task) | -| `chat.local({ id })` | Create a per-run typed local (see [Per-run data](#per-run-data-with-chatlocal)) | -| `chat.createAccessToken(taskId)` | Create a public access token for a chat task | -| `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. 
`"2h"`) | -| `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | -| `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | -| `chat.defer(promise)` | Run background work in parallel with streaming, awaited before `onTurnComplete` | -| `chat.isStopped()` | Check if the current turn was stopped by the user (works anywhere during a turn) | -| `chat.cleanupAbortedParts(message)` | Remove incomplete parts from a stopped response message | -| `chat.stream` | Typed chat output stream — use `.writer()`, `.pipe()`, `.append()`, `.read()` | - -## Self-hosting - -If you're self-hosting Trigger.dev, pass the `baseURL` option: - -```ts -const transport = useTriggerChatTransport({ - task: "my-chat", - accessToken, - baseURL: "https://your-trigger-instance.com", -}); -``` - -## Related - -- [Realtime Streams](/tasks/streams) — How streams work under the hood -- [Using the Vercel AI SDK](/guides/examples/vercel-ai-sdk) — Basic AI SDK usage with Trigger.dev -- [Realtime React Hooks](/realtime/react-hooks/overview) — Lower-level realtime hooks -- [Authentication](/realtime/auth) — Public access tokens and trigger tokens diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index bdff3b11db3..46237d9cd43 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -1717,6 +1717,219 @@ class ChatMessageAccumulator { } } +// --------------------------------------------------------------------------- +// chat.createSession — async iterator for chat turns +// --------------------------------------------------------------------------- + +export type ChatSessionOptions = { + /** Run-level cancel signal (from task context). */ + signal: AbortSignal; + /** Seconds to stay warm between turns before suspending. @default 30 */ + warmTimeoutInSeconds?: number; + /** Duration string for suspend timeout. @default "1h" */ + timeout?: string; + /** Max turns before ending. 
@default 100 */ + maxTurns?: number; +}; + +export type ChatTurn = { + /** Turn number (0-indexed). */ + number: number; + /** Chat session ID. */ + chatId: string; + /** What triggered this turn. */ + trigger: string; + /** Client data from the transport (`metadata` field on the wire payload). */ + clientData: unknown; + /** Full accumulated model messages — pass directly to `streamText`. */ + messages: ModelMessage[]; + /** Full accumulated UI messages — use for persistence. */ + uiMessages: UIMessage[]; + /** Combined stop+cancel AbortSignal (fresh each turn). */ + signal: AbortSignal; + /** Whether the user stopped generation this turn. */ + readonly stopped: boolean; + /** Whether this is a continuation run. */ + continuation: boolean; + + /** + * Easy path: pipe stream, capture response, accumulate it, + * clean up aborted parts if stopped, and write turn-complete chunk. + */ + complete(source: UIMessageStreamable): Promise; + + /** + * Manual path: just write turn-complete chunk. + * Use when you've already piped and accumulated manually. + */ + done(): Promise; + + /** + * Add the response to the accumulator manually. + * Use with `chat.pipeAndCapture` when you need control between pipe and done. + */ + addResponse(response: UIMessage): Promise; +}; + +/** + * Create a chat session that yields turns as an async iterator. + * + * Handles: preload wait, stop signals, message accumulation, turn-complete + * signaling, and warm/suspend between turns. You control: initialization, + * model/tool selection, persistence, and any custom per-turn logic. 
+ * + * @example + * ```ts + * import { task } from "@trigger.dev/sdk"; + * import { chat, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; + * import { streamText } from "ai"; + * import { openai } from "@ai-sdk/openai"; + * + * export const myChat = task({ + * id: "my-chat", + * run: async (payload: ChatTaskWirePayload, { signal }) => { + * const session = chat.createSession(payload, { signal }); + * + * for await (const turn of session) { + * const result = streamText({ + * model: openai("gpt-4o"), + * messages: turn.messages, + * abortSignal: turn.signal, + * }); + * await turn.complete(result); + * } + * }, + * }); + * ``` + */ +function createChatSession( + payload: ChatTaskWirePayload, + options: ChatSessionOptions +): AsyncIterable { + const { + signal: runSignal, + warmTimeoutInSeconds = 30, + timeout = "1h", + maxTurns = 100, + } = options; + + return { + [Symbol.asyncIterator]() { + let currentPayload = payload; + let turn = -1; + const stop = createStopSignal(); + const accumulator = new ChatMessageAccumulator(); + + return { + async next(): Promise> { + turn++; + + // First turn: handle preload — wait for the first real message + if (turn === 0 && currentPayload.trigger === "preload") { + const result = await messagesInput.waitWithWarmup({ + warmTimeoutInSeconds: currentPayload.warmTimeoutInSeconds ?? 
warmTimeoutInSeconds, + timeout, + spanName: "waiting for first message", + }); + if (!result.ok || runSignal.aborted) { + stop.cleanup(); + return { done: true, value: undefined }; + } + currentPayload = result.output; + } + + // Subsequent turns: wait for the next message + if (turn > 0) { + const next = await messagesInput.waitWithWarmup({ + warmTimeoutInSeconds, + timeout, + spanName: "waiting for next message", + }); + if (!next.ok || runSignal.aborted) { + stop.cleanup(); + return { done: true, value: undefined }; + } + currentPayload = next.output; + } + + // Check limits + if (turn >= maxTurns || runSignal.aborted) { + stop.cleanup(); + return { done: true, value: undefined }; + } + + // Reset stop signal for this turn + stop.reset(); + + // Accumulate messages + const messages = await accumulator.addIncoming( + currentPayload.messages, + currentPayload.trigger, + turn, + ); + + const combinedSignal = AbortSignal.any([runSignal, stop.signal]); + + const turnObj: ChatTurn = { + number: turn, + chatId: currentPayload.chatId, + trigger: currentPayload.trigger, + clientData: currentPayload.metadata, + messages, + uiMessages: accumulator.uiMessages, + signal: combinedSignal, + get stopped() { return stop.signal.aborted && !runSignal.aborted; }, + continuation: currentPayload.continuation ?? false, + + async complete(source: UIMessageStreamable) { + let response: UIMessage | undefined; + try { + response = await pipeChatAndCapture(source, { signal: combinedSignal }); + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) { + // Full cancel — don't accumulate + await chatWriteTurnComplete(); + return undefined; + } + // Stop — fall through to accumulate partial response + } else { + throw error; + } + } + + if (response) { + const cleaned = (stop.signal.aborted && !runSignal.aborted) + ? 
cleanupAbortedParts(response) + : response; + await accumulator.addResponse(cleaned); + } + + await chatWriteTurnComplete(); + return response; + }, + + async addResponse(response: UIMessage) { + await accumulator.addResponse(response); + }, + + async done() { + await chatWriteTurnComplete(); + }, + }; + + return { done: false, value: turnObj }; + }, + + async return() { + stop.cleanup(); + return { done: true, value: undefined }; + }, + }; + }, + }; +} + // --------------------------------------------------------------------------- // chat.local — per-run typed data with Proxy access // --------------------------------------------------------------------------- @@ -1985,6 +2198,8 @@ export const chat = { pipeAndCapture: pipeChatAndCapture, /** Message accumulator class for raw task chat. See {@link ChatMessageAccumulator}. */ MessageAccumulator: ChatMessageAccumulator, + /** Create a chat session (async iterator). See {@link createChatSession}. */ + createSession: createChatSession, }; /** diff --git a/references/ai-chat/src/components/chat-sidebar.tsx b/references/ai-chat/src/components/chat-sidebar.tsx index 2f6182f3b80..cff6a6be634 100644 --- a/references/ai-chat/src/components/chat-sidebar.tsx +++ b/references/ai-chat/src/components/chat-sidebar.tsx @@ -121,6 +121,7 @@ export function ChatSidebar({ > +
diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index cc2a8952371..3cb32a16a43 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -563,3 +563,76 @@ export const aiChatRaw = task({ stop.cleanup(); }, }); + +// -------------------------------------------------------------------------- +// Session iterator version — middle ground between chat.task and raw task +// -------------------------------------------------------------------------- + +export const aiChatSession = task({ + id: "ai-chat-session", + run: async (payload: ChatTaskWirePayload, { signal }) => { + const clientData = payload.metadata as { userId: string; model?: string } | undefined; + + // One-time init — just code at the top, no hooks needed + if (clientData) { + await initUserContext(clientData.userId, payload.chatId, clientData.model); + } + + const session = chat.createSession(payload, { + signal, + warmTimeoutInSeconds: payload.warmTimeoutInSeconds ?? 60, + timeout: "1h", + }); + + for await (const turn of session) { + const turnClientData = (turn.clientData ?? clientData) as + | { userId: string; model?: string } + | undefined; + + userContext.messageCount++; + if (turnClientData?.model) userContext.preferredModel = turnClientData.model; + + const modelId = turnClientData?.model ?? userContext.preferredModel ?? undefined; + const useReasoning = REASONING_MODELS.has(modelId ?? DEFAULT_MODEL); + + const result = streamText({ + model: getModel(modelId), + system: `You are a helpful assistant for ${userContext.name} (${userContext.plan} plan). Be concise and friendly.`, + messages: turn.messages, + tools: { + inspectEnvironment, + webFetch, + deepResearch: ai.tool(deepResearch), + }, + stopWhen: stepCountIs(10), + abortSignal: turn.signal, + providerOptions: { + openai: { user: turnClientData?.userId }, + anthropic: { + metadata: { user_id: turnClientData?.userId }, + ...(useReasoning ? 
{ thinking: { type: "enabled", budgetTokens: 10000 } } : {}), + }, + }, + experimental_telemetry: { isEnabled: true }, + }); + + await turn.complete(result); + + // Persist after each turn + await prisma.chat.update({ + where: { id: turn.chatId }, + data: { messages: turn.uiMessages as any }, + }); + + if (userContext.hasChanged()) { + await prisma.user.update({ + where: { id: userContext.userId }, + data: { + messageCount: userContext.messageCount, + preferredModel: userContext.preferredModel, + }, + }); + } + } + }, +}); From 0b4b963755277005696220c465ea632709b2284d Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Tue, 10 Mar 2026 14:54:34 +0000 Subject: [PATCH 53/53] Add support for toUIMessageStream() options --- docs/ai-chat/backend.mdx | 85 ++++++++++++++++++++ docs/ai-chat/reference.mdx | 17 ++++ packages/trigger-sdk/src/v3/ai.ts | 104 ++++++++++++++++++++++++- pnpm-lock.yaml | 23 +++--- references/ai-chat/src/trigger/chat.ts | 14 +++- 5 files changed, 230 insertions(+), 13 deletions(-) diff --git a/docs/ai-chat/backend.mdx b/docs/ai-chat/backend.mdx index 4d730b0cbb5..5c21e88ee65 100644 --- a/docs/ai-chat/backend.mdx +++ b/docs/ai-chat/backend.mdx @@ -504,6 +504,89 @@ run: async ({ messages, signal }) => { Longer warm timeout means faster responses but more compute usage. Set to `0` to suspend immediately after each turn (minimum latency cost, slight delay on next message). +#### Stream options + +Control how `streamText` results are converted to the frontend stream via `toUIMessageStream()`. Set static defaults on the task, or override per-turn. + +##### Error handling with onError + +When `streamText` encounters an error mid-stream (rate limits, API failures, network errors), the `onError` callback converts it to a string that's sent to the frontend as an `{ type: "error", errorText }` chunk. The AI SDK's `useChat` receives this via its `onError` callback. + +By default, the raw error message is sent to the frontend. 
Use `onError` to sanitize errors and avoid leaking internal details: + +```ts +export const myChat = chat.task({ + id: "my-chat", + uiMessageStreamOptions: { + onError: (error) => { + // Log the full error server-side for debugging + console.error("Stream error:", error); + // Return a sanitized message — this is what the frontend sees + if (error instanceof Error && error.message.includes("rate limit")) { + return "Rate limited — please wait a moment and try again."; + } + return "Something went wrong. Please try again."; + }, + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +`onError` is also called for tool execution errors, so a single handler covers both LLM errors and tool failures. + +On the frontend, handle the error in `useChat`: + +```tsx +const { messages, sendMessage } = useChat({ + transport, + onError: (error) => { + // error.message contains the string returned by your onError handler + toast.error(error.message); + }, +}); +``` + +##### Reasoning and sources + +Control which AI SDK features are forwarded to the frontend: + +```ts +export const myChat = chat.task({ + id: "my-chat", + uiMessageStreamOptions: { + sendReasoning: true, // Forward model reasoning (default: true) + sendSources: true, // Forward source citations (default: false) + }, + run: async ({ messages, signal }) => { + return streamText({ model: openai("gpt-4o"), messages, abortSignal: signal }); + }, +}); +``` + +##### Per-turn overrides + +Override per-turn with `chat.setUIMessageStreamOptions()` — per-turn values merge with the static config (per-turn wins on conflicts). The override is cleared automatically after each turn. 
+ +```ts +run: async ({ messages, clientData, signal }) => { + // Enable reasoning only for certain models + if (clientData.model?.includes("claude")) { + chat.setUIMessageStreamOptions({ sendReasoning: true }); + } + return streamText({ model: openai(clientData.model ?? "gpt-4o"), messages, abortSignal: signal }); +}, +``` + +`chat.setUIMessageStreamOptions()` works across all abstraction levels — `chat.task()`, `chat.createSession()` / `turn.complete()`, and `chat.pipeAndCapture()`. + +See [ChatUIMessageStreamOptions](/ai-chat/reference#chatuimessagestreamoptions) for the full reference. + + + `onFinish` is managed internally for response capture and cannot be overridden here. Use `streamText`'s `onFinish` callback for custom finish handling, or use [raw task mode](#raw-task-with-primitives) for full control over `toUIMessageStream()`. + + ### Manual mode with task() If you need full control over task options, use the standard `task()` with `ChatTaskPayload` and `chat.pipe()`: @@ -647,6 +730,8 @@ for await (const turn of session) { For full control, use a standard `task()` with the composable primitives from the `chat` namespace. You manage everything: the turn loop, stop signals, message accumulation, and turn-complete signaling. +Raw task mode also lets you call `.toUIMessageStream()` yourself with any options — including `onFinish` and `originalMessages`. This is the right choice when you need complete control over the stream conversion beyond what `chat.setUIMessageStreamOptions()` provides. + ### Primitives | Primitive | Description | diff --git a/docs/ai-chat/reference.mdx b/docs/ai-chat/reference.mdx index 96c30b3b03b..420decee98b 100644 --- a/docs/ai-chat/reference.mdx +++ b/docs/ai-chat/reference.mdx @@ -23,6 +23,7 @@ Options for `chat.task()`. 
| `chatAccessTokenTTL` | `string` | `"1h"` | How long the scoped access token remains valid | | `preloadWarmTimeoutInSeconds` | `number` | Same as `warmTimeoutInSeconds` | Warm timeout after `onPreload` fires | | `preloadTimeout` | `string` | Same as `turnTimeout` | Suspend timeout for preloaded runs | +| `uiMessageStreamOptions` | `ChatUIMessageStreamOptions` | — | Default options for `toUIMessageStream()`. Per-turn override via `chat.setUIMessageStreamOptions()` | Plus all standard [TaskOptions](/tasks/overview) — `retry`, `queue`, `machine`, `maxDuration`, etc. @@ -156,12 +157,28 @@ All methods available on the `chat` object from `@trigger.dev/sdk/ai`. | `chat.setTurnTimeout(duration)` | Override turn timeout at runtime (e.g. `"2h"`) | | `chat.setTurnTimeoutInSeconds(seconds)` | Override turn timeout at runtime (in seconds) | | `chat.setWarmTimeoutInSeconds(seconds)` | Override warm timeout at runtime | +| `chat.setUIMessageStreamOptions(options)` | Override `toUIMessageStream()` options for the current turn | | `chat.defer(promise)` | Run background work in parallel with streaming, awaited before `onTurnComplete` | | `chat.isStopped()` | Check if the current turn was stopped by the user | | `chat.cleanupAbortedParts(message)` | Remove incomplete parts from a stopped response message | | `chat.stream` | Typed chat output stream — use `.writer()`, `.pipe()`, `.append()`, `.read()` | | `chat.MessageAccumulator` | Class that accumulates conversation messages across turns | +## ChatUIMessageStreamOptions + +Options for customizing `toUIMessageStream()`. Set as static defaults via `uiMessageStreamOptions` on `chat.task()`, or override per-turn via `chat.setUIMessageStreamOptions()`. See [Stream options](/ai-chat/backend#stream-options) for usage examples. + +Derived from the AI SDK's `UIMessageStreamOptions` with `onFinish`, `originalMessages`, and `generateMessageId` omitted (managed internally). 
+ +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `onError` | `(error: unknown) => string` | Raw error message | Called on LLM errors and tool execution errors. Return a sanitized string — sent as `{ type: "error", errorText }` to the frontend. | +| `sendReasoning` | `boolean` | `true` | Send reasoning parts to the client | +| `sendSources` | `boolean` | `false` | Send source parts to the client | +| `sendFinish` | `boolean` | `true` | Send the finish event. Set to `false` when chaining multiple `streamText` calls. | +| `sendStart` | `boolean` | `true` | Send the message start event. Set to `false` when chaining. | +| `messageMetadata` | `(options: { part }) => metadata` | — | Extract message metadata to send to the client. Called on `start` and `finish` events. | + ## TriggerChatTransport options Options for the frontend transport constructor and `useTriggerChatTransport` hook. diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 46237d9cd43..a54eaa73ec4 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -14,7 +14,7 @@ import { type TaskSchema, type TaskWithSchema, } from "@trigger.dev/core/v3"; -import type { ModelMessage, UIMessage, UIMessageChunk } from "ai"; +import type { ModelMessage, UIMessage, UIMessageChunk, UIMessageStreamOptions } from "ai"; import type { StreamWriteResult } from "@trigger.dev/core/v3"; import { convertToModelMessages, dynamicTool, generateId as generateMessageId, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; import { type Attributes, trace } from "@opentelemetry/api"; @@ -399,6 +399,10 @@ const chatDeferKey = locals.create>>("chat.defer"); */ const chatPipeCountKey = locals.create("chat.pipeCount"); const chatStopControllerKey = locals.create("chat.stopController"); +/** Static (task-level) UIMessageStream options, set once during chatTask setup. 
@internal */ +const chatUIStreamStaticKey = locals.create("chat.uiMessageStreamOptions.static"); +/** Per-turn UIMessageStream options, set via chat.setUIMessageStreamOptions(). @internal */ +const chatUIStreamPerTurnKey = locals.create("chat.uiMessageStreamOptions.perTurn"); /** * Options for `pipeChat`. @@ -423,6 +427,23 @@ export type PipeChatOptions = { spanName?: string; }; +/** + * Options for customizing the `toUIMessageStream()` call used when piping + * `streamText` results to the frontend. + * + * Set static defaults via `uiMessageStreamOptions` on `chat.task()`, or + * override per-turn via `chat.setUIMessageStreamOptions()`. + * + * `onFinish`, `originalMessages`, and `generateMessageId` are omitted because + * they are managed internally for response capture and message accumulation. + * Use `streamText`'s `onFinish` for custom finish handling, or drop down to + * raw task mode with `chat.pipe()` for full control. + */ +export type ChatUIMessageStreamOptions = Omit< + UIMessageStreamOptions, + "onFinish" | "originalMessages" | "generateMessageId" +>; + /** * An object with a `toUIMessageStream()` method (e.g. `StreamTextResult` from `streamText()`). */ @@ -803,6 +824,35 @@ export type ChatTaskOptions< * @default Same as `turnTimeout` */ preloadTimeout?: string; + + /** + * Default options for `toUIMessageStream()` when auto-piping or using + * `turn.complete()` / `chat.pipeAndCapture()`. + * + * Controls how the `StreamTextResult` is converted to a `UIMessageChunk` + * stream — error handling, reasoning/source visibility, metadata, etc. + * + * Can be overridden per-turn by calling `chat.setUIMessageStreamOptions()` + * inside `run()` or lifecycle hooks. Per-turn values are merged on top + * of these defaults (per-turn wins on conflicts). + * + * `onFinish`, `originalMessages`, and `generateMessageId` are managed + * internally and cannot be overridden here. 
Use `streamText`'s `onFinish` + * for custom finish handling, or drop to raw task mode for full control. + * + * @example + * ```ts + * chat.task({ + * id: "my-chat", + * uiMessageStreamOptions: { + * sendReasoning: true, + * onError: (error) => error instanceof Error ? error.message : "An error occurred.", + * }, + * run: async ({ messages, signal }) => { ... }, + * }); + * ``` + */ + uiMessageStreamOptions?: ChatUIMessageStreamOptions; }; /** @@ -851,6 +901,7 @@ function chatTask< chatAccessTokenTTL = "1h", preloadWarmTimeoutInSeconds, preloadTimeout, + uiMessageStreamOptions, ...restOptions } = options; @@ -867,6 +918,11 @@ function chatTask< activeSpan.setAttribute("gen_ai.conversation.id", payload.chatId); } + // Store static UIMessageStream options in locals so resolveUIMessageStreamOptions() can read them + if (uiMessageStreamOptions) { + locals.set(chatUIStreamStaticKey, uiMessageStreamOptions); + } + let currentWirePayload = payload; const continuation = payload.continuation ?? false; const previousRunId = payload.previousRunId; @@ -1192,6 +1248,7 @@ function chatTask< if ((locals.get(chatPipeCountKey) ?? 0) === 0 && isUIMessageStreamable(result)) { onFinishAttached = true; const uiStream = result.toUIMessageStream({ + ...resolveUIMessageStreamOptions(), onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { capturedResponseMessage = responseMessage; resolveOnFinish!(); @@ -1447,6 +1504,48 @@ function setWarmTimeoutInSeconds(seconds: number): void { metadata.set(WARM_TIMEOUT_METADATA_KEY, seconds); } +/** + * Override the `toUIMessageStream()` options for the current turn. + * + * These options control how the `StreamTextResult` is converted to a + * `UIMessageChunk` stream — error handling, reasoning/source visibility, + * message metadata, etc. + * + * Per-turn options are merged on top of the static `uiMessageStreamOptions` + * set on `chat.task()`. Per-turn values win on conflicts. 
+ * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setUIMessageStreamOptions({ + * sendReasoning: true, + * onError: (error) => error instanceof Error ? error.message : "An error occurred.", + * }); + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setUIMessageStreamOptions(options: ChatUIMessageStreamOptions): void { + locals.set(chatUIStreamPerTurnKey, options); +} + +/** + * Resolve the effective UIMessageStream options by merging: + * 1. Static task-level options (from `chat.task({ uiMessageStreamOptions })`) + * 2. Per-turn overrides (from `chat.setUIMessageStreamOptions()`) + * + * Per-turn values win on conflicts. Clears the per-turn override after reading + * so it doesn't leak into subsequent turns. + * @internal + */ +function resolveUIMessageStreamOptions(): ChatUIMessageStreamOptions { + const staticOptions = locals.get(chatUIStreamStaticKey) ?? {}; + const perTurnOptions = locals.get(chatUIStreamPerTurnKey) ?? {}; + // Clear per-turn override so it doesn't leak into subsequent turns + locals.set(chatUIStreamPerTurnKey, undefined); + return { ...staticOptions, ...perTurnOptions }; +} + // --------------------------------------------------------------------------- // Stop detection // --------------------------------------------------------------------------- @@ -1641,6 +1740,7 @@ async function pipeChatAndCapture( const onFinishPromise = new Promise((r) => { resolveOnFinish = r; }); const uiStream = source.toUIMessageStream({ + ...resolveUIMessageStreamOptions(), onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { captured = responseMessage; resolveOnFinish!(); @@ -2180,6 +2280,8 @@ export const chat = { setTurnTimeoutInSeconds, /** Override the warm timeout at runtime. See {@link setWarmTimeoutInSeconds}. */ setWarmTimeoutInSeconds, + /** Override toUIMessageStream() options for the current turn. See {@link setUIMessageStreamOptions}. 
*/ + setUIMessageStreamOptions, /** Check if the current turn was stopped by the user. See {@link isStopped}. */ isStopped, /** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. */ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e93b9047d85..79fa59a6368 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1104,7 +1104,7 @@ importers: version: 18.3.1 react-email: specifier: ^2.1.1 - version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0) + version: 2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0) resend: specifier: ^3.2.0 version: 3.2.0 @@ -19393,21 +19393,22 @@ packages: tar@6.1.13: resolution: {integrity: sha512-jdIBIN6LTIe2jqzay/2vtYLlBHa3JF42ot3h1dW8Q0PaAG4v8rm0cvpVePtau5C6OKXGGcgO9q2AMNSWxiLqKw==} engines: {node: '>=10'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tar@6.2.1: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} engines: {node: '>=10'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tar@7.4.3: resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tar@7.5.6: resolution: {integrity: sha512-xqUeu2JAIJpXyvskvU3uvQW8PAmHrtXp2KDuMJwQqW8Sqq0CaZBAQ+dKS3RBXVhU4wC5NjAdKrmh84241gO9cA==} engines: {node: '>=18'} + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tdigest@0.1.2: resolution: {integrity: sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==} @@ -23649,7 +23650,7 @@ snapshots: '@epic-web/test-server@0.1.0(bufferutil@4.0.9)': dependencies: '@hono/node-server': 1.12.2(hono@4.5.11) - '@hono/node-ws': 1.0.4(@hono/node-server@1.12.2(hono@4.11.8))(bufferutil@4.0.9) + '@hono/node-ws': 1.0.4(@hono/node-server@1.12.2(hono@4.5.11))(bufferutil@4.0.9) '@open-draft/deferred-promise': 2.2.0 '@types/ws': 8.5.12 hono: 4.5.11 @@ -24408,7 +24409,7 @@ snapshots: dependencies: hono: 4.11.8 - '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.11.8))(bufferutil@4.0.9)': + '@hono/node-ws@1.0.4(@hono/node-server@1.12.2(hono@4.5.11))(bufferutil@4.0.9)': dependencies: '@hono/node-server': 1.12.2(hono@4.5.11) ws: 8.18.3(bufferutil@4.0.9) @@ -40050,7 +40051,7 @@ snapshots: react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(eslint@8.31.0): + react-email@2.1.2(@opentelemetry/api@1.9.0)(@swc/helpers@0.5.15)(bufferutil@4.0.9)(eslint@8.31.0): dependencies: '@babel/parser': 7.24.1 '@radix-ui/colors': 1.0.1 @@ -40087,8 +40088,8 @@ snapshots: react: 18.3.1 react-dom: 18.2.0(react@18.3.1) shelljs: 0.8.5 - socket.io: 4.7.3 - socket.io-client: 4.7.3 + socket.io: 4.7.3(bufferutil@4.0.9) + socket.io-client: 4.7.3(bufferutil@4.0.9) sonner: 1.3.1(react-dom@18.2.0(react@18.3.1))(react@18.3.1) source-map-js: 1.0.2 stacktrace-parser: 0.1.10 @@ -41340,7 +41341,7 @@ snapshots: - supports-color - utf-8-validate - socket.io-client@4.7.3: + socket.io-client@4.7.3(bufferutil@4.0.9): dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.7(supports-color@10.0.0) @@ -41369,7 +41370,7 @@ snapshots: transitivePeerDependencies: - supports-color - socket.io@4.7.3: + socket.io@4.7.3(bufferutil@4.0.9): dependencies: accepts: 1.3.8 base64id: 2.0.0 diff --git 
a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts index 3cb32a16a43..ed93042820a 100644 --- a/references/ai-chat/src/trigger/chat.ts +++ b/references/ai-chat/src/trigger/chat.ts @@ -1,5 +1,5 @@ import { chat, ai, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; -import { schemaTask, task } from "@trigger.dev/sdk"; +import { logger, schemaTask, task } from "@trigger.dev/sdk"; import { streamText, tool, dynamicTool, stepCountIs, generateId } from "ai"; import type { LanguageModel, Tool as AITool, UIMessage } from "ai"; import { openai } from "@ai-sdk/openai"; @@ -231,6 +231,18 @@ export const aiChat = chat.task({ clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), warmTimeoutInSeconds: 60, chatAccessTokenTTL: "2h", + uiMessageStreamOptions: { + sendReasoning: true, + onError: (error) => { + // Log the full error server-side for debugging + logger.error("Stream error", { error }); + // Return a sanitized message — this is what the frontend sees + if (error instanceof Error && error.message.includes("rate limit")) { + return "Rate limited — please wait a moment and try again."; + } + return "Something went wrong. Please try again."; + }, + }, onPreload: async ({ chatId, runId, chatAccessToken, clientData }) => { if (!clientData) return; // Eagerly initialize before the user's first message arrives