diff --git a/.changeset/cjs-output-and-json-response.md b/.changeset/cjs-output-and-json-response.md
new file mode 100644
index 000000000..b6af3a79f
--- /dev/null
+++ b/.changeset/cjs-output-and-json-response.md
@@ -0,0 +1,14 @@
+---
+'@tanstack/ai': minor
+'@tanstack/ai-client': minor
+'@tanstack/ai-event-client': patch
+'@tanstack/ai-react': patch
+'@tanstack/ai-preact': patch
+'@tanstack/ai-solid': patch
+'@tanstack/ai-vue': patch
+'@tanstack/ai-svelte': patch
+---
+
+**Dual ESM + CJS output.** `@tanstack/ai`, `@tanstack/ai-client`, and `@tanstack/ai-event-client` now ship both ESM and CJS builds with type-aware dual `exports` maps (`import` → `./dist/esm/*.js`, `require` → `./dist/cjs/*.cjs`), plus a `main` field pointing at CJS. Fixes Metro / Expo / CJS-only resolvers that previously couldn't find `@tanstack/ai/adapters` or `@tanstack/ai-client` because the packages were ESM-only (#308).
+
+**New `toJSONResponse(stream, init?)` on `@tanstack/ai`.** Drains the chat stream fully and returns a JSON-array `Response` with `Content-Type: application/json`. Use on server runtimes that can't emit `ReadableStream` responses (Expo's `@expo/server`, some edge proxies). Pair with the new `fetchJSON(url, options?)` connection adapter on `@tanstack/ai-client` — it fetches the array and replays each chunk into the normal `ChatClient` pipeline. Trade-off: no incremental rendering (every chunk arrives at once when the request resolves). Closes #309.
diff --git a/docs/api/ai-client.md b/docs/api/ai-client.md
index 379e58589..87ed954ae 100644
--- a/docs/api/ai-client.md
+++ b/docs/api/ai-client.md
@@ -166,6 +166,22 @@ import { fetchHttpStream } from "@tanstack/ai-client";
 
 const adapter = fetchHttpStream("/api/chat");
 ```
 
+### `fetchJSON(url, options?)`
+
+Creates a connection adapter for non-streaming runtimes — pair with [`toJSONResponse`](./ai#tojsonresponsestream-init) on the server. The adapter POSTs `{ messages, data }`, expects a `StreamChunk[]` JSON body, and replays each chunk into the normal `ChatClient` pipeline.
+
+```typescript
+import { fetchJSON } from "@tanstack/ai-client";
+
+const adapter = fetchJSON("/api/chat", {
+  headers: {
+    Authorization: "Bearer token",
+  },
+});
+```
+
+Use this on Expo / React Native / edge proxies that can't emit `ReadableStream` responses. Trade-off: no incremental rendering — the UI sees every chunk at once when the request resolves. Full walkthrough: [React Native & Expo](../chat/non-streaming-runtimes).
+
 ### `stream(connectFn)`
 
 Creates a custom connection adapter.
diff --git a/docs/api/ai.md b/docs/api/ai.md
index da0970d14..b76184107 100644
--- a/docs/api/ai.md
+++ b/docs/api/ai.md
@@ -191,6 +191,32 @@ return toServerSentEventsResponse(stream);
 ```
 
 A `Response` object suitable for HTTP endpoints with SSE headers (`Content-Type: text/event-stream`, `Cache-Control: no-cache`, `Connection: keep-alive`).
 
+## `toJSONResponse(stream, init?)`
+
+Drains the whole stream, then returns a JSON-array `Response` containing every `StreamChunk`. For runtimes that can't emit `ReadableStream` bodies (Expo's `@expo/server`, some edge proxies). Pair with [`fetchJSON`](./ai-client#fetchjsonurl-options) on the client.
+
+```typescript
+import { chat, toJSONResponse } from "@tanstack/ai";
+import { openaiText } from "@tanstack/ai-openai";
+
+const stream = chat({
+  adapter: openaiText("gpt-5.2"),
+  messages: [...],
+});
+return toJSONResponse(stream);
+```
+
+### Parameters
+
+- `stream` - Async iterable of `StreamChunk`
+- `init?` - Optional `ResponseInit`-style options, extended with `abortController`. Caller-provided headers are preserved; `Content-Type` defaults to `application/json`.
+
+### Returns
+
+A `Promise<Response>` whose body is the stringified `StreamChunk[]`. If the upstream stream throws mid-drain, a provided `abortController` is aborted and the error propagates.
+
+> **Trade-off:** no incremental rendering — the UI sees every chunk at once when the request resolves. Use SSE / HTTP-stream responses when the runtime supports them. See [React Native & Expo](../chat/non-streaming-runtimes) for the full walkthrough.
+
 ## `maxIterations(count)`
 
 Creates an agent loop strategy that limits iterations.
diff --git a/docs/chat/connection-adapters.md b/docs/chat/connection-adapters.md
index 0c4460b2b..246b2bc1f 100644
--- a/docs/chat/connection-adapters.md
+++ b/docs/chat/connection-adapters.md
@@ -81,6 +81,21 @@ const { messages } = useChat({
 });
 ```
 
+### JSON Array (non-streaming runtimes)
+
+For runtimes that can't emit `ReadableStream` responses — Expo / React Native, some edge proxies, certain legacy serverless runtimes — pair `fetchJSON` on the client with [`toJSONResponse`](../api/ai#tojsonresponsestream-init) on the server:
+
+```typescript
+import { useChat } from "@tanstack/ai-react";
+import { fetchJSON } from "@tanstack/ai-client";
+
+const { messages } = useChat({
+  connection: fetchJSON("/api/chat"),
+});
+```
+
+The server drains the whole chat stream before responding, and this adapter replays each chunk into the normal `ChatClient` pipeline. Trade-off: no incremental rendering — the UI sees every chunk at once when the request resolves. See [React Native & Expo](./non-streaming-runtimes) for the full walkthrough, or run the [`/tanchat-json` route](https://github.com/TanStack/ai/tree/main/examples/ts-react-chat) in the React example to see it side-by-side with the streaming path.
+
 ## Custom Adapters
 
 For specialized use cases, you can create custom adapters to meet specific protocols or requirements:
diff --git a/docs/chat/non-streaming-runtimes.md b/docs/chat/non-streaming-runtimes.md
new file mode 100644
index 000000000..22d9fe9e3
--- /dev/null
+++ b/docs/chat/non-streaming-runtimes.md
@@ -0,0 +1,119 @@
+---
+title: React Native & Expo
+id: non-streaming-runtimes
+order: 4
+description: "Run TanStack AI on React Native, Expo, and other runtimes that can't emit ReadableStream responses — using toJSONResponse on the server and fetchJSON on the client."
+keywords:
+  - tanstack ai
+  - react native
+  - expo
+  - expo router
+  - metro bundler
+  - non-streaming
+  - toJSONResponse
+  - fetchJSON
+  - edge runtime
+---
+
+You have a React Native or Expo app and you want to add AI chat, but the usual `toServerSentEventsResponse()` helper crashes on Expo's server runtime with:
+
+```
+TypeError: Cannot read properties of undefined (reading 'statusText')
+```
+
+…and Metro refuses to resolve `@tanstack/ai/adapters` at all. By the end of this guide, you'll have a working chat flow on Expo/React Native using a JSON-array fallback path. The same approach works for any deployment target that can't stream `ReadableStream` responses (some edge proxies, legacy serverless runtimes, etc.).
+
+> **Want to see it working before you swap your own app?** The [ts-react-chat example](https://github.com/TanStack/ai/tree/main/examples/ts-react-chat) ships a `/tanchat-json` route that uses this exact pair (`toJSONResponse` on the server, `fetchJSON` on the client). Run `pnpm dev` from `examples/ts-react-chat` and open `/tanchat-json` to compare it against the streaming `/` route side-by-side.
+
+## What's actually going wrong
+
+Two separate problems show up on React Native / Expo:
+
+1. **Module resolution.** `@tanstack/ai` and `@tanstack/ai-client` ship dual ESM + CJS builds with `main`/`module`/`exports` all wired up. If your version is new enough, Metro resolves them out of the box. If you're stuck on an older version, upgrade — older releases were ESM-only and Metro can't consume them.
+
+2. **Response shape.** Expo's `@expo/server` runtime (and a few edge proxies) can't emit a `ReadableStream` body, which is what `toServerSentEventsResponse` and `toHttpResponse` return. The request silently fails on the client side and `isLoading` flips back to `false` immediately.
+
+The fix for (2) is to drain the chat stream on the server, send the collected chunks as a single JSON array, and replay them on the client. You lose incremental rendering — the UI sees every chunk at once when the request resolves — but every other piece of the chat pipeline keeps working as-is.
+
+## Step 1: Return a JSON-array response on the server
+
+Swap `toServerSentEventsResponse` for `toJSONResponse` in your API route. On Expo Router:
+
+```typescript
+// app/api/chat+api.ts
+import { chat, toJSONResponse } from "@tanstack/ai";
+import { openaiText } from "@tanstack/ai-openai";
+
+export async function POST(request: Request) {
+  const { messages } = await request.json();
+
+  const stream = chat({
+    adapter: openaiText("gpt-5.2"),
+    messages,
+  });
+
+  return toJSONResponse(stream);
+}
+```
+
+`toJSONResponse` iterates the whole stream, collects each `StreamChunk` into an array, and returns a plain `Response` with `Content-Type: application/json`. It accepts the same `init` options as `toServerSentEventsResponse` (including `abortController`) and honours any `Content-Type` you pass in `headers`.
+
+## Step 2: Use `fetchJSON` as the connection adapter on the client
+
+Swap `fetchServerSentEvents` for `fetchJSON` in your `useChat` call:
+
+```typescript
+import { useChat } from "@tanstack/ai-react";
+import { fetchJSON } from "@tanstack/ai-client";
+
+export function ChatScreen() {
+  const { messages, sendMessage, isLoading } = useChat({
+    connection: fetchJSON("/api/chat"),
+  });
+
+  // messages and isLoading behave identically to the streaming path —
+  // they just update all at once when the request resolves.
+  // (ChatView stands in for your own presentational component.)
+  return <ChatView messages={messages} isLoading={isLoading} onSend={sendMessage} />;
+}
+```
+
+`fetchJSON` accepts the same `url` + `options` signature as the other connection adapters (static string or function, headers, credentials, custom `fetchClient`, extra body, abort signal). It POSTs the usual `{ messages, data }` body, decodes the response as a `StreamChunk[]`, and replays each chunk into the normal `ChatClient` pipeline — tool calls, approvals, thinking content, errors all behave the same way they do with SSE.
+
+## Step 3: Expect no incremental rendering
+
+The one thing you give up: the UI won't update character-by-character. The request hangs until the server finishes the whole run, then the full message — including tool calls, results, and the final assistant turn — appears at once.
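+
+Because nothing renders until the request resolves, give users an explicit pending state. Here is a minimal React Native sketch reusing the Step 2 wiring; the part shapes follow the other examples in this guide, and the markup is illustrative rather than prescribed:
+
+```typescript
+import { ActivityIndicator, ScrollView, Text, View } from "react-native";
+import { useChat } from "@tanstack/ai-react";
+import { fetchJSON } from "@tanstack/ai-client";
+
+export function ChatScreen() {
+  const { messages, isLoading } = useChat({
+    connection: fetchJSON("/api/chat"),
+  });
+
+  return (
+    <View style={{ flex: 1 }}>
+      <ScrollView>
+        {messages.flatMap((message, messageIndex) =>
+          message.parts.map((part, partIndex) =>
+            part.type === "text" && part.content ? (
+              <Text key={`${messageIndex}-${partIndex}`}>{part.content}</Text>
+            ) : null,
+          ),
+        )}
+      </ScrollView>
+      {/* The whole reply lands in one shot, so this spinner is the only
+          feedback the user gets while the server drains the stream. */}
+      {isLoading && <ActivityIndicator size="large" />}
+    </View>
+  );
+}
+```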
+
+If this becomes a problem, the answer is to move to a runtime that supports streaming responses (Hono on Node, Next.js, TanStack Start, a real SSE endpoint proxied through a CDN that doesn't buffer) rather than to work around the limitation further. The JSON-array path is a pragmatic escape hatch, not the intended happy path.
+
+## Troubleshooting
+
+`fetchJSON` surfaces upstream failures with enough context to skip a debugger trip — match the error string against the cases below.
+
+**`fetchJSON: failed to parse response body as JSON from /api/chat (status 502): …`**
+
+The server returned a non-JSON body — usually an HTML gateway error page from a proxy in front of your handler (Cloudflare, a Vercel edge buffer, an API gateway that intercepted before your route ran). The cause is upstream of TanStack AI; check the proxy logs or hit the URL directly with `curl` to see what's actually being returned.
+
+**`HTTP error! status: 429 Too Many Requests — {"error":{"type":"rate_limit_error",…}}`**
+
+The body snippet (truncated to 500 chars) is the raw response from your server route. For provider-relayed errors like this rate-limit JSON, the snippet preserves the upstream `type` / `message` fields — surface them in your UI rather than showing a generic "request failed".
+
+**`fetchJSON: expected response body to be a JSON array of StreamChunks. Did you forget to use \`toJSONResponse(stream)\` on the server?`**
+
+The route returned valid JSON, but not an array. Almost always a server-side mistake: returning `Response.json({ messages: [...] })` or similar instead of `toJSONResponse(stream)`. Check the API route matches the [Step 1 example](#step-1-return-a-json-array-response-on-the-server).
+
+**The request hangs forever and `isLoading` stays `true`.**
+
+The server is buffering the response and never flushing. If the runtime supports streaming at all, switch to `toServerSentEventsResponse` — it'll fail loudly instead of silently buffering. If buffering is unavoidable (Expo, sandboxed previews), confirm the server route is actually reaching `toJSONResponse` and not crashing earlier; check server logs.
+
+If you abort the request from the client (e.g. the user navigates away), `fetchJSON` honours the abort signal and stops yielding chunks even after the response has been received. No extra cleanup is needed in `useChat` consumers.
+
+## Going back to streaming when you can
+
+If you later deploy your server code to a runtime that *does* support streaming, you only need to change two call sites — `toJSONResponse` → `toServerSentEventsResponse` and `fetchJSON` → `fetchServerSentEvents`. Everything downstream (messages, tool calls, approvals, `useChat` state, error handling) is identical between the two paths, so there's no cleanup to chase through the app.
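+
+As a sketch, assuming the Expo Router route from Step 1 has moved to a streaming-capable host:
+
+```typescript
+// app/api/chat+api.ts: same route, with the streaming helper swapped in
+import { chat, toServerSentEventsResponse } from "@tanstack/ai"; // was toJSONResponse
+import { openaiText } from "@tanstack/ai-openai";
+
+export async function POST(request: Request) {
+  const { messages } = await request.json();
+
+  const stream = chat({
+    adapter: openaiText("gpt-5.2"),
+    messages,
+  });
+
+  // Chunks now flush incrementally instead of being drained into an array.
+  return toServerSentEventsResponse(stream);
+}
+```
+
+On the client, the only change is the connection adapter: `fetchJSON("/api/chat")` becomes `fetchServerSentEvents("/api/chat")` in the `useChat` call.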
+
+## Next Steps
+
+- [Streaming](./streaming) — the normal incremental-rendering path
+- [Connection Adapters](./connection-adapters) — full list of client-side adapters, including `fetchJSON`
+- [API Reference: `toJSONResponse`](../api/ai#tojsonresponsestream-init) — server-side helper reference
+- [API Reference: `fetchJSON`](../api/ai-client#fetchjsonurl-options) — client-side adapter reference
diff --git a/docs/chat/streaming.md b/docs/chat/streaming.md
index a11bd2ca2..5a9a2afad 100644
--- a/docs/chat/streaming.md
+++ b/docs/chat/streaming.md
@@ -55,6 +55,8 @@ export async function POST(request: Request) {
 }
 ```
 
+> **Running on Expo, React Native, or another runtime that can't emit `ReadableStream` responses?** See [React Native & Expo](./non-streaming-runtimes) for the `toJSONResponse` + `fetchJSON` fallback pair.
+
 ## Client-Side Streaming
 
 The `useChat` hook automatically handles streaming:
diff --git a/docs/config.json b/docs/config.json
index 89d4f5abc..e9d9fb42e 100644
--- a/docs/config.json
+++ b/docs/config.json
@@ -96,6 +96,10 @@
         {
           "label": "Connection Adapters",
           "to": "chat/connection-adapters"
         },
+        {
+          "label": "React Native & Expo",
+          "to": "chat/non-streaming-runtimes"
+        },
         {
           "label": "Structured Outputs",
           "to": "chat/structured-outputs"
diff --git a/examples/ts-react-chat/src/routeTree.gen.ts b/examples/ts-react-chat/src/routeTree.gen.ts
index f9b2ac825..15623f631 100644
--- a/examples/ts-react-chat/src/routeTree.gen.ts
+++ b/examples/ts-react-chat/src/routeTree.gen.ts
@@ -9,6 +9,7 @@
 // Additionally, you should also exclude this file from your linter and/or formatter to prevent it from being checked or modified.
 
 import { Route as rootRouteImport } from './routes/__root'
+import { Route as TanchatJsonRouteImport } from './routes/tanchat-json'
 import { Route as RealtimeRouteImport } from './routes/realtime'
 import { Route as ImageGenRouteImport } from './routes/image-gen'
 import { Route as IndexRouteImport } from './routes/index'
@@ -20,6 +21,7 @@ import { Route as GenerationsSpeechRouteImport } from './routes/generations.spee
 import { Route as GenerationsImageRouteImport } from './routes/generations.image'
 import { Route as GenerationsAudioRouteImport } from './routes/generations.audio'
 import { Route as ApiTranscribeRouteImport } from './routes/api.transcribe'
+import { Route as ApiTanchatJsonRouteImport } from './routes/api.tanchat-json'
 import { Route as ApiTanchatRouteImport } from './routes/api.tanchat'
 import { Route as ApiSummarizeRouteImport } from './routes/api.summarize'
 import { Route as ApiStructuredOutputRouteImport } from './routes/api.structured-output'
@@ -31,6 +33,11 @@ import { Route as ApiGenerateSpeechRouteImport } from './routes/api.generate.spe
 import { Route as ApiGenerateImageRouteImport } from './routes/api.generate.image'
 import { Route as ApiGenerateAudioRouteImport } from './routes/api.generate.audio'
 
+const TanchatJsonRoute = TanchatJsonRouteImport.update({
+  id: '/tanchat-json',
+  path: '/tanchat-json',
+  getParentRoute: () => rootRouteImport,
+} as any)
 const RealtimeRoute = RealtimeRouteImport.update({
   id: '/realtime',
   path: '/realtime',
@@ -88,6 +95,11 @@ const ApiTranscribeRoute = ApiTranscribeRouteImport.update({
   path: '/api/transcribe',
   getParentRoute: () => rootRouteImport,
 } as any)
+const ApiTanchatJsonRoute = ApiTanchatJsonRouteImport.update({
+  id: '/api/tanchat-json',
+  path: '/api/tanchat-json',
+  getParentRoute: () => rootRouteImport,
+} as any)
 const ApiTanchatRoute = ApiTanchatRouteImport.update({
   id: '/api/tanchat',
   path: '/api/tanchat',
@@ -143,10 +155,12 @@ export interface FileRoutesByFullPath {
   '/': typeof IndexRoute
   '/image-gen': typeof ImageGenRoute
   '/realtime': typeof RealtimeRoute
+  '/tanchat-json': typeof TanchatJsonRoute
   '/api/image-gen': typeof ApiImageGenRoute
   '/api/structured-output': typeof ApiStructuredOutputRoute
   '/api/summarize': typeof ApiSummarizeRoute
   '/api/tanchat': typeof ApiTanchatRoute
+  '/api/tanchat-json': typeof ApiTanchatJsonRoute
   '/api/transcribe': typeof ApiTranscribeRoute
   '/generations/audio': typeof GenerationsAudioRoute
   '/generations/image': typeof GenerationsImageRoute
@@ -166,10 +180,12 @@ export interface FileRoutesByTo {
   '/': typeof IndexRoute
   '/image-gen': typeof ImageGenRoute
   '/realtime': typeof RealtimeRoute
+  '/tanchat-json': typeof TanchatJsonRoute
   '/api/image-gen': typeof ApiImageGenRoute
   '/api/structured-output': typeof ApiStructuredOutputRoute
   '/api/summarize': typeof ApiSummarizeRoute
   '/api/tanchat': typeof ApiTanchatRoute
+  '/api/tanchat-json': typeof ApiTanchatJsonRoute
   '/api/transcribe': typeof ApiTranscribeRoute
   '/generations/audio': typeof GenerationsAudioRoute
   '/generations/image': typeof GenerationsImageRoute
@@ -190,10 +206,12 @@ export interface FileRoutesById {
   '/': typeof IndexRoute
   '/image-gen': typeof ImageGenRoute
   '/realtime': typeof RealtimeRoute
+  '/tanchat-json': typeof TanchatJsonRoute
   '/api/image-gen': typeof ApiImageGenRoute
   '/api/structured-output': typeof ApiStructuredOutputRoute
   '/api/summarize': typeof ApiSummarizeRoute
   '/api/tanchat': typeof ApiTanchatRoute
+  '/api/tanchat-json': typeof ApiTanchatJsonRoute
   '/api/transcribe': typeof ApiTranscribeRoute
   '/generations/audio': typeof GenerationsAudioRoute
   '/generations/image': typeof GenerationsImageRoute
@@ -215,10 +233,12 @@ export interface FileRouteTypes {
     | '/'
     | '/image-gen'
     | '/realtime'
+    | '/tanchat-json'
    | '/api/image-gen'
     | '/api/structured-output'
     | '/api/summarize'
     | '/api/tanchat'
+    | '/api/tanchat-json'
     | '/api/transcribe'
     | '/generations/audio'
     | '/generations/image'
@@ -238,10 +258,12 @@ export interface FileRouteTypes {
     | '/'
     | '/image-gen'
     | '/realtime'
+    | '/tanchat-json'
     | '/api/image-gen'
     | '/api/structured-output'
     | '/api/summarize'
     | '/api/tanchat'
+    | '/api/tanchat-json'
     | '/api/transcribe'
     | '/generations/audio'
     | '/generations/image'
@@ -261,10 +283,12 @@ export interface FileRouteTypes {
     | '/'
     | '/image-gen'
     | '/realtime'
+    | '/tanchat-json'
     | '/api/image-gen'
     | '/api/structured-output'
     | '/api/summarize'
     | '/api/tanchat'
+    | '/api/tanchat-json'
     | '/api/transcribe'
     | '/generations/audio'
     | '/generations/image'
@@ -285,10 +309,12 @@ export interface RootRouteChildren {
   IndexRoute: typeof IndexRoute
   ImageGenRoute: typeof ImageGenRoute
   RealtimeRoute: typeof RealtimeRoute
+  TanchatJsonRoute: typeof TanchatJsonRoute
   ApiImageGenRoute: typeof ApiImageGenRoute
   ApiStructuredOutputRoute: typeof ApiStructuredOutputRoute
   ApiSummarizeRoute: typeof ApiSummarizeRoute
   ApiTanchatRoute: typeof ApiTanchatRoute
+  ApiTanchatJsonRoute: typeof ApiTanchatJsonRoute
   ApiTranscribeRoute: typeof ApiTranscribeRoute
   GenerationsAudioRoute: typeof GenerationsAudioRoute
   GenerationsImageRoute: typeof GenerationsImageRoute
@@ -307,6 +333,13 @@ declare module '@tanstack/react-router' {
   interface FileRoutesByPath {
+    '/tanchat-json': {
+      id: '/tanchat-json'
+      path: '/tanchat-json'
+      fullPath: '/tanchat-json'
+      preLoaderRoute: typeof TanchatJsonRouteImport
+      parentRoute: typeof rootRouteImport
+    }
     '/realtime': {
       id: '/realtime'
       path: '/realtime'
@@ -384,6 +417,13 @@ declare module '@tanstack/react-router' {
       preLoaderRoute: typeof ApiTranscribeRouteImport
       parentRoute: typeof rootRouteImport
     }
+    '/api/tanchat-json': {
+      id: '/api/tanchat-json'
+      path: '/api/tanchat-json'
+      fullPath: '/api/tanchat-json'
+      preLoaderRoute: typeof ApiTanchatJsonRouteImport
+      parentRoute: typeof rootRouteImport
+    }
     '/api/tanchat': {
       id: '/api/tanchat'
       path: '/api/tanchat'
@@ -461,10 +501,12 @@ const rootRouteChildren: RootRouteChildren = {
   IndexRoute: IndexRoute,
   ImageGenRoute: ImageGenRoute,
   RealtimeRoute: RealtimeRoute,
+  TanchatJsonRoute: TanchatJsonRoute,
   ApiImageGenRoute: ApiImageGenRoute,
   ApiStructuredOutputRoute: ApiStructuredOutputRoute,
   ApiSummarizeRoute: ApiSummarizeRoute,
   ApiTanchatRoute: ApiTanchatRoute,
+  ApiTanchatJsonRoute: ApiTanchatJsonRoute,
   ApiTranscribeRoute: ApiTranscribeRoute,
   GenerationsAudioRoute: GenerationsAudioRoute,
   GenerationsImageRoute: GenerationsImageRoute,
diff --git a/examples/ts-react-chat/src/routes/api.tanchat-json.ts b/examples/ts-react-chat/src/routes/api.tanchat-json.ts
new file mode 100644
index 000000000..0a569fed3
--- /dev/null
+++ b/examples/ts-react-chat/src/routes/api.tanchat-json.ts
@@ -0,0 +1,163 @@
+import { createFileRoute } from '@tanstack/react-router'
+import {
+  chat,
+  createChatOptions,
+  maxIterations,
+  toJSONResponse,
+} from '@tanstack/ai'
+import { openaiText } from '@tanstack/ai-openai'
+import { ollamaText } from '@tanstack/ai-ollama'
+import { anthropicText } from '@tanstack/ai-anthropic'
+import { geminiText } from '@tanstack/ai-gemini'
+import { openRouterText } from '@tanstack/ai-openrouter'
+import { grokText } from '@tanstack/ai-grok'
+import { groqText } from '@tanstack/ai-groq'
+import type { AnyTextAdapter } from '@tanstack/ai'
+import {
+  addToCartToolDef,
+  addToWishListToolDef,
+  calculateFinancing,
+  compareGuitars,
+  getGuitars,
+  getPersonalGuitarPreferenceToolDef,
+  recommendGuitarToolDef,
+  searchGuitars,
+} from '@/lib/guitar-tools'
+
+// Companion to /api/tanchat that returns the full chat as a single JSON
+// array via toJSONResponse(stream). Use this when the target runtime can't
+// emit a streaming Response — e.g. Expo's @expo/server, certain Cloudflare
+// or edge proxy setups. Pair with fetchJSON('/api/tanchat-json') on the
+// client. Trade-off: the UI sees nothing until the request resolves.
+
+type Provider =
+  | 'openai'
+  | 'anthropic'
+  | 'gemini'
+  | 'ollama'
+  | 'grok'
+  | 'groq'
+  | 'openrouter'
+
+const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store.
+
+When a user asks for a guitar recommendation:
+1. FIRST: Use the getGuitars tool (no parameters needed)
+2. SECOND: Use the recommendGuitar tool with the ID of the guitar you want to recommend
+3. NEVER write a recommendation directly — ALWAYS use the recommendGuitar tool
+`
+
+const addToCartToolServer = addToCartToolDef.server((args) => ({
+  success: true,
+  cartId: 'CART_' + Date.now(),
+  guitarId: args.guitarId,
+  quantity: args.quantity,
+  totalItems: args.quantity,
+}))
+
+export const Route = createFileRoute('/api/tanchat-json')({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        if (request.signal.aborted) {
+          return new Response(null, { status: 499 })
+        }
+
+        const abortController = new AbortController()
+        const body = await request.json()
+        const { messages, data } = body
+
+        const provider: Provider = data?.provider || 'openai'
+        const model: string = data?.model || 'gpt-4o'
+        const conversationId: string | undefined = data?.conversationId
+
+        const adapterConfig: Record<
+          Provider,
+          () => { adapter: AnyTextAdapter }
+        > = {
+          anthropic: () =>
+            createChatOptions({
+              adapter: anthropicText(
+                (model || 'claude-sonnet-4-5') as 'claude-sonnet-4-5',
+              ),
+            }),
+          openrouter: () =>
+            createChatOptions({
+              adapter: openRouterText('openai/gpt-5.1'),
+            }),
+          gemini: () =>
+            createChatOptions({
+              adapter: geminiText(
+                (model || 'gemini-2.5-flash') as 'gemini-2.5-flash',
+              ),
+            }),
+          grok: () =>
+            createChatOptions({
+              adapter: grokText((model || 'grok-3') as 'grok-3'),
+            }),
+          groq: () =>
+            createChatOptions({
+              adapter: groqText(
+                (model ||
+                  'llama-3.3-70b-versatile') as 'llama-3.3-70b-versatile',
+              ),
+            }),
+          ollama: () =>
+            createChatOptions({
+              adapter: ollamaText((model || 'gpt-oss:120b') as 'gpt-oss:120b'),
+            }),
+          openai: () =>
+            createChatOptions({
+              adapter: openaiText((model || 'gpt-4o') as 'gpt-4o'),
+            }),
+        }
+
+        try {
+          const options = adapterConfig[provider]()
+
+          const stream = chat({
+            ...options,
+            tools: [
+              getGuitars,
+              recommendGuitarToolDef,
+              addToCartToolServer,
+              addToWishListToolDef,
+              getPersonalGuitarPreferenceToolDef,
+              compareGuitars,
+              calculateFinancing,
+              searchGuitars,
+            ],
+            systemPrompts: [SYSTEM_PROMPT],
+            agentLoopStrategy: maxIterations(20),
+            messages,
+            abortController,
+            conversationId,
+          })
+
+          // The only difference from /api/tanchat: the entire stream is
+          // drained and serialised as a JSON array instead of an SSE stream.
+          return toJSONResponse(stream, { abortController })
+        } catch (error: any) {
+          console.error('[api.tanchat-json] Error in chat request:', {
+            message: error?.message,
+            name: error?.name,
+            status: error?.status,
+            stack: error?.stack,
+          })
+          if (error.name === 'AbortError' || abortController.signal.aborted) {
+            return new Response(null, { status: 499 })
+          }
+          return new Response(
+            JSON.stringify({
+              error: error.message || 'An error occurred',
+            }),
+            {
+              status: 500,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+      },
+    },
+  },
+})
diff --git a/examples/ts-react-chat/src/routes/index.tsx b/examples/ts-react-chat/src/routes/index.tsx
index c91dbd746..789510b92 100644
--- a/examples/ts-react-chat/src/routes/index.tsx
+++ b/examples/ts-react-chat/src/routes/index.tsx
@@ -512,6 +512,13 @@ function ChatPage() {
           ))}
+          <Link to="/tanchat-json">
+            JSON mode
+          </Link>
diff --git a/examples/ts-react-chat/src/routes/tanchat-json.tsx b/examples/ts-react-chat/src/routes/tanchat-json.tsx
new file mode 100644
--- /dev/null
+++ b/examples/ts-react-chat/src/routes/tanchat-json.tsx
@@ -0,0 +1,108 @@
+import { useRef, useState } from 'react'
+import { createFileRoute, Link } from '@tanstack/react-router'
+import { useChat } from '@tanstack/ai-react'
+import { fetchJSON } from '@tanstack/ai-client'
+
+export const Route = createFileRoute('/tanchat-json')({
+  component: TanChatJsonPage,
+})
+
+function Messages({
+  messages,
+}: {
+  messages: ReturnType<typeof useChat>['messages']
+}) {
+  if (messages.length === 0) {
+    return (
+      <div>
+        <p>
+          Send a message — the response will arrive in one shot when the
+          server finishes draining the stream.
+        </p>
+      </div>
+    )
+  }
+
+  return (
+    <div>
+      {messages.map((message) => (
+        <div key={message.id}>
+          <div>{message.role === 'assistant' ? 'AI' : 'U'}</div>
+          <div>
+            {message.parts.map((part, index) => {
+              if (part.type === 'text' && part.content) {
+                return (
+                  <p key={index}>{part.content}</p>
+                )
+              }
+              return null
+            })}
+          </div>
+        </div>
+      ))}
+    </div>
+  )
+}
+
+function TanChatJsonPage() {
+  const [input, setInput] = useState('')
+  const textareaRef = useRef<HTMLTextAreaElement>(null)
+
+  const { messages, sendMessage, isLoading, error, stop } = useChat({
+    connection: fetchJSON('/api/tanchat-json'),
+    body: { provider: 'openai', model: 'gpt-4o' },
+  })
+
+  const handleSend = () => {
+    if (!input.trim()) return
+    sendMessage(input.trim())
+    setInput('')
+    if (textareaRef.current) textareaRef.current.style.height = 'auto'
+  }
+
+  return (
+    <div>
+      <header>
+        <h1>
+          Non-streaming chat (toJSONResponse / fetchJSON)
+        </h1>
+        <p>
+          Server drains the chat stream and returns it as a single JSON
+          array. Use this on runtimes that can't emit ReadableStream
+          responses (e.g. Expo). UI sees everything at once.
+        </p>
+        <Link to="/">
+          Streaming demo →
+        </Link>
+      </header>
+
+      <Messages messages={messages} />
+
+      {error && (
+        <div role="alert">{error.message}</div>
+      )}
+
+      <div>
+        <textarea
+          ref={textareaRef}
+          value={input}
+          onChange={(e) => setInput(e.target.value)}
+          onKeyDown={(e) => {
+            if (e.key === 'Enter' && !e.shiftKey) {
+              e.preventDefault()
+              handleSend()
+            }
+          }}
+          placeholder="Ask about guitars..."
+          rows={1}
+        />
+        {isLoading && (
+          <button onClick={() => stop()}>Stop</button>
+        )}
+        <button onClick={handleSend} disabled={isLoading || !input.trim()}>
+          Send
+        </button>
+      </div>
+    </div>
+  )
+}