1 | | -import { generateText } from 'ai' |
| 1 | +import { streamText } from 'ai' |
2 | 2 | import { RecipeContext } from '../types' |
3 | 3 | import { createOpenAI } from '@ai-sdk/openai' |
4 | 4 |
5 | 5 | /* |
6 | | - * The captioning API mirrors our multi-turn chat route but returns a single |
7 | | - * string instead of a streaming response. Because Modular MAX speaks the |
8 | | - * OpenAI-compatible protocol, the Vercel AI SDK can works with Modular MAX |
9 | | - * out of the box. |
| 6 | + * Image Captioning API with NDJSON Streaming and Performance Metrics |
| 7 | + * |
| 8 | + * This API demonstrates progressive response streaming using NDJSON (newline-delimited JSON). |
| 9 | + * Instead of waiting for all captions to complete, we stream each result as it's generated, |
| 10 | + * providing immediate feedback to users along with detailed performance metrics. |
| 11 | + * |
| 12 | + * Key concepts: |
| 13 | + * - NDJSON format: One JSON object per line, easy to parse progressively |
| 14 | + * - Parallel processing: all images are captioned concurrently for speed |
| 15 | + * - Stream-as-you-go: Results appear in the UI the moment they're ready |
| 16 | + * - Performance tracking: TTFT (time to first token) and duration (generation time) per image |
| 17 | + * - OpenAI-compatible: Works with Modular MAX or any OpenAI-compatible server |
| 18 | + * |
| 19 | + * Timing metrics explained: |
| 20 | + * - TTFT: Time from request start to first token (measures latency) |
| 21 | + * - Duration: Time from first token to completion (measures generation speed) |
10 | 22 | */ |
11 | 23 |
12 | | -// ============================================================================ |
13 | | -// POST /api — generates an image caption |
14 | | -// ============================================================================ |
15 | 24 | export default async function POST(req: Request, context: RecipeContext) { |
16 | 25 | const { apiKey, baseUrl, modelName } = context |
17 | | - const { messages } = await req.json() |
18 | | - if (!messages) { |
19 | | - return new Response('Client did not provide messages', { status: 400 }) |
| 26 | + const body = await req.json() |
| 27 | + |
| 28 | + const isBatch = Array.isArray(body.batch) |
| 29 | + |
| 30 | + if (!isBatch && !body.messages) { |
| 31 | + return new Response('Client did not provide messages or batch', { status: 400 }) |
20 | 32 | } |
21 | 33 |
22 | | - // Use the the Vercel AI SDK to connect to the MAX endpoint |
23 | 34 | try { |
24 | | - // createOpenAI returns an OpenAI-compatible client |
| 35 | + // The Vercel AI SDK's createOpenAI works with any OpenAI-compatible endpoint |
25 | 36 | const client = createOpenAI({ baseURL: baseUrl, apiKey }) |
26 | | - |
27 | | - // chat(modelName) works with LLM servers like MAX that |
28 | | - // implement the chat-completions format |
29 | 37 | const model = client.chat(modelName) |
30 | 38 |
|
31 | | - // Finally, we call generateText to get a caption for our images |
32 | | - const { text } = await generateText({ |
33 | | - // The recipe UI creates messages in the ModelMessage format, |
34 | | - // so converting from UIMessage to ModelMessage is unnecessary |
35 | | - model: model, |
36 | | - messages: messages, |
37 | | - }) |
| 39 | + if (isBatch) { |
| 40 | + // NDJSON streaming: send results progressively as they complete |
| 41 | + const encoder = new TextEncoder() |
| 42 | + const stream = new ReadableStream({ |
| 43 | + async start(controller) { |
| 44 | + try { |
| 45 | + // Process all images in parallel using Promise.all |
| 46 | + // As each caption completes, we immediately stream it to the client |
| 47 | + await Promise.all( |
| 48 | + body.batch.map(async (item: { imageId: string; messages: any }) => { |
| 49 | + try { |
| 50 | + const startTime = Date.now() |
| 51 | + let firstTokenTime: number | null = null |
| 52 | + let ttft: number | null = null |
| 53 | + let textChunks: string[] = [] |
| 54 | + |
| 55 | + // Use streamText (not generateText) to capture timing metrics |
| 56 | + const result = streamText({ |
| 57 | + model: model, |
| 58 | + messages: item.messages, |
| 59 | + }) |
| 60 | + |
| 61 | + // Consume the stream chunk-by-chunk to collect text and timing |
| 62 | + for await (const chunk of result.textStream) { |
| 63 | + // Capture TTFT: time from request start to first token |
| 64 | + if (ttft === null) { |
| 65 | + firstTokenTime = Date.now() |
| 66 | + ttft = firstTokenTime - startTime |
| 67 | + } |
| 68 | + textChunks.push(chunk) |
| 69 | + } |
| 70 | + |
| 71 | + // Duration: time from first token to completion (not total time) |
| 72 | + const duration = firstTokenTime ? Date.now() - firstTokenTime : null |
| 73 | + const text = textChunks.join('') |
| 74 | + |
| 75 | + // Stream result as NDJSON: one JSON object per line with metrics |
| 76 | + const line = JSON.stringify({ |
| 77 | + imageId: item.imageId, |
| 78 | + text, |
| 79 | + ttft, |
| 80 | + duration |
| 81 | + }) + '\n' |
| 82 | + controller.enqueue(encoder.encode(line)) |
| 83 | + } catch (error) { |
| 84 | + // Send errors per-image so UI can show partial results |
| 85 | + const errorMessage = error instanceof Error ? error.message : 'Unknown error' |
| 86 | + const line = JSON.stringify({ |
| 87 | + imageId: item.imageId, |
| 88 | + error: errorMessage |
| 89 | + }) + '\n' |
| 90 | + controller.enqueue(encoder.encode(line)) |
| 91 | + } |
| 92 | + }) |
| 93 | + ) |
| 94 | + |
| 95 | + controller.close() |
| 96 | + } catch (error) { |
| 97 | + controller.error(error) |
| 98 | + } |
| 99 | + }, |
| 100 | + }) |
| 101 | + |
| 102 | + return new Response(stream, { |
| 103 | + headers: { |
| 104 | + 'Content-Type': 'application/x-ndjson', |
| 105 | + }, |
| 106 | + }) |
| 107 | + } else { |
| 108 | + // Single caption request: stream and collect text |
| 109 | + const result = streamText({ |
| 110 | + model: model, |
| 111 | + messages: body.messages, |
| 112 | + }) |
| 113 | + |
| 114 | + let textChunks: string[] = [] |
| 115 | + for await (const chunk of result.textStream) { |
| 116 | + textChunks.push(chunk) |
| 117 | + } |
38 | 118 |
39 | | - return Response.json({ text }) |
| 119 | + return Response.json({ text: textChunks.join('') }) |
| 120 | + } |
40 | 121 | } catch (error) { |
41 | 122 | const errorMessage = error instanceof Error ? `(${error.message})` : '' |
42 | 123 | return new Response(`Failed to generate caption ${errorMessage}`, { |
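For reference, a minimal sketch of how a browser client might consume the NDJSON stream returned by the batch branch above. Only the one-JSON-object-per-line format and the `{ batch: [{ imageId, messages }] }` request body come from the route itself; the `/api` path, the `CaptionResult` type, and the `onCaption` callback are assumptions added here for illustration.

```ts
// Hypothetical client-side consumer for the NDJSON batch endpoint.
// Assumes the route above is served at '/api'; adjust the path to your setup.
type CaptionResult = {
  imageId: string
  text?: string
  ttft?: number | null
  duration?: number | null
  error?: string
}

export async function streamCaptions(
  batch: { imageId: string; messages: unknown[] }[],
  onCaption: (result: CaptionResult) => void,
): Promise<void> {
  const res = await fetch('/api', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ batch }),
  })
  if (!res.ok || !res.body) {
    throw new Error(`Caption request failed (${res.status})`)
  }

  const reader = res.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })

    // NDJSON: every complete line is one JSON object; keep a trailing
    // partial line in the buffer until the rest of it arrives.
    const lines = buffer.split('\n')
    buffer = lines.pop() ?? ''
    for (const line of lines) {
      if (line.trim()) onCaption(JSON.parse(line) as CaptionResult)
    }
  }

  // Flush any final line that arrived without a trailing newline.
  if (buffer.trim()) onCaption(JSON.parse(buffer) as CaptionResult)
}
```

Splitting on newlines and holding the trailing partial line in a buffer is what lets each caption render the moment its line arrives, rather than after the whole response body completes.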