diff --git a/content/cookbook/00-guides/20-sonnet-3-7.mdx b/content/cookbook/00-guides/20-sonnet-3-7.mdx index c055dec44ed0..9f62b1dd7e95 100644 --- a/content/cookbook/00-guides/20-sonnet-3-7.mdx +++ b/content/cookbook/00-guides/20-sonnet-3-7.mdx @@ -26,7 +26,7 @@ At the center of the AI SDK is [AI SDK Core](/docs/ai-sdk-core/overview), which import { anthropic } from '@ai-sdk/anthropic'; import { generateText } from 'ai'; -const { text, reasoning, reasoningDetails } = await generateText({ +const { text, reasoningText, reasoning } = await generateText({ model: anthropic('claude-3-7-sonnet-20250219'), prompt: 'How many people will live in the world in 2040?', }); @@ -53,7 +53,7 @@ Claude 3.7 Sonnet introduces a new extended thinking—the ability to solve comp import { anthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic'; import { generateText } from 'ai'; -const { text, reasoning, reasoningDetails } = await generateText({ +const { text, reasoningText, reasoning } = await generateText({ model: anthropic('claude-3-7-sonnet-20250219'), prompt: 'How many people will live in the world in 2040?', providerOptions: { @@ -63,8 +63,8 @@ const { text, reasoning, reasoningDetails } = await generateText({ }, }); -console.log(reasoning); // reasoning text -console.log(reasoningDetails); // reasoning details including redacted reasoning +console.log(reasoningText); // reasoning text +console.log(reasoning); // reasoning details including redacted reasoning console.log(text); // text response ``` diff --git a/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx b/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx index 339060af3c90..eb35138d3cb4 100644 --- a/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx @@ -824,6 +824,196 @@ To see `generateText` in action, check out [these examples](#examples). description: 'Optional metadata from the provider. 
The outer key is the provider name. The inner values are the metadata. Details depend on the provider.', }, + { + name: 'text', + type: 'string', + description: 'The full text that has been generated.', + }, + { + name: 'reasoningText', + type: 'string | undefined', + description: + 'The reasoning text of the model (only available for some models).', + }, + { + name: 'reasoning', + type: 'Array', + description: + 'The reasoning details of the model (only available for some models).', + properties: [ + { + type: 'ReasoningDetail', + parameters: [ + { + name: 'type', + type: "'text'", + description: 'The type of the reasoning detail.', + }, + { + name: 'text', + type: 'string', + description: 'The text content (only for type "text").', + }, + { + name: 'signature', + type: 'string', + isOptional: true, + description: 'Optional signature (only for type "text").', + }, + ], + }, + { + type: 'ReasoningDetail', + parameters: [ + { + name: 'type', + type: "'redacted'", + description: 'The type of the reasoning detail.', + }, + { + name: 'data', + type: 'string', + description: + 'The redacted data content (only for type "redacted").', + }, + ], + }, + ], + }, + { + name: 'sources', + type: 'Array', + description: + 'Sources that have been used as input to generate the response. For multi-step generation, the sources are accumulated from all steps.', + properties: [ + { + type: 'Source', + parameters: [ + { + name: 'sourceType', + type: "'url'", + description: + 'A URL source. 
This is returned by web search RAG models.', + }, + { + name: 'id', + type: 'string', + description: 'The ID of the source.', + }, + { + name: 'url', + type: 'string', + description: 'The URL of the source.', + }, + { + name: 'title', + type: 'string', + isOptional: true, + description: 'The title of the source.', + }, + { + name: 'providerMetadata', + type: 'SharedV2ProviderMetadata', + isOptional: true, + description: + 'Additional provider metadata for the source.', + }, + ], + }, + ], + }, + { + name: 'files', + type: 'Array', + description: 'Files that were generated in the final step.', + properties: [ + { + type: 'GeneratedFile', + parameters: [ + { + name: 'base64', + type: 'string', + description: 'File as a base64 encoded string.', + }, + { + name: 'uint8Array', + type: 'Uint8Array', + description: 'File as a Uint8Array.', + }, + { + name: 'mediaType', + type: 'string', + description: 'The IANA media type of the file.', + }, + ], + }, + ], + }, + { + name: 'toolCalls', + type: 'ToolCall[]', + description: 'The tool calls that have been executed.', + }, + { + name: 'toolResults', + type: 'ToolResult[]', + description: 'The tool results that have been generated.', + }, + { + name: 'warnings', + type: 'Warning[] | undefined', + description: + 'Warnings from the model provider (e.g. unsupported settings).', + }, + { + name: 'response', + type: 'Response', + isOptional: true, + description: 'Response metadata.', + properties: [ + { + type: 'Response', + parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. 
The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, + { + name: 'headers', + isOptional: true, + type: 'Record', + description: 'Optional response headers.', + }, + { + name: 'messages', + type: 'Array', + description: + 'The response messages that were generated during the call. It consists of an assistant message, potentially containing tool calls. When there are tool results, there is an additional tool message with the tool results that are available. If there are tools that do not have execute functions, they are not included in the tool results and need to be added separately.', + }, + ], + }, + ], + }, + { + name: 'steps', + type: 'Array', + description: + 'Response information for every step. You can use this to get information about intermediate steps, such as the tool calls or the response headers.', + }, ], }, ], diff --git a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx index 5789a891ad4b..7e0e8bd28ec2 100644 --- a/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +++ b/content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx @@ -1008,7 +1008,7 @@ To see `streamText` in action, check out [these examples](#examples). description: 'The full text that has been generated.', }, { - name: 'reasoning', + name: 'reasoningText', type: 'string | undefined', description: 'The reasoning text of the model (only available for some models).', }, { @@ -1225,7 +1225,7 @@ To see `streamText` in action, check out [these examples](#examples). 
'The reasoning text of the model (only available for some models).', }, { - name: 'reasoningDetails', + name: 'reasoning', type: 'Array', description: 'The reasoning details of the model (only available for some models).', diff --git a/content/providers/01-ai-sdk-providers/05-anthropic.mdx b/content/providers/01-ai-sdk-providers/05-anthropic.mdx index 8dc666bc77a9..f412343a29d5 100644 --- a/content/providers/01-ai-sdk-providers/05-anthropic.mdx +++ b/content/providers/01-ai-sdk-providers/05-anthropic.mdx @@ -225,7 +225,7 @@ and specifying a thinking budget in tokens. import { anthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic'; import { generateText } from 'ai'; -const { text, reasoning, reasoningDetails } = await generateText({ +const { text, reasoningText, reasoning } = await generateText({ model: anthropic('claude-opus-4-20250514'), prompt: 'How many people will live in the world in 2040?', providerOptions: { @@ -235,8 +235,8 @@ const { text, reasoning, reasoningDetails } = await generateText({ }, }); -console.log(reasoning); // reasoning text -console.log(reasoningDetails); // reasoning details including redacted reasoning +console.log(reasoningText); // reasoning text +console.log(reasoning); // reasoning details including redacted reasoning console.log(text); // text response ``` diff --git a/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx b/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx index 1cf9564cafaa..eeb40f1b95a8 100644 --- a/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx +++ b/content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx @@ -436,7 +436,7 @@ const anthropicResult = await generateText({ }, }); -console.log(anthropicResult.reasoning); // reasoning text +console.log(anthropicResult.reasoningText); // reasoning text console.log(anthropicResult.text); // text response // Nova 2 example @@ -450,7 +450,7 @@ const amazonResult = await generateText({ }, }); -console.log(amazonResult.reasoning); // 
reasoning text +console.log(amazonResult.reasoningText); // reasoning text console.log(amazonResult.text); // text response ``` diff --git a/content/providers/01-ai-sdk-providers/16-google-vertex.mdx b/content/providers/01-ai-sdk-providers/16-google-vertex.mdx index 3a7a7ec9ea06..239c08f30dde 100644 --- a/content/providers/01-ai-sdk-providers/16-google-vertex.mdx +++ b/content/providers/01-ai-sdk-providers/16-google-vertex.mdx @@ -394,7 +394,7 @@ import { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'; // Note: imp import { generateText, streamText } from 'ai'; // For generateText: -const { text, reasoning, reasoningDetails } = await generateText({ +const { text, reasoningText, reasoning } = await generateText({ model: vertex('gemini-2.0-flash-001'), // Or other supported model via Vertex providerOptions: { google: { @@ -408,8 +408,8 @@ const { text, reasoning, reasoningDetails } = await generateText({ prompt: 'Explain quantum computing in simple terms.', }); -console.log('Reasoning:', reasoning); -console.log('Reasoning Details:', reasoningDetails); +console.log('Reasoning:', reasoningText); +console.log('Reasoning Details:', reasoning); console.log('Final Text:', text); // For streamText: @@ -438,7 +438,7 @@ for await (const part of result.fullStream) { When `includeThoughts` is true, parts of the API response marked with `thought: true` will be processed as reasoning. -- In `generateText`, these contribute to the `reasoning` (string) and `reasoningDetails` (array) fields. +- In `generateText`, these contribute to the `reasoningText` (string) and `reasoning` (array) fields. - In `streamText`, these are emitted as `reasoning` stream parts. @@ -970,7 +970,7 @@ and specifying a thinking budget in tokens. 
import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic'; import { generateText } from 'ai'; -const { text, reasoning, reasoningDetails } = await generateText({ +const { text, reasoningText, reasoning } = await generateText({ model: vertexAnthropic('claude-3-7-sonnet@20250219'), prompt: 'How many people will live in the world in 2040?', providerOptions: { @@ -980,8 +980,8 @@ const { text, reasoning, reasoningDetails } = await generateText({ }, }); -console.log(reasoning); // reasoning text -console.log(reasoningDetails); // reasoning details including redacted reasoning +console.log(reasoningText); // reasoning text +console.log(reasoning); // reasoning details including redacted reasoning console.log(text); // text response ```