From c5c8292b631c678efff5498bbab9f5a43bee50b6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 27 Sep 2025 02:34:59 +0000 Subject: [PATCH 01/26] chore(internal): fix incremental formatting in some cases --- scripts/fast-format | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/fast-format b/scripts/fast-format index 03fb1a3..8a8e9d5 100755 --- a/scripts/fast-format +++ b/scripts/fast-format @@ -35,6 +35,6 @@ echo "==> Running prettier --write" PRETTIER_FILES="$(grep '\.\(js\|json\)$' "$FILE_LIST" || true)" if ! [ -z "$PRETTIER_FILES" ]; then echo "$PRETTIER_FILES" | xargs ./node_modules/.bin/prettier \ - --write --cache --cache-strategy metadata \ + --write --cache --cache-strategy metadata --no-error-on-unmatched-pattern \ '!**/dist' '!**/*.ts' '!**/*.mts' '!**/*.cts' '!**/*.js' '!**/*.mjs' '!**/*.cjs' fi From 34da720c34c35dafb38775243d28dfbdce2497db Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 27 Sep 2025 02:39:02 +0000 Subject: [PATCH 02/26] chore(internal): codegen related update --- .devcontainer/devcontainer.json | 4 +--- release-please-config.json | 5 +---- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 763462f..43fd5a7 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -9,9 +9,7 @@ "postCreateCommand": "yarn install", "customizations": { "vscode": { - "extensions": [ - "esbenp.prettier-vscode" - ] + "extensions": ["esbenp.prettier-vscode"] } } } diff --git a/release-please-config.json b/release-please-config.json index 624ed99..1ebd0bd 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -60,8 +60,5 @@ } ], "release-type": "node", - "extra-files": [ - "src/version.ts", - "README.md" - ] + "extra-files": ["src/version.ts", "README.md"] } From 252e0a2a38bd8aedab91b401c440a9b10c056cec Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 27 Sep 2025 02:54:11 +0000 Subject: [PATCH 03/26] chore(internal): codegen related update --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d98d51a..2412bb7 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ dist dist-deno /*.tgz .idea/ +.eslintcache From b5432de2ad56ff0d2fd5a5b8e1755b5237616b60 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 27 Sep 2025 20:32:34 +0000 Subject: [PATCH 04/26] feat(api): removing openai/v1 --- .stats.yml | 8 +- README.md | 35 ++- api.md | 64 ++-- src/index.ts | 15 - src/resources/chat/completions.ts | 19 +- src/resources/completions.ts | 8 +- src/resources/embeddings.ts | 2 +- src/resources/files.ts | 17 +- src/resources/index.ts | 7 - src/resources/inference.ts | 298 +------------------ src/resources/models/models.ts | 21 +- src/resources/models/openai.ts | 2 +- src/resources/moderations.ts | 2 +- src/resources/responses/input-items.ts | 2 +- src/resources/responses/responses.ts | 14 +- src/resources/shared.ts | 14 +- src/resources/tool-runtime/tool-runtime.ts | 10 + src/resources/tools.ts | 10 + src/resources/vector-stores/files.ts | 15 +- src/resources/vector-stores/vector-stores.ts | 15 +- tests/api-resources/agents/agents.test.ts | 2 + tests/api-resources/files.test.ts | 4 + tests/api-resources/inference.test.ts | 104 +------ 
23 files changed, 166 insertions(+), 522 deletions(-) diff --git a/.stats.yml b/.stats.yml index fa9edfc..e5bf0be 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml -openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3 -config_hash: 0358112cc0f3d880b4d55debdbe1cfa3 +configured_endpoints: 107 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-1eddf141208c131ee4a64ef996f8f419b444f60450de6807a9f6bc711ed8b661.yml +openapi_spec_hash: 94765c67ea99b1358169d41d810dd395 +config_hash: 7ec5a583f9c26b38993013bdfb0e7d46 diff --git a/README.md b/README.md index a27b8c1..092025e 100644 --- a/README.md +++ b/README.md @@ -92,17 +92,42 @@ import LlamaStackClient, { toFile } from 'llama-stack-client'; const client = new LlamaStackClient(); // If you have access to Node `fs` we recommend using `fs.createReadStream()`: -await client.files.create({ file: fs.createReadStream('/path/to/file'), purpose: 'assistants' }); +await client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, + file: fs.createReadStream('/path/to/file'), + purpose: 'assistants', +}); // Or if you have the web `File` API you can pass a `File` instance: -await client.files.create({ file: new File(['my bytes'], 'file'), purpose: 'assistants' }); +await client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, + file: new File(['my bytes'], 'file'), + purpose: 'assistants', +}); // You can also pass a `fetch` `Response`: -await client.files.create({ file: await fetch('https://somesite/file'), purpose: 'assistants' }); +await client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, + file: await fetch('https://somesite/file'), + purpose: 'assistants', +}); // Finally, if none of the above are convenient, you can use our `toFile` helper: -await client.files.create({ file: await toFile(Buffer.from('my bytes'), 'file'), purpose: 'assistants' }); -await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'file'), purpose: 'assistants' }); +await client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, + file: await toFile(Buffer.from('my bytes'), 'file'), + purpose: 'assistants', +}); +await client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, + file: await toFile(new Uint8Array([0, 1, 2]), 'file'), + purpose: 'assistants', +}); ``` ## Handling errors diff --git a/api.md b/api.md index 01d88a5..8c7747f 100644 --- a/api.md +++ b/api.md @@ -3,7 +3,6 @@ Types: - AgentConfig -- BatchCompletion - ChatCompletionResponse - CompletionMessage - ContentDelta @@ -85,10 +84,10 @@ Types: Methods: -- client.responses.create({ ...params }) -> ResponseObject -- client.responses.retrieve(responseId) -> ResponseObject -- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage -- client.responses.delete(responseId) -> ResponseDeleteResponse +- client.responses.create({ ...params }) -> ResponseObject +- client.responses.retrieve(responseId) -> ResponseObject +- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage +- client.responses.delete(responseId) -> ResponseDeleteResponse ## InputItems @@ -98,7 +97,7 @@ Types: Methods: -- 
client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse +- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse # Agents @@ -222,18 +221,13 @@ Methods: Types: - ChatCompletionResponseStreamChunk -- CompletionResponse - EmbeddingsResponse - TokenLogProbs -- InferenceBatchChatCompletionResponse - InferenceRerankResponse Methods: -- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse -- client.inference.batchCompletion({ ...params }) -> BatchCompletion - client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse -- client.inference.completion({ ...params }) -> CompletionResponse - client.inference.embeddings({ ...params }) -> EmbeddingsResponse - client.inference.rerank({ ...params }) -> InferenceRerankResponse @@ -245,7 +239,7 @@ Types: Methods: -- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse +- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse # Chat @@ -263,9 +257,9 @@ Types: Methods: -- client.chat.completions.create({ ...params }) -> CompletionCreateResponse -- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse -- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage +- client.chat.completions.create({ ...params }) -> CompletionCreateResponse +- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse +- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage # Completions @@ -275,7 +269,7 @@ Types: Methods: -- client.completions.create({ ...params }) -> CompletionCreateResponse +- client.completions.create({ ...params }) -> CompletionCreateResponse # VectorIo @@ -315,12 +309,12 @@ Types: Methods: -- client.vectorStores.create({ ...params }) -> VectorStore -- client.vectorStores.retrieve(vectorStoreId) -> VectorStore -- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore -- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage -- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse -- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage +- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse +- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse ## Files @@ -332,12 +326,12 @@ Types: Methods: -- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile -- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile -- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile -- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage -- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse -- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse +- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile +- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreId, { ...params }) -> 
VectorStoreFilesOpenAICursorPage +- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse # Models @@ -362,7 +356,7 @@ Types: Methods: -- client.models.openai.list() -> OpenAIListResponse +- client.models.openai.list() -> OpenAIListResponse # PostTraining @@ -423,7 +417,7 @@ Types: Methods: -- client.moderations.create({ ...params }) -> CreateResponse +- client.moderations.create({ ...params }) -> CreateResponse # Safety @@ -538,8 +532,8 @@ Types: Methods: -- client.files.create({ ...params }) -> File -- client.files.retrieve(fileId) -> File -- client.files.list({ ...params }) -> FilesOpenAICursorPage -- client.files.delete(fileId) -> DeleteFileResponse -- client.files.content(fileId) -> unknown +- client.files.create({ ...params }) -> File +- client.files.retrieve(fileId) -> File +- client.files.list({ ...params }) -> FilesOpenAICursorPage +- client.files.delete(fileId) -> DeleteFileResponse +- client.files.content(fileId) -> unknown diff --git a/src/index.ts b/src/index.ts index 68d219d..cb8689b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -51,18 +51,11 @@ import { } from './resources/files'; import { ChatCompletionResponseStreamChunk, - CompletionResponse, EmbeddingsResponse, Inference, - InferenceBatchChatCompletionParams, - InferenceBatchChatCompletionResponse, - InferenceBatchCompletionParams, InferenceChatCompletionParams, InferenceChatCompletionParamsNonStreaming, InferenceChatCompletionParamsStreaming, - InferenceCompletionParams, - InferenceCompletionParamsNonStreaming, - InferenceCompletionParamsStreaming, InferenceEmbeddingsParams, InferenceRerankParams, InferenceRerankResponse, @@ -537,19 +530,12 @@ export declare namespace LlamaStackClient { export { Inference as Inference, type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type CompletionResponse as CompletionResponse, type EmbeddingsResponse as EmbeddingsResponse, type TokenLogProbs as TokenLogProbs, - type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse, type InferenceRerankResponse as InferenceRerankResponse, - type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams, - type InferenceBatchCompletionParams as InferenceBatchCompletionParams, type InferenceChatCompletionParams as InferenceChatCompletionParams, type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, - type InferenceCompletionParams as InferenceCompletionParams, - type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming, - type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming, type InferenceEmbeddingsParams as InferenceEmbeddingsParams, type InferenceRerankParams as InferenceRerankParams, }; @@ -711,7 +697,6 @@ export declare namespace LlamaStackClient { }; export type AgentConfig = API.AgentConfig; - export type BatchCompletion = API.BatchCompletion; export type ChatCompletionResponse = API.ChatCompletionResponse; export type CompletionMessage = API.CompletionMessage; export type ContentDelta = API.ContentDelta; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index c7ed5e8..b76ee5d 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -30,18 +30,16 @@ export class Completions extends APIResource { body: CompletionCreateParams, 
options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/v1/openai/v1/chat/completions', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + return this._client.post('/v1/chat/completions', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; } /** * Describe a chat completion by its ID. */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/chat/completions/${completionId}`, options); + return this._client.get(`/v1/chat/completions/${completionId}`, options); } /** @@ -61,11 +59,10 @@ export class Completions extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList( - '/v1/openai/v1/chat/completions', - CompletionListResponsesOpenAICursorPage, - { query, ...options }, - ); + return this._client.getAPIList('/v1/chat/completions', CompletionListResponsesOpenAICursorPage, { + query, + ...options, + }); } } diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 0ade7ab..be435b9 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -27,11 +27,9 @@ export class Completions extends APIResource { body: CompletionCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/v1/openai/v1/completions', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + return this._client.post('/v1/completions', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; } } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 89758af..ff5d371 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -12,7 +12,7 @@ export class Embeddings extends APIResource { body: EmbeddingCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/v1/openai/v1/embeddings', { body, ...options }); + return this._client.post('/v1/embeddings', { body, ...options }); } } diff --git a/src/resources/files.ts b/src/resources/files.ts index 4dc5223..77edc6c 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -12,16 +12,19 @@ export class Files extends APIResource { * * - file: The File object (not file name) to be uploaded. * - purpose: The intended purpose of the uploaded file. + * - expires_after: Optional form values describing expiration for the file. + * Expected expires_after[anchor] = "created_at", expires_after[seconds] = + * {integer}. Seconds must be between 3600 and 2592000 (1 hour to 30 days). */ create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/openai/v1/files', Core.multipartFormRequestOptions({ body, ...options })); + return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options })); } /** * Returns information about a specific file. 
*/ retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/files/${fileId}`, options); + return this._client.get(`/v1/files/${fileId}`, options); } /** @@ -36,21 +39,21 @@ export class Files extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList('/v1/openai/v1/files', FilesOpenAICursorPage, { query, ...options }); + return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options }); } /** * Delete a file. */ delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/files/${fileId}`, options); + return this._client.delete(`/v1/files/${fileId}`, options); } /** * Returns the contents of the specified file. */ content(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/files/${fileId}/content`, options); + return this._client.get(`/v1/files/${fileId}/content`, options); } } @@ -149,6 +152,10 @@ export interface ListFilesResponse { export type FileContentResponse = unknown; export interface FileCreateParams { + expires_after_anchor: string | null; + + expires_after_seconds: number | null; + file: Core.Uploadable; /** diff --git a/src/resources/index.ts b/src/resources/index.ts index 58ad928..19ffaf6 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -65,19 +65,12 @@ export { export { Inference, type ChatCompletionResponseStreamChunk, - type CompletionResponse, type EmbeddingsResponse, type TokenLogProbs, - type InferenceBatchChatCompletionResponse, type InferenceRerankResponse, - type InferenceBatchChatCompletionParams, - type InferenceBatchCompletionParams, type InferenceChatCompletionParams, type InferenceChatCompletionParamsNonStreaming, type InferenceChatCompletionParamsStreaming, - type InferenceCompletionParams, - type InferenceCompletionParamsNonStreaming, - type InferenceCompletionParamsStreaming, type InferenceEmbeddingsParams, type InferenceRerankParams, } from './inference'; diff --git a/src/resources/inference.ts b/src/resources/inference.ts index a6f3e1e..bf60dc6 100644 --- a/src/resources/inference.ts +++ b/src/resources/inference.ts @@ -8,30 +8,10 @@ import * as Shared from './shared'; import { Stream } from '../streaming'; export class Inference extends APIResource { - /** - * Generate chat completions for a batch of messages using the specified model. - */ - batchChatCompletion( - body: InferenceBatchChatCompletionParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/inference/batch-chat-completion', { body, ...options }); - } - - /** - * Generate completions for a batch of content using the specified model. - */ - batchCompletion( - body: InferenceBatchCompletionParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/inference/batch-completion', { body, ...options }); - } - /** * Generate a chat completion for the given messages using the specified model. * - * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions. + * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/chat/completions. */ chatCompletion( body: InferenceChatCompletionParamsNonStreaming, @@ -56,38 +36,10 @@ export class Inference extends APIResource { }) as APIPromise | APIPromise>; } - /** - * Generate a completion for the given content using the specified model. 
- * - * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions. - */ - completion( - body: InferenceCompletionParamsNonStreaming, - options?: Core.RequestOptions, - ): APIPromise; - completion( - body: InferenceCompletionParamsStreaming, - options?: Core.RequestOptions, - ): APIPromise>; - completion( - body: InferenceCompletionParamsBase, - options?: Core.RequestOptions, - ): APIPromise | CompletionResponse>; - completion( - body: InferenceCompletionParams, - options?: Core.RequestOptions, - ): APIPromise | APIPromise> { - return this._client.post('/v1/inference/completion', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; - } - /** * Generate embeddings for content pieces using the specified model. * - * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings. + * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/embeddings. */ embeddings( body: InferenceEmbeddingsParams, @@ -154,31 +106,6 @@ export namespace ChatCompletionResponseStreamChunk { } } -/** - * Response from a completion request. - */ -export interface CompletionResponse { - /** - * The generated completion text - */ - content: string; - - /** - * Reason why generation stopped - */ - stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - /** - * (Optional) List of metrics associated with the API response - */ - metrics?: Array; -} - /** * Response containing generated embeddings. */ @@ -201,16 +128,6 @@ export interface TokenLogProbs { logprobs_by_token: { [key: string]: number }; } -/** - * Response from a batch chat completion request. - */ -export interface InferenceBatchChatCompletionResponse { - /** - * List of chat completion responses, one for each conversation in the batch - */ - batch: Array; -} - /** * List of rerank result objects, sorted by relevance score (descending) */ @@ -234,139 +151,6 @@ export namespace InferenceRerankResponse { } } -export interface InferenceBatchChatCompletionParams { - /** - * The messages to generate completions for. - */ - messages_batch: Array>; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceBatchChatCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. - */ - response_format?: Shared.ResponseFormat; - - /** - * (Optional) Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; - - /** - * (Optional) Configuration for tool use. - */ - tool_config?: InferenceBatchChatCompletionParams.ToolConfig; - - /** - * (Optional) List of tool definitions available to the model. - */ - tools?: Array; -} - -export namespace InferenceBatchChatCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } - - /** - * (Optional) Configuration for tool use. - */ - export interface ToolConfig { - /** - * (Optional) Config for how to override the default system prompt. 
- - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. - */ - system_message_behavior?: 'append' | 'replace'; - - /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. - */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); - - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - } - - export interface Tool { - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - description?: string; - - parameters?: { [key: string]: Shared.ToolParamDefinition }; - } -} - -export interface InferenceBatchCompletionParams { - /** - * The content to generate completions for. - */ - content_batch: Array; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceBatchCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. - */ - response_format?: Shared.ResponseFormat; - - /** - * (Optional) Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; -} - -export namespace InferenceBatchCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } -} - export type InferenceChatCompletionParams = | InferenceChatCompletionParamsNonStreaming | InferenceChatCompletionParamsStreaming; @@ -508,77 +292,6 @@ export interface InferenceChatCompletionParamsStreaming extends InferenceChatCom stream: true; } -export type InferenceCompletionParams = - | InferenceCompletionParamsNonStreaming - | InferenceCompletionParamsStreaming; - -export interface InferenceCompletionParamsBase { - /** - * The content to generate a completion for. - */ - content: Shared.InterleavedContent; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. - */ - response_format?: Shared.ResponseFormat; - - /** - * (Optional) Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; - - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. 
- */ - stream?: boolean; -} - -export namespace InferenceCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } - - export type InferenceCompletionParamsNonStreaming = InferenceAPI.InferenceCompletionParamsNonStreaming; - export type InferenceCompletionParamsStreaming = InferenceAPI.InferenceCompletionParamsStreaming; -} - -export interface InferenceCompletionParamsNonStreaming extends InferenceCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: false; -} - -export interface InferenceCompletionParamsStreaming extends InferenceCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream: true; -} - export interface InferenceEmbeddingsParams { /** * List of contents to generate embeddings for. Each content can be a string or an @@ -743,19 +456,12 @@ export namespace InferenceRerankParams { export declare namespace Inference { export { type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type CompletionResponse as CompletionResponse, type EmbeddingsResponse as EmbeddingsResponse, type TokenLogProbs as TokenLogProbs, - type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse, type InferenceRerankResponse as InferenceRerankResponse, - type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams, - type InferenceBatchCompletionParams as InferenceBatchCompletionParams, type InferenceChatCompletionParams as InferenceChatCompletionParams, type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, - type InferenceCompletionParams as InferenceCompletionParams, - type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming, - type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming, type InferenceEmbeddingsParams as InferenceEmbeddingsParams, type InferenceRerankParams as InferenceRerankParams, }; diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts index d72281f..fe49ac0 100644 --- a/src/resources/models/models.ts +++ b/src/resources/models/models.ts @@ -16,7 +16,7 @@ export class Models extends APIResource { } /** - * List all models. + * List models using the OpenAI API. */ list(options?: Core.RequestOptions): Core.APIPromise { return ( @@ -43,7 +43,7 @@ export class Models extends APIResource { } export interface ListModelsResponse { - data: ModelListResponse; + data: Array; } /** @@ -81,7 +81,22 @@ export interface Model { provider_resource_id?: string; } -export type ModelListResponse = Array; +export type ModelListResponse = Array; + +export namespace ModelListResponse { + /** + * A model from OpenAI. 
+ */ + export interface ModelListResponseItem { + id: string; + + created: number; + + object: 'model'; + + owned_by: string; + } +} export interface ModelRegisterParams { /** diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts index bcdef6f..4190759 100644 --- a/src/resources/models/openai.ts +++ b/src/resources/models/openai.ts @@ -9,7 +9,7 @@ export class OpenAI extends APIResource { */ list(options?: Core.RequestOptions): Core.APIPromise { return ( - this._client.get('/v1/openai/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }> + this._client.get('/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }> )._thenUnwrap((obj) => obj.data); } } diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index a945ab3..b824f10 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -8,7 +8,7 @@ export class Moderations extends APIResource { * Classifies if text and/or image inputs are potentially harmful. */ create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/openai/v1/moderations', { body, ...options }); + return this._client.post('/v1/moderations', { body, ...options }); } } diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index 74c556c..5ad384d 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -22,7 +22,7 @@ export class InputItems extends APIResource { if (isRequestOptions(query)) { return this.list(responseId, {}, query); } - return this._client.get(`/v1/openai/v1/responses/${responseId}/input_items`, { query, ...options }); + return this._client.get(`/v1/responses/${responseId}/input_items`, { query, ...options }); } } diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index e329519..5113b1a 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -29,18 +29,16 @@ export class Responses extends APIResource { body: ResponseCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/v1/openai/v1/responses', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; } /** * Retrieve an OpenAI response by its ID. */ retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/responses/${responseId}`, options); + return this._client.get(`/v1/responses/${responseId}`, options); } /** @@ -60,7 +58,7 @@ export class Responses extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList('/v1/openai/v1/responses', ResponseListResponsesOpenAICursorPage, { + return this._client.getAPIList('/v1/responses', ResponseListResponsesOpenAICursorPage, { query, ...options, }); @@ -70,7 +68,7 @@ export class Responses extends APIResource { * Delete an OpenAI response by its ID. 
*/ delete(responseId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/responses/${responseId}`, options); + return this._client.delete(`/v1/responses/${responseId}`, options); } } diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 00c767f..bd5f670 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -105,16 +105,6 @@ export namespace AgentConfig { } } -/** - * Response from a batch completion request. - */ -export interface BatchCompletion { - /** - * List of completion responses, one for each input in the batch - */ - batch: Array; -} - /** * Response from a chat completion request. */ @@ -940,7 +930,11 @@ export interface ToolParamDefinition { description?: string; + items?: boolean | number | string | Array | unknown | null; + required?: boolean; + + title?: string; } /** diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts index ca1a6c8..058779e 100644 --- a/src/resources/tool-runtime/tool-runtime.ts +++ b/src/resources/tool-runtime/tool-runtime.ts @@ -97,6 +97,16 @@ export namespace ToolDef { * (Optional) Default value for the parameter if not provided */ default?: boolean | number | string | Array | unknown | null; + + /** + * Type of the elements when parameter_type is array + */ + items?: unknown; + + /** + * (Optional) Title of the parameter + */ + title?: string; } } diff --git a/src/resources/tools.ts b/src/resources/tools.ts index ba35360..ab05a60 100644 --- a/src/resources/tools.ts +++ b/src/resources/tools.ts @@ -105,6 +105,16 @@ export namespace Tool { * (Optional) Default value for the parameter if not provided */ default?: boolean | number | string | Array | unknown | null; + + /** + * Type of the elements when parameter_type is array + */ + items?: unknown; + + /** + * (Optional) Title of the parameter + */ + title?: string; } } diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts index bc950cc..9af2869 100644 --- a/src/resources/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -14,7 +14,7 @@ export class Files extends APIResource { body: FileCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files`, { body, ...options }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}/files`, { body, ...options }); } /** @@ -25,7 +25,7 @@ export class Files extends APIResource { fileId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); + return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); } /** @@ -37,10 +37,7 @@ export class Files extends APIResource { body: FileUpdateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, { - body, - ...options, - }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, { body, ...options }); } /** @@ -64,7 +61,7 @@ export class Files extends APIResource { return this.list(vectorStoreId, {}, query); } return this._client.getAPIList( - `/v1/openai/v1/vector_stores/${vectorStoreId}/files`, + `/v1/vector_stores/${vectorStoreId}/files`, VectorStoreFilesOpenAICursorPage, { query, ...options }, ); @@ -78,7 +75,7 @@ export class Files extends APIResource { fileId: string, options?: Core.RequestOptions, 
): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); + return this._client.delete(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); } /** @@ -89,7 +86,7 @@ export class Files extends APIResource { fileId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); + return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); } } diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index e8994e2..459fb54 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -23,14 +23,14 @@ export class VectorStores extends APIResource { * Creates a vector store. */ create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/openai/v1/vector_stores', { body, ...options }); + return this._client.post('/v1/vector_stores', { body, ...options }); } /** * Retrieves a vector store. */ retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options); + return this._client.get(`/v1/vector_stores/${vectorStoreId}`, options); } /** @@ -41,7 +41,7 @@ export class VectorStores extends APIResource { body: VectorStoreUpdateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}`, { body, ...options }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}`, { body, ...options }); } /** @@ -59,17 +59,14 @@ export class VectorStores extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList('/v1/openai/v1/vector_stores', VectorStoresOpenAICursorPage, { - query, - ...options, - }); + return this._client.getAPIList('/v1/vector_stores', VectorStoresOpenAICursorPage, { query, ...options }); } /** * Delete a vector store. 
*/ delete(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options); + return this._client.delete(`/v1/vector_stores/${vectorStoreId}`, options); } /** @@ -81,7 +78,7 @@ export class VectorStores extends APIResource { body: VectorStoreSearchParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/search`, { body, ...options }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}/search`, { body, ...options }); } } diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/agents/agents.test.ts index 2f22dff..7fbf1d7 100644 --- a/tests/api-resources/agents/agents.test.ts +++ b/tests/api-resources/agents/agents.test.ts @@ -36,6 +36,8 @@ describe('resource agents', () => { parameter_type: 'parameter_type', required: true, default: true, + items: {}, + title: 'title', }, ], }, diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 6482b2e..0dcbd3e 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -8,6 +8,8 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource files', () => { test('create: only required params', async () => { const responsePromise = client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, file: await toFile(Buffer.from('# my file contents'), 'README.md'), purpose: 'assistants', }); @@ -22,6 +24,8 @@ describe('resource files', () => { test('create: required and optional params', async () => { const response = await client.files.create({ + expires_after_anchor: 'expires_after_anchor', + expires_after_seconds: 0, file: await toFile(Buffer.from('# my file contents'), 'README.md'), purpose: 'assistants', }); diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/inference.test.ts index e7d5df3..94be46c 100644 --- a/tests/api-resources/inference.test.ts +++ b/tests/api-resources/inference.test.ts @@ -6,74 +6,6 @@ import { Response } from 'node-fetch'; const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource inference', () => { - test('batchChatCompletion: only required params', async () => { - const responsePromise = client.inference.batchChatCompletion({ - messages_batch: [[{ content: 'string', role: 'user' }]], - model_id: 'model_id', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('batchChatCompletion: required and optional params', async () => { - const response = await client.inference.batchChatCompletion({ - messages_batch: [[{ content: 'string', role: 'user', context: 'string' }]], - model_id: 'model_id', - logprobs: { top_k: 0 }, - response_format: { json_schema: { foo: true }, type: 'json_schema' }, - sampling_params: { - strategy: { type: 'greedy' }, - max_tokens: 0, - repetition_penalty: 0, - stop: ['string'], - }, - tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' }, - tools: [ - { - tool_name: 'brave_search', - description: 'description', - parameters: { - foo: { param_type: 'param_type', default: true, description: 'description', required: true }, - }, - }, - ], - }); - }); - - test('batchCompletion: only required params', async () => { - const responsePromise = client.inference.batchCompletion({ - content_batch: ['string'], - model_id: 'model_id', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('batchCompletion: required and optional params', async () => { - const response = await client.inference.batchCompletion({ - content_batch: ['string'], - model_id: 'model_id', - logprobs: { top_k: 0 }, - response_format: { json_schema: { foo: true }, type: 'json_schema' }, - sampling_params: { - strategy: { type: 'greedy' }, - max_tokens: 0, - repetition_penalty: 0, - stop: ['string'], - }, - }); - }); - test('chatCompletion: only required params', async () => { const responsePromise = client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], @@ -109,40 +41,20 @@ describe('resource inference', () => { tool_name: 'brave_search', description: 'description', parameters: { - foo: { param_type: 'param_type', default: true, description: 'description', required: true }, + foo: { + param_type: 'param_type', + default: true, + description: 'description', + items: true, + required: true, + title: 'title', + }, }, }, ], }); }); - test('completion: only required params', async () => { - const responsePromise = client.inference.completion({ content: 'string', model_id: 'model_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('completion: required and optional params', async () => { - const 
response = await client.inference.completion({ - content: 'string', - model_id: 'model_id', - logprobs: { top_k: 0 }, - response_format: { json_schema: { foo: true }, type: 'json_schema' }, - sampling_params: { - strategy: { type: 'greedy' }, - max_tokens: 0, - repetition_penalty: 0, - stop: ['string'], - }, - stream: false, - }); - }); - test('embeddings: only required params', async () => { const responsePromise = client.inference.embeddings({ contents: ['string'], model_id: 'model_id' }); const rawResponse = await responsePromise.asResponse(); From a0b0fb7aa74668f3f6996c178f9654723b8b0f22 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 01:55:34 +0000 Subject: [PATCH 05/26] feat(api): expires_after changes for /files --- .stats.yml | 6 +- README.md | 35 +--- api.md | 6 +- src/index.ts | 8 - src/resources/files.ts | 11 -- src/resources/index.ts | 4 - src/resources/inference.ts | 226 -------------------------- src/resources/models/models.ts | 21 +-- src/resources/models/openai.ts | 24 +-- src/resources/responses/responses.ts | 10 -- tests/api-resources/files.test.ts | 6 - tests/api-resources/inference.test.ts | 41 ----- 12 files changed, 17 insertions(+), 381 deletions(-) diff --git a/.stats.yml b/.stats.yml index e5bf0be..016bf7b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 107 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-1eddf141208c131ee4a64ef996f8f419b444f60450de6807a9f6bc711ed8b661.yml -openapi_spec_hash: 94765c67ea99b1358169d41d810dd395 +configured_endpoints: 105 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-adcfaad1990d45e42b20e200a9ecc35ee32df5692bd9cd18ae898b0b7728c919.yml +openapi_spec_hash: 4f532287bafe5da0578a1c1a5e31c952 config_hash: 7ec5a583f9c26b38993013bdfb0e7d46 diff --git a/README.md b/README.md index 092025e..3fa5d11 100644 --- a/README.md +++ b/README.md @@ -92,42 +92,17 @@ import LlamaStackClient, { toFile } from 'llama-stack-client'; const client = new LlamaStackClient(); // If you have access to Node `fs` we recommend using `fs.createReadStream()`: -await client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, - file: fs.createReadStream('/path/to/file'), - purpose: 'assistants', -}); +await client.files.create({ file: fs.createReadStream('/path/to/file') }); // Or if you have the web `File` API you can pass a `File` instance: -await client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, - file: new File(['my bytes'], 'file'), - purpose: 'assistants', -}); +await client.files.create({ file: new File(['my bytes'], 'file') }); // You can also pass a `fetch` `Response`: -await client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, - file: await fetch('https://somesite/file'), - purpose: 'assistants', -}); +await client.files.create({ file: await fetch('https://somesite/file') }); // Finally, if none of the above are convenient, you can use our `toFile` helper: -await client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, - file: await toFile(Buffer.from('my bytes'), 'file'), - purpose: 'assistants', -}); -await client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, - file: await toFile(new Uint8Array([0, 1, 2]), 
'file'), - purpose: 'assistants', -}); +await client.files.create({ file: await toFile(Buffer.from('my bytes'), 'file') }); +await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'file') }); ``` ## Handling errors diff --git a/api.md b/api.md index 8c7747f..a388c69 100644 --- a/api.md +++ b/api.md @@ -221,15 +221,11 @@ Methods: Types: - ChatCompletionResponseStreamChunk -- EmbeddingsResponse - TokenLogProbs -- InferenceRerankResponse Methods: - client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse -- client.inference.embeddings({ ...params }) -> EmbeddingsResponse -- client.inference.rerank({ ...params }) -> InferenceRerankResponse # Embeddings @@ -356,7 +352,7 @@ Types: Methods: -- client.models.openai.list() -> OpenAIListResponse +- client.models.openai.list() -> ModelListResponse # PostTraining diff --git a/src/index.ts b/src/index.ts index cb8689b..3659007 100644 --- a/src/index.ts +++ b/src/index.ts @@ -51,14 +51,10 @@ import { } from './resources/files'; import { ChatCompletionResponseStreamChunk, - EmbeddingsResponse, Inference, InferenceChatCompletionParams, InferenceChatCompletionParamsNonStreaming, InferenceChatCompletionParamsStreaming, - InferenceEmbeddingsParams, - InferenceRerankParams, - InferenceRerankResponse, TokenLogProbs, } from './resources/inference'; import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect'; @@ -530,14 +526,10 @@ export declare namespace LlamaStackClient { export { Inference as Inference, type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type EmbeddingsResponse as EmbeddingsResponse, type TokenLogProbs as TokenLogProbs, - type InferenceRerankResponse as InferenceRerankResponse, type InferenceChatCompletionParams as InferenceChatCompletionParams, type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, - type InferenceEmbeddingsParams as InferenceEmbeddingsParams, - type InferenceRerankParams as InferenceRerankParams, }; export { diff --git a/src/resources/files.ts b/src/resources/files.ts index 77edc6c..ad1bc76 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -13,8 +13,6 @@ export class Files extends APIResource { * - file: The File object (not file name) to be uploaded. * - purpose: The intended purpose of the uploaded file. * - expires_after: Optional form values describing expiration for the file. - * Expected expires_after[anchor] = "created_at", expires_after[seconds] = - * {integer}. Seconds must be between 3600 and 2592000 (1 hour to 30 days). */ create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options })); @@ -152,16 +150,7 @@ export interface ListFilesResponse { export type FileContentResponse = unknown; export interface FileCreateParams { - expires_after_anchor: string | null; - - expires_after_seconds: number | null; - file: Core.Uploadable; - - /** - * Valid purpose values for OpenAI Files API. 
- */ - purpose: 'assistants' | 'batch'; } export interface FileListParams extends OpenAICursorPageParams { diff --git a/src/resources/index.ts b/src/resources/index.ts index 19ffaf6..cd422cb 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -65,14 +65,10 @@ export { export { Inference, type ChatCompletionResponseStreamChunk, - type EmbeddingsResponse, type TokenLogProbs, - type InferenceRerankResponse, type InferenceChatCompletionParams, type InferenceChatCompletionParamsNonStreaming, type InferenceChatCompletionParamsStreaming, - type InferenceEmbeddingsParams, - type InferenceRerankParams, } from './inference'; export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect'; export { diff --git a/src/resources/inference.ts b/src/resources/inference.ts index bf60dc6..3862451 100644 --- a/src/resources/inference.ts +++ b/src/resources/inference.ts @@ -35,32 +35,6 @@ export class Inference extends APIResource { stream: body.stream ?? false, }) as APIPromise | APIPromise>; } - - /** - * Generate embeddings for content pieces using the specified model. - * - * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/embeddings. - */ - embeddings( - body: InferenceEmbeddingsParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/inference/embeddings', { body, ...options }); - } - - /** - * Rerank a list of documents based on their relevance to a query. - */ - rerank( - body: InferenceRerankParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post('/v1/inference/rerank', { body, ...options }) as Core.APIPromise<{ - data: InferenceRerankResponse; - }> - )._thenUnwrap((obj) => obj.data); - } } /** @@ -106,18 +80,6 @@ export namespace ChatCompletionResponseStreamChunk { } } -/** - * Response containing generated embeddings. - */ -export interface EmbeddingsResponse { - /** - * List of embedding vectors, one per input content. Each embedding is a list of - * floats. The dimensionality of the embedding is model-specific; you can check - * model metadata using /models/{model_id} - */ - embeddings: Array>; -} - /** * Log probabilities for generated tokens. */ @@ -128,29 +90,6 @@ export interface TokenLogProbs { logprobs_by_token: { [key: string]: number }; } -/** - * List of rerank result objects, sorted by relevance score (descending) - */ -export type InferenceRerankResponse = Array; - -export namespace InferenceRerankResponse { - /** - * A single rerank result from a reranking response. - */ - export interface InferenceRerankResponseItem { - /** - * The original index of the document in the input list - */ - index: number; - - /** - * The relevance score from the model output. Values are inverted when applicable - * so that higher scores indicate greater relevance. - */ - relevance_score: number; - } -} - export type InferenceChatCompletionParams = | InferenceChatCompletionParamsNonStreaming | InferenceChatCompletionParamsStreaming; @@ -292,177 +231,12 @@ export interface InferenceChatCompletionParamsStreaming extends InferenceChatCom stream: true; } -export interface InferenceEmbeddingsParams { - /** - * List of contents to generate embeddings for. Each content can be a string or an - * InterleavedContentItem (and hence can be multimodal). The behavior depends on - * the model and provider. Some models may only support text. - */ - contents: Array | Array; - - /** - * The identifier of the model to use. 
The model must be an embedding model - * registered with Llama Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) Output dimensionality for the embeddings. Only supported by - * Matryoshka models. - */ - output_dimension?: number; - - /** - * (Optional) How is the embedding being used? This is only supported by asymmetric - * embedding models. - */ - task_type?: 'query' | 'document'; - - /** - * (Optional) Config for how to truncate text for embedding when text is longer - * than the model's max sequence length. - */ - text_truncation?: 'none' | 'start' | 'end'; -} - -export interface InferenceRerankParams { - /** - * List of items to rerank. Each item can be a string, text content part, or image - * content part. Each input must not exceed the model's max input token length. - */ - items: Array< - | string - | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam - | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam - >; - - /** - * The identifier of the reranking model to use. - */ - model: string; - - /** - * The search query to rank items against. Can be a string, text content part, or - * image content part. The input must not exceed the model's max input token - * length. - */ - query: - | string - | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam - | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam; - - /** - * (Optional) Maximum number of results to return. Default: returns all. - */ - max_num_results?: number; -} - -export namespace InferenceRerankParams { - /** - * Text content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartTextParam { - /** - * The text content of the message - */ - text: string; - - /** - * Must be "text" to identify this as text content - */ - type: 'text'; - } - - /** - * Image content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; - - /** - * Must be "image_url" to identify this as image content - */ - type: 'image_url'; - } - - export namespace OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - export interface ImageURL { - /** - * URL of the image to include in the message - */ - url: string; - - /** - * (Optional) Level of detail for image processing. Can be "low", "high", or "auto" - */ - detail?: string; - } - } - - /** - * Text content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartTextParam { - /** - * The text content of the message - */ - text: string; - - /** - * Must be "text" to identify this as text content - */ - type: 'text'; - } - - /** - * Image content part for OpenAI-compatible chat completion messages. 
- */ - export interface OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; - - /** - * Must be "image_url" to identify this as image content - */ - type: 'image_url'; - } - - export namespace OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - export interface ImageURL { - /** - * URL of the image to include in the message - */ - url: string; - - /** - * (Optional) Level of detail for image processing. Can be "low", "high", or "auto" - */ - detail?: string; - } - } -} - export declare namespace Inference { export { type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type EmbeddingsResponse as EmbeddingsResponse, type TokenLogProbs as TokenLogProbs, - type InferenceRerankResponse as InferenceRerankResponse, type InferenceChatCompletionParams as InferenceChatCompletionParams, type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, - type InferenceEmbeddingsParams as InferenceEmbeddingsParams, - type InferenceRerankParams as InferenceRerankParams, }; } diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts index fe49ac0..d72281f 100644 --- a/src/resources/models/models.ts +++ b/src/resources/models/models.ts @@ -16,7 +16,7 @@ export class Models extends APIResource { } /** - * List models using the OpenAI API. + * List all models. */ list(options?: Core.RequestOptions): Core.APIPromise { return ( @@ -43,7 +43,7 @@ export class Models extends APIResource { } export interface ListModelsResponse { - data: Array; + data: ModelListResponse; } /** @@ -81,22 +81,7 @@ export interface Model { provider_resource_id?: string; } -export type ModelListResponse = Array; - -export namespace ModelListResponse { - /** - * A model from OpenAI. - */ - export interface ModelListResponseItem { - id: string; - - created: number; - - object: 'model'; - - owned_by: string; - } -} +export type ModelListResponse = Array; export interface ModelRegisterParams { /** diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts index 4190759..8055dea 100644 --- a/src/resources/models/openai.ts +++ b/src/resources/models/openai.ts @@ -2,34 +2,20 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; +import * as ModelsAPI from './models'; export class OpenAI extends APIResource { /** - * List models using the OpenAI API. + * List all models. */ - list(options?: Core.RequestOptions): Core.APIPromise { + list(options?: Core.RequestOptions): Core.APIPromise { return ( - this._client.get('/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }> + this._client.get('/v1/models', options) as Core.APIPromise<{ data: ModelsAPI.ModelListResponse }> )._thenUnwrap((obj) => obj.data); } } -export type OpenAIListResponse = Array; - -export namespace OpenAIListResponse { - /** - * A model from OpenAI. 
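With the OpenAI-specific model shape gone, `ModelListResponse` is now just `Array<Model>`, so `client.models.list()` and the OpenAI-compat sub-resource return the same element type. A short sketch of consuming the unified list; only `identifier` is confirmed by the surrounding types, everything else on `Model` should be treated as provider metadata:

```ts
// assumes: const client = new LlamaStackClient();
const models = await client.models.list();
for (const model of models) {
  console.log(model.identifier); // unified Model shape, no more ModelListResponseItem
}
```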
- */ - export interface OpenAIListResponseItem { - id: string; - - created: number; - - object: 'model'; - - owned_by: string; - } -} +export type OpenAIListResponse = Array; export declare namespace OpenAI { export { type OpenAIListResponse as OpenAIListResponse }; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 5113b1a..a186b01 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -149,11 +149,6 @@ export interface ResponseObject { * (Optional) Truncation strategy applied to the response */ truncation?: string; - - /** - * (Optional) User identifier associated with the request - */ - user?: string; } export namespace ResponseObject { @@ -1884,11 +1879,6 @@ export interface ResponseListResponse { * (Optional) Truncation strategy applied to the response */ truncation?: string; - - /** - * (Optional) User identifier associated with the request - */ - user?: string; } export namespace ResponseListResponse { diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 0dcbd3e..2104b09 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -8,10 +8,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource files', () => { test('create: only required params', async () => { const responsePromise = client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, file: await toFile(Buffer.from('# my file contents'), 'README.md'), - purpose: 'assistants', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -24,10 +21,7 @@ describe('resource files', () => { test('create: required and optional params', async () => { const response = await client.files.create({ - expires_after_anchor: 'expires_after_anchor', - expires_after_seconds: 0, file: await toFile(Buffer.from('# my file contents'), 'README.md'), - purpose: 'assistants', }); }); diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/inference.test.ts index 94be46c..7de326e 100644 --- a/tests/api-resources/inference.test.ts +++ b/tests/api-resources/inference.test.ts @@ -54,45 +54,4 @@ describe('resource inference', () => { ], }); }); - - test('embeddings: only required params', async () => { - const responsePromise = client.inference.embeddings({ contents: ['string'], model_id: 'model_id' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('embeddings: required and optional params', async () => { - const response = await client.inference.embeddings({ - contents: ['string'], - model_id: 'model_id', - output_dimension: 0, - task_type: 'query', - text_truncation: 'none', - }); - }); - - test('rerank: only required params', async () => { - const responsePromise = client.inference.rerank({ items: ['string'], model: 'model', query: 'string' }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - 
expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('rerank: required and optional params', async () => { - const response = await client.inference.rerank({ - items: ['string'], - model: 'model', - query: 'string', - max_num_results: 0, - }); - }); }); From 367d775c3d5a2fd85bf138d2b175e91b7c185913 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 02:00:08 +0000 Subject: [PATCH 06/26] feat(api)!: fixes to remove deprecated inference resources --- .stats.yml | 2 +- README.md | 73 ++----- api.md | 5 +- src/index.ts | 16 +- src/resources/index.ts | 9 +- src/resources/inference.ts | 280 ++++++++++---------------- src/resources/shared.ts | 15 +- tests/api-resources/inference.test.ts | 45 +---- 8 files changed, 154 insertions(+), 291 deletions(-) diff --git a/.stats.yml b/.stats.yml index 016bf7b..ed58961 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 105 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-adcfaad1990d45e42b20e200a9ecc35ee32df5692bd9cd18ae898b0b7728c919.yml openapi_spec_hash: 4f532287bafe5da0578a1c1a5e31c952 -config_hash: 7ec5a583f9c26b38993013bdfb0e7d46 +config_hash: 5b643c97c83a497d7d346253f1e175f3 diff --git a/README.md b/README.md index 3fa5d11..506c87d 100644 --- a/README.md +++ b/README.md @@ -32,28 +32,6 @@ const model = await client.models.register({ model_id: 'model_id' }); console.log(model.identifier); ``` -## Streaming responses - -We provide support for streaming responses using Server Sent Events (SSE). - -```ts -import LlamaStackClient from 'llama-stack-client'; - -const client = new LlamaStackClient(); - -const stream = await client.inference.chatCompletion({ - messages: [{ content: 'string', role: 'user' }], - model_id: 'model_id', - stream: true, -}); -for await (const chatCompletionResponseStreamChunk of stream) { - console.log(chatCompletionResponseStreamChunk.completion_message); -} -``` - -If you need to cancel a stream, you can `break` from the loop -or call `stream.controller.abort()`. - ### Request & Response types This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: @@ -64,13 +42,7 @@ import LlamaStackClient from 'llama-stack-client'; const client = new LlamaStackClient(); -const params: LlamaStackClient.InferenceChatCompletionParams = { - messages: [{ content: 'string', role: 'user' }], - model_id: 'model_id', -}; -const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion( - params, -); +const toolGroups: LlamaStackClient.ToolgroupListResponse = await client.toolgroups.list(); ``` Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. 
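Because the deprecated `client.inference.chatCompletion` is removed outright in this commit, callers migrate to the OpenAI-compatible chat surface. A sketch of the equivalent streaming call, matching the `client.chat.completions.create` examples that land in the README later in this series:

```ts
// assumes: const client = new LlamaStackClient();
const stream = await client.chat.completions.create({
  messages: [{ content: 'string', role: 'user' }],
  model: 'model',
  stream: true,
});
for await (const chunk of stream) {
  console.log(chunk); // ChatCompletionChunk; `break` or stream.controller.abort() to cancel
}
```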
@@ -113,17 +85,15 @@ a subclass of `APIError` will be thrown: ```ts -const chatCompletionResponse = await client.inference - .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }) - .catch(async (err) => { - if (err instanceof LlamaStackClient.APIError) { - console.log(err.status); // 400 - console.log(err.name); // BadRequestError - console.log(err.headers); // {server: 'nginx', ...} - } else { - throw err; - } - }); +const toolGroups = await client.toolgroups.list().catch(async (err) => { + if (err instanceof LlamaStackClient.APIError) { + console.log(err.status); // 400 + console.log(err.name); // BadRequestError + console.log(err.headers); // {server: 'nginx', ...} + } else { + throw err; + } +}); ``` Error codes are as follows: @@ -155,7 +125,7 @@ const client = new LlamaStackClient({ }); // Or, configure per-request: -await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, { +await client.toolgroups.list({ maxRetries: 5, }); ``` @@ -172,7 +142,7 @@ const client = new LlamaStackClient({ }); // Override per-request: -await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, { +await client.toolgroups.list({ timeout: 5 * 1000, }); ``` @@ -193,17 +163,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new LlamaStackClient(); -const response = await client.inference - .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }) - .asResponse(); +const response = await client.toolgroups.list().asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object -const { data: chatCompletionResponse, response: raw } = await client.inference - .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }) - .withResponse(); +const { data: toolGroups, response: raw } = await client.toolgroups.list().withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(chatCompletionResponse.completion_message); +console.log(toolGroups); ``` ### Making custom/undocumented requests @@ -307,12 +273,9 @@ const client = new LlamaStackClient({ }); // Override per-request: -await client.inference.chatCompletion( - { messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, - { - httpAgent: new http.Agent({ keepAlive: false }), - }, -); +await client.toolgroups.list({ + httpAgent: new http.Agent({ keepAlive: false }), +}); ``` ## Semantic versioning diff --git a/api.md b/api.md index a388c69..7ac7499 100644 --- a/api.md +++ b/api.md @@ -220,12 +220,11 @@ Methods: Types: -- ChatCompletionResponseStreamChunk -- TokenLogProbs +- InferenceRerankResponse Methods: -- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse +- client.inference.rerank({ ...params }) -> InferenceRerankResponse # Embeddings diff --git a/src/index.ts b/src/index.ts index 3659007..8e3621d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -49,14 +49,7 @@ import { FilesOpenAICursorPage, ListFilesResponse, } from './resources/files'; -import { - ChatCompletionResponseStreamChunk, - Inference, - InferenceChatCompletionParams, - InferenceChatCompletionParamsNonStreaming, - InferenceChatCompletionParamsStreaming, - TokenLogProbs, -} from './resources/inference'; +import { Inference, InferenceRerankParams, InferenceRerankResponse } from './resources/inference'; import { 
HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect'; import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations'; import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers'; @@ -525,11 +518,8 @@ export declare namespace LlamaStackClient { export { Inference as Inference, - type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type TokenLogProbs as TokenLogProbs, - type InferenceChatCompletionParams as InferenceChatCompletionParams, - type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, - type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, + type InferenceRerankResponse as InferenceRerankResponse, + type InferenceRerankParams as InferenceRerankParams, }; export { diff --git a/src/resources/index.ts b/src/resources/index.ts index cd422cb..4812f93 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -62,14 +62,7 @@ export { type FileCreateParams, type FileListParams, } from './files'; -export { - Inference, - type ChatCompletionResponseStreamChunk, - type TokenLogProbs, - type InferenceChatCompletionParams, - type InferenceChatCompletionParamsNonStreaming, - type InferenceChatCompletionParamsStreaming, -} from './inference'; +export { Inference, type InferenceRerankResponse, type InferenceRerankParams } from './inference'; export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect'; export { Models, diff --git a/src/resources/inference.ts b/src/resources/inference.ts index 3862451..055d133 100644 --- a/src/resources/inference.ts +++ b/src/resources/inference.ts @@ -1,242 +1,178 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../resource'; -import { APIPromise } from '../core'; import * as Core from '../core'; -import * as InferenceAPI from './inference'; -import * as Shared from './shared'; -import { Stream } from '../streaming'; export class Inference extends APIResource { /** - * Generate a chat completion for the given messages using the specified model. - * - * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/chat/completions. + * Rerank a list of documents based on their relevance to a query. */ - chatCompletion( - body: InferenceChatCompletionParamsNonStreaming, + rerank( + body: InferenceRerankParams, options?: Core.RequestOptions, - ): APIPromise; - chatCompletion( - body: InferenceChatCompletionParamsStreaming, - options?: Core.RequestOptions, - ): APIPromise>; - chatCompletion( - body: InferenceChatCompletionParamsBase, - options?: Core.RequestOptions, - ): APIPromise | Shared.ChatCompletionResponse>; - chatCompletion( - body: InferenceChatCompletionParams, - options?: Core.RequestOptions, - ): APIPromise | APIPromise> { - return this._client.post('/v1/inference/chat-completion', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + ): Core.APIPromise { + return ( + this._client.post('/v1alpha/inference/rerank', { body, ...options }) as Core.APIPromise<{ + data: InferenceRerankResponse; + }> + )._thenUnwrap((obj) => obj.data); } } /** - * A chunk of a streamed chat completion response. 
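The reworked `rerank` method now posts to `/v1alpha/inference/rerank` and unwraps the `{ data: ... }` envelope via `_thenUnwrap`, so callers receive the bare result array. A usage sketch based on the param and response types defined in this file:

```ts
// assumes: const client = new LlamaStackClient();
const results = await client.inference.rerank({
  items: ['first document', 'second document'],
  model: 'model',
  query: 'which document is most relevant?',
  max_num_results: 1, // optional; omitting it returns all results
});
for (const result of results) {
  // sorted by relevance_score, descending; `index` points into the input `items`
  console.log(result.index, result.relevance_score);
}
```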
+ * List of rerank result objects, sorted by relevance score (descending) */ -export interface ChatCompletionResponseStreamChunk { - /** - * The event containing the new content - */ - event: ChatCompletionResponseStreamChunk.Event; - - /** - * (Optional) List of metrics associated with the API response - */ - metrics?: Array; -} +export type InferenceRerankResponse = Array; -export namespace ChatCompletionResponseStreamChunk { +export namespace InferenceRerankResponse { /** - * The event containing the new content + * A single rerank result from a reranking response. */ - export interface Event { - /** - * Content generated since last event. This can be one or more tokens, or a tool - * call. - */ - delta: Shared.ContentDelta; - + export interface InferenceRerankResponseItem { /** - * Type of the event + * The original index of the document in the input list */ - event_type: 'start' | 'complete' | 'progress'; + index: number; /** - * Optional log probabilities for generated tokens + * The relevance score from the model output. Values are inverted when applicable + * so that higher scores indicate greater relevance. */ - logprobs?: Array; - - /** - * Optional reason why generation stopped, if complete - */ - stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; + relevance_score: number; } } -/** - * Log probabilities for generated tokens. - */ -export interface TokenLogProbs { - /** - * Dictionary mapping tokens to their log probabilities - */ - logprobs_by_token: { [key: string]: number }; -} - -export type InferenceChatCompletionParams = - | InferenceChatCompletionParamsNonStreaming - | InferenceChatCompletionParamsStreaming; - -export interface InferenceChatCompletionParamsBase { +export interface InferenceRerankParams { /** - * List of messages in the conversation. + * List of items to rerank. Each item can be a string, text content part, or image + * content part. Each input must not exceed the model's max input token length. */ - messages: Array; + items: Array< + | string + | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam + | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam + >; /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. + * The identifier of the reranking model to use. */ - model_id: string; + model: string; /** - * (Optional) If specified, log probabilities for each token position will be - * returned. + * The search query to rank items against. Can be a string, text content part, or + * image content part. The input must not exceed the model's max input token + * length. */ - logprobs?: InferenceChatCompletionParams.Logprobs; + query: + | string + | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam + | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam; /** - * (Optional) Grammar specification for guided (structured) decoding. There are two - * options: - `ResponseFormat.json_schema`: The grammar is a JSON schema. Most - * providers support this format. - `ResponseFormat.grammar`: The grammar is a BNF - * grammar. This format is more flexible, but not all providers support it. + * (Optional) Maximum number of results to return. Default: returns all. */ - response_format?: Shared.ResponseFormat; + max_num_results?: number; +} +export namespace InferenceRerankParams { /** - * Parameters to control the sampling strategy. + * Text content part for OpenAI-compatible chat completion messages. 
*/ - sampling_params?: Shared.SamplingParams; + export interface OpenAIChatCompletionContentPartTextParam { + /** + * The text content of the message + */ + text: string; - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: boolean; + /** + * Must be "text" to identify this as text content + */ + type: 'text'; + } /** - * (Optional) Whether tool use is required or automatic. Defaults to - * ToolChoice.auto. .. deprecated:: Use tool_config instead. + * Image content part for OpenAI-compatible chat completion messages. */ - tool_choice?: 'auto' | 'required' | 'none'; + export interface OpenAIChatCompletionContentPartImageParam { + /** + * Image URL specification and processing details + */ + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; - /** - * (Optional) Configuration for tool use. - */ - tool_config?: InferenceChatCompletionParams.ToolConfig; + /** + * Must be "image_url" to identify this as image content + */ + type: 'image_url'; + } - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. .. deprecated:: Use - * tool_config instead. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; + export namespace OpenAIChatCompletionContentPartImageParam { + /** + * Image URL specification and processing details + */ + export interface ImageURL { + /** + * URL of the image to include in the message + */ + url: string; + + /** + * (Optional) Level of detail for image processing. Can be "low", "high", or "auto" + */ + detail?: string; + } + } /** - * (Optional) List of tool definitions available to the model. + * Text content part for OpenAI-compatible chat completion messages. */ - tools?: Array; -} + export interface OpenAIChatCompletionContentPartTextParam { + /** + * The text content of the message + */ + text: string; -export namespace InferenceChatCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { /** - * How many tokens (for each position) to return log probabilities for. + * Must be "text" to identify this as text content */ - top_k?: number; + type: 'text'; } /** - * (Optional) Configuration for tool use. + * Image content part for OpenAI-compatible chat completion messages. */ - export interface ToolConfig { + export interface OpenAIChatCompletionContentPartImageParam { /** - * (Optional) Config for how to override the default system prompt. - - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. + * Image URL specification and processing details */ - system_message_behavior?: 'append' | 'replace'; + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. 
+ * Must be "image_url" to identify this as image content */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); + type: 'image_url'; + } + export namespace OpenAIChatCompletionContentPartImageParam { /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. + * Image URL specification and processing details */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - } - - export interface Tool { - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - description?: string; - - parameters?: { [key: string]: Shared.ToolParamDefinition }; + export interface ImageURL { + /** + * URL of the image to include in the message + */ + url: string; + + /** + * (Optional) Level of detail for image processing. Can be "low", "high", or "auto" + */ + detail?: string; + } } - - export type InferenceChatCompletionParamsNonStreaming = - InferenceAPI.InferenceChatCompletionParamsNonStreaming; - export type InferenceChatCompletionParamsStreaming = InferenceAPI.InferenceChatCompletionParamsStreaming; -} - -export interface InferenceChatCompletionParamsNonStreaming extends InferenceChatCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: false; -} - -export interface InferenceChatCompletionParamsStreaming extends InferenceChatCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream: true; } export declare namespace Inference { export { - type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type TokenLogProbs as TokenLogProbs, - type InferenceChatCompletionParams as InferenceChatCompletionParams, - type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, - type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, + type InferenceRerankResponse as InferenceRerankResponse, + type InferenceRerankParams as InferenceRerankParams, }; } diff --git a/src/resources/shared.ts b/src/resources/shared.ts index bd5f670..4168c91 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,7 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Shared from './shared'; -import * as InferenceAPI from './inference'; import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; /** @@ -117,7 +116,7 @@ export interface ChatCompletionResponse { /** * Optional log probabilities for generated tokens */ - logprobs?: Array; + logprobs?: Array; /** * (Optional) List of metrics associated with the API response @@ -125,6 +124,18 @@ export interface ChatCompletionResponse { metrics?: Array; } +export namespace ChatCompletionResponse { + /** + * Log probabilities for generated tokens. + */ + export interface Logprob { + /** + * Dictionary mapping tokens to their log probabilities + */ + logprobs_by_token: { [key: string]: number }; + } +} + /** * A message containing the model's (assistant) response in a chat conversation. 
*/ diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/inference.test.ts index 7de326e..6eef337 100644 --- a/tests/api-resources/inference.test.ts +++ b/tests/api-resources/inference.test.ts @@ -6,11 +6,8 @@ import { Response } from 'node-fetch'; const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource inference', () => { - test('chatCompletion: only required params', async () => { - const responsePromise = client.inference.chatCompletion({ - messages: [{ content: 'string', role: 'user' }], - model_id: 'model_id', - }); + test('rerank: only required params', async () => { + const responsePromise = client.inference.rerank({ items: ['string'], model: 'model', query: 'string' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,38 +17,12 @@ describe('resource inference', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('chatCompletion: required and optional params', async () => { - const response = await client.inference.chatCompletion({ - messages: [{ content: 'string', role: 'user', context: 'string' }], - model_id: 'model_id', - logprobs: { top_k: 0 }, - response_format: { json_schema: { foo: true }, type: 'json_schema' }, - sampling_params: { - strategy: { type: 'greedy' }, - max_tokens: 0, - repetition_penalty: 0, - stop: ['string'], - }, - stream: false, - tool_choice: 'auto', - tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' }, - tool_prompt_format: 'json', - tools: [ - { - tool_name: 'brave_search', - description: 'description', - parameters: { - foo: { - param_type: 'param_type', - default: true, - description: 'description', - items: true, - required: true, - title: 'title', - }, - }, - }, - ], + test('rerank: required and optional params', async () => { + const response = await client.inference.rerank({ + items: ['string'], + model: 'model', + query: 'string', + max_num_results: 0, }); }); }); From f1cf9d68b6b2569dfb5ea3e2d2c33eff1a832e47 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 03:37:11 +0000 Subject: [PATCH 07/26] feat(api): updating post /v1/files to have correct multipart/form-data --- .stats.yml | 4 +- README.md | 10 +-- api.md | 4 - src/index.ts | 4 - src/resources/agents/agents.ts | 2 - src/resources/agents/index.ts | 1 - src/resources/agents/turn.ts | 79 ++++++++++++++---- src/resources/eval/eval.ts | 10 +-- src/resources/eval/index.ts | 1 - src/resources/files.ts | 27 ++++++ src/resources/index.ts | 1 - src/resources/shared.ts | 131 ++++++++---------------------- tests/api-resources/files.test.ts | 3 + 13 files changed, 135 insertions(+), 142 deletions(-) diff --git a/.stats.yml b/.stats.yml index ed58961..20dba32 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 105 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-adcfaad1990d45e42b20e200a9ecc35ee32df5692bd9cd18ae898b0b7728c919.yml -openapi_spec_hash: 4f532287bafe5da0578a1c1a5e31c952 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-d7bea816190382a93511491e33d1f37f707620926ab133ae8ce0883d763df741.yml +openapi_spec_hash: f73b3af77108625edae3f25972b9e665 config_hash: 5b643c97c83a497d7d346253f1e175f3 diff --git a/README.md 
b/README.md index 506c87d..0724245 100644 --- a/README.md +++ b/README.md @@ -64,17 +64,17 @@ import LlamaStackClient, { toFile } from 'llama-stack-client'; const client = new LlamaStackClient(); // If you have access to Node `fs` we recommend using `fs.createReadStream()`: -await client.files.create({ file: fs.createReadStream('/path/to/file') }); +await client.files.create({ file: fs.createReadStream('/path/to/file'), purpose: 'assistants' }); // Or if you have the web `File` API you can pass a `File` instance: -await client.files.create({ file: new File(['my bytes'], 'file') }); +await client.files.create({ file: new File(['my bytes'], 'file'), purpose: 'assistants' }); // You can also pass a `fetch` `Response`: -await client.files.create({ file: await fetch('https://somesite/file') }); +await client.files.create({ file: await fetch('https://somesite/file'), purpose: 'assistants' }); // Finally, if none of the above are convenient, you can use our `toFile` helper: -await client.files.create({ file: await toFile(Buffer.from('my bytes'), 'file') }); -await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'file') }); +await client.files.create({ file: await toFile(Buffer.from('my bytes'), 'file'), purpose: 'assistants' }); +await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'file'), purpose: 'assistants' }); ``` ## Handling errors diff --git a/api.md b/api.md index 7ac7499..619a82c 100644 --- a/api.md +++ b/api.md @@ -5,7 +5,6 @@ Types: - AgentConfig - ChatCompletionResponse - CompletionMessage -- ContentDelta - Document - InterleavedContent - InterleavedContentItem @@ -13,7 +12,6 @@ Types: - Metric - ParamType - QueryConfig -- QueryGeneratorConfig - QueryResult - ResponseFormat - SafetyViolation @@ -151,7 +149,6 @@ Types: - AgentTurnResponseStreamChunk - Turn - TurnResponseEvent -- TurnResponseEventPayload Methods: @@ -183,7 +180,6 @@ Methods: Types: - BenchmarkConfig -- EvalCandidate - EvaluateResponse - Job diff --git a/src/index.ts b/src/index.ts index 8e3621d..9839985 100644 --- a/src/index.ts +++ b/src/index.ts @@ -140,7 +140,6 @@ import { Chat, ChatCompletionChunk } from './resources/chat/chat'; import { BenchmarkConfig, Eval, - EvalCandidate, EvalEvaluateRowsAlphaParams, EvalEvaluateRowsParams, EvalRunEvalAlphaParams, @@ -499,7 +498,6 @@ export declare namespace LlamaStackClient { export { Eval as Eval, type BenchmarkConfig as BenchmarkConfig, - type EvalCandidate as EvalCandidate, type EvaluateResponse as EvaluateResponse, type Job as Job, type EvalEvaluateRowsParams as EvalEvaluateRowsParams, @@ -681,7 +679,6 @@ export declare namespace LlamaStackClient { export type AgentConfig = API.AgentConfig; export type ChatCompletionResponse = API.ChatCompletionResponse; export type CompletionMessage = API.CompletionMessage; - export type ContentDelta = API.ContentDelta; export type Document = API.Document; export type InterleavedContent = API.InterleavedContent; export type InterleavedContentItem = API.InterleavedContentItem; @@ -689,7 +686,6 @@ export declare namespace LlamaStackClient { export type Metric = API.Metric; export type ParamType = API.ParamType; export type QueryConfig = API.QueryConfig; - export type QueryGeneratorConfig = API.QueryGeneratorConfig; export type QueryResult = API.QueryResult; export type ResponseFormat = API.ResponseFormat; export type SafetyViolation = API.SafetyViolation; diff --git a/src/resources/agents/agents.ts b/src/resources/agents/agents.ts index 35a4d62..41e5102 100644 --- a/src/resources/agents/agents.ts 
+++ b/src/resources/agents/agents.ts @@ -25,7 +25,6 @@ import { TurnCreateParamsStreaming, TurnResource, TurnResponseEvent, - TurnResponseEventPayload, TurnResumeParams, TurnResumeParamsNonStreaming, TurnResumeParamsStreaming, @@ -355,7 +354,6 @@ export declare namespace Agents { type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk, type Turn as Turn, type TurnResponseEvent as TurnResponseEvent, - type TurnResponseEventPayload as TurnResponseEventPayload, type TurnCreateParams as TurnCreateParams, type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming, type TurnCreateParamsStreaming as TurnCreateParamsStreaming, diff --git a/src/resources/agents/index.ts b/src/resources/agents/index.ts index 88a44bf..05f354f 100644 --- a/src/resources/agents/index.ts +++ b/src/resources/agents/index.ts @@ -28,7 +28,6 @@ export { type AgentTurnResponseStreamChunk, type Turn, type TurnResponseEvent, - type TurnResponseEventPayload, type TurnCreateParams, type TurnCreateParamsNonStreaming, type TurnCreateParamsStreaming, diff --git a/src/resources/agents/turn.ts b/src/resources/agents/turn.ts index 0273625..f33db5d 100644 --- a/src/resources/agents/turn.ts +++ b/src/resources/agents/turn.ts @@ -259,21 +259,16 @@ export interface TurnResponseEvent { /** * Event-specific payload containing event data */ - payload: TurnResponseEventPayload; + payload: + | TurnResponseEvent.AgentTurnResponseStepStartPayload + | TurnResponseEvent.AgentTurnResponseStepProgressPayload + | TurnResponseEvent.AgentTurnResponseStepCompletePayload + | TurnResponseEvent.AgentTurnResponseTurnStartPayload + | TurnResponseEvent.AgentTurnResponseTurnCompletePayload + | TurnResponseEvent.AgentTurnResponseTurnAwaitingInputPayload; } -/** - * Payload for step start events in agent turn responses. - */ -export type TurnResponseEventPayload = - | TurnResponseEventPayload.AgentTurnResponseStepStartPayload - | TurnResponseEventPayload.AgentTurnResponseStepProgressPayload - | TurnResponseEventPayload.AgentTurnResponseStepCompletePayload - | TurnResponseEventPayload.AgentTurnResponseTurnStartPayload - | TurnResponseEventPayload.AgentTurnResponseTurnCompletePayload - | TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload; - -export namespace TurnResponseEventPayload { +export namespace TurnResponseEvent { /** * Payload for step start events in agent turn responses. */ @@ -306,7 +301,10 @@ export namespace TurnResponseEventPayload { /** * Incremental content changes during step execution */ - delta: Shared.ContentDelta; + delta: + | AgentTurnResponseStepProgressPayload.TextDelta + | AgentTurnResponseStepProgressPayload.ImageDelta + | AgentTurnResponseStepProgressPayload.ToolCallDelta; /** * Type of event being reported @@ -324,6 +322,58 @@ export namespace TurnResponseEventPayload { step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval'; } + export namespace AgentTurnResponseStepProgressPayload { + /** + * A text content delta for streaming responses. + */ + export interface TextDelta { + /** + * The incremental text content + */ + text: string; + + /** + * Discriminator type of the delta. Always "text" + */ + type: 'text'; + } + + /** + * An image content delta for streaming responses. + */ + export interface ImageDelta { + /** + * The incremental image data as bytes + */ + image: string; + + /** + * Discriminator type of the delta. Always "image" + */ + type: 'image'; + } + + /** + * A tool call content delta for streaming responses. 
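Since `TurnResponseEventPayload` is folded into `TurnResponseEvent` as an inline union, downstream code still narrows on the same discriminators. A standalone sketch of handling the three delta shapes from this diff; the types are restated structurally here as an illustration rather than imported from the SDK:

```ts
type Delta =
  | { type: 'text'; text: string }
  | { type: 'image'; image: string }
  | {
      type: 'tool_call';
      parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded';
      tool_call: unknown;
    };

function handleDelta(delta: Delta): void {
  switch (delta.type) {
    case 'text':
      process.stdout.write(delta.text); // incremental text content
      break;
    case 'image':
      console.log(`image delta: ${delta.image.length} bytes`); // incremental image data
      break;
    case 'tool_call':
      console.log(`tool call ${delta.parse_status}`); // in-progress string or parsed ToolCall
      break;
  }
}
```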
+ */ + export interface ToolCallDelta { + /** + * Current parsing status of the tool call + */ + parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded'; + + /** + * Either an in-progress tool call string or the final parsed tool call + */ + tool_call: string | Shared.ToolCall; + + /** + * Discriminator type of the delta. Always "tool_call" + */ + type: 'tool_call'; + } + } + /** * Payload for step completion events in agent turn responses. */ @@ -621,7 +671,6 @@ export declare namespace TurnResource { type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk, type Turn as Turn, type TurnResponseEvent as TurnResponseEvent, - type TurnResponseEventPayload as TurnResponseEventPayload, type TurnCreateParams as TurnCreateParams, type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming, type TurnCreateParamsStreaming as TurnCreateParamsStreaming, diff --git a/src/resources/eval/eval.ts b/src/resources/eval/eval.ts index 961b24e..765c88d 100644 --- a/src/resources/eval/eval.ts +++ b/src/resources/eval/eval.ts @@ -58,7 +58,7 @@ export interface BenchmarkConfig { /** * The candidate to evaluate. */ - eval_candidate: EvalCandidate; + eval_candidate: BenchmarkConfig.ModelCandidate | BenchmarkConfig.AgentCandidate; /** * Map between scoring function id and parameters for each scoring function you @@ -73,12 +73,7 @@ export interface BenchmarkConfig { num_examples?: number; } -/** - * A model candidate for evaluation. - */ -export type EvalCandidate = EvalCandidate.ModelCandidate | EvalCandidate.AgentCandidate; - -export namespace EvalCandidate { +export namespace BenchmarkConfig { /** * A model candidate for evaluation. */ @@ -197,7 +192,6 @@ Eval.Jobs = Jobs; export declare namespace Eval { export { type BenchmarkConfig as BenchmarkConfig, - type EvalCandidate as EvalCandidate, type EvaluateResponse as EvaluateResponse, type Job as Job, type EvalEvaluateRowsParams as EvalEvaluateRowsParams, diff --git a/src/resources/eval/index.ts b/src/resources/eval/index.ts index e8c35f3..ecdb275 100644 --- a/src/resources/eval/index.ts +++ b/src/resources/eval/index.ts @@ -3,7 +3,6 @@ export { Eval, type BenchmarkConfig, - type EvalCandidate, type EvaluateResponse, type Job, type EvalEvaluateRowsParams, diff --git a/src/resources/files.ts b/src/resources/files.ts index ad1bc76..077487a 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -151,6 +151,33 @@ export type FileContentResponse = unknown; export interface FileCreateParams { file: Core.Uploadable; + + /** + * Valid purpose values for OpenAI Files API. + */ + purpose: 'assistants' | 'batch'; + + /** + * Control expiration of uploaded files. Params: + * + * - anchor, must be "created_at" + * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days) + */ + expires_after?: FileCreateParams.ExpiresAfter; +} + +export namespace FileCreateParams { + /** + * Control expiration of uploaded files. 
Params: + * + * - anchor, must be "created_at" + * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days) + */ + export interface ExpiresAfter { + anchor: 'created_at'; + + seconds: number; + } } export interface FileListParams extends OpenAICursorPageParams { diff --git a/src/resources/index.ts b/src/resources/index.ts index 4812f93..9d7792c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -44,7 +44,6 @@ export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } export { Eval, type BenchmarkConfig, - type EvalCandidate, type EvaluateResponse, type Job, type EvalEvaluateRowsParams, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 4168c91..764bb87 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,6 +1,5 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Shared from './shared'; import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; /** @@ -166,63 +165,6 @@ export interface CompletionMessage { tool_calls?: Array; } -/** - * A text content delta for streaming responses. - */ -export type ContentDelta = ContentDelta.TextDelta | ContentDelta.ImageDelta | ContentDelta.ToolCallDelta; - -export namespace ContentDelta { - /** - * A text content delta for streaming responses. - */ - export interface TextDelta { - /** - * The incremental text content - */ - text: string; - - /** - * Discriminator type of the delta. Always "text" - */ - type: 'text'; - } - - /** - * An image content delta for streaming responses. - */ - export interface ImageDelta { - /** - * The incremental image data as bytes - */ - image: string; - - /** - * Discriminator type of the delta. Always "image" - */ - type: 'image'; - } - - /** - * A tool call content delta for streaming responses. - */ - export interface ToolCallDelta { - /** - * Current parsing status of the tool call - */ - parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded'; - - /** - * Either an in-progress tool call string or the final parsed tool call - */ - tool_call: string | Shared.ToolCall; - - /** - * Discriminator type of the delta. Always "tool_call" - */ - type: 'tool_call'; - } -} - /** * A document to be used for document ingestion in the RAG Tool. */ @@ -635,7 +577,7 @@ export interface QueryConfig { /** * Configuration for the query generator. */ - query_generator_config: QueryGeneratorConfig; + query_generator_config: QueryConfig.DefaultRagQueryGeneratorConfig | QueryConfig.LlmragQueryGeneratorConfig; /** * Search mode for retrieval—either "vector", "keyword", or "hybrid". Default @@ -651,79 +593,70 @@ export interface QueryConfig { export namespace QueryConfig { /** - * Reciprocal Rank Fusion (RRF) ranker configuration. + * Configuration for the default RAG query generator. */ - export interface RrfRanker { + export interface DefaultRagQueryGeneratorConfig { /** - * The impact factor for RRF scoring. Higher values give more weight to - * higher-ranked results. Must be greater than 0 + * String separator used to join query terms */ - impact_factor: number; + separator: string; /** - * The type of ranker, always "rrf" + * Type of query generator, always 'default' */ - type: 'rrf'; + type: 'default'; } /** - * Weighted ranker configuration that combines vector and keyword scores. + * Configuration for the LLM-based RAG query generator. */ - export interface WeightedRanker { + export interface LlmragQueryGeneratorConfig { /** - * Weight factor between 0 and 1. 
0 means only use keyword scores, 1 means only use - * vector scores, values in between blend both scores. + * Name of the language model to use for query generation */ - alpha: number; + model: string; /** - * The type of ranker, always "weighted" + * Template string for formatting the query generation prompt */ - type: 'weighted'; - } -} + template: string; -/** - * Configuration for the default RAG query generator. - */ -export type QueryGeneratorConfig = - | QueryGeneratorConfig.DefaultRagQueryGeneratorConfig - | QueryGeneratorConfig.LlmragQueryGeneratorConfig; + /** + * Type of query generator, always 'llm' + */ + type: 'llm'; + } -export namespace QueryGeneratorConfig { /** - * Configuration for the default RAG query generator. + * Reciprocal Rank Fusion (RRF) ranker configuration. */ - export interface DefaultRagQueryGeneratorConfig { + export interface RrfRanker { /** - * String separator used to join query terms + * The impact factor for RRF scoring. Higher values give more weight to + * higher-ranked results. Must be greater than 0 */ - separator: string; + impact_factor: number; /** - * Type of query generator, always 'default' + * The type of ranker, always "rrf" */ - type: 'default'; + type: 'rrf'; } /** - * Configuration for the LLM-based RAG query generator. + * Weighted ranker configuration that combines vector and keyword scores. */ - export interface LlmragQueryGeneratorConfig { - /** - * Name of the language model to use for query generation - */ - model: string; - + export interface WeightedRanker { /** - * Template string for formatting the query generation prompt + * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use + * vector scores, values in between blend both scores. */ - template: string; + alpha: number; /** - * Type of query generator, always 'llm' + * The type of ranker, always "weighted" */ - type: 'llm'; + type: 'weighted'; } } diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 2104b09..e3eec3d 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -9,6 +9,7 @@ describe('resource files', () => { test('create: only required params', async () => { const responsePromise = client.files.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), + purpose: 'assistants', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -22,6 +23,8 @@ describe('resource files', () => { test('create: required and optional params', async () => { const response = await client.files.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), + purpose: 'assistants', + expires_after: { anchor: 'created_at', seconds: 0 }, }); }); From 17b9eb3c40957b63d2a71f7fc21944abcc720d80 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 03:38:30 +0000 Subject: [PATCH 08/26] docs: update examples --- .stats.yml | 2 +- README.md | 73 ++++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 56 insertions(+), 19 deletions(-) diff --git a/.stats.yml b/.stats.yml index 20dba32..36fa92d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 105 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-d7bea816190382a93511491e33d1f37f707620926ab133ae8ce0883d763df741.yml openapi_spec_hash: f73b3af77108625edae3f25972b9e665 -config_hash: 
5b643c97c83a497d7d346253f1e175f3 +config_hash: 06f95bf1b7786cfe2470af8f238fc36d diff --git a/README.md b/README.md index 0724245..c0f0665 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,28 @@ const model = await client.models.register({ model_id: 'model_id' }); console.log(model.identifier); ``` +## Streaming responses + +We provide support for streaming responses using Server Sent Events (SSE). + +```ts +import LlamaStackClient from 'llama-stack-client'; + +const client = new LlamaStackClient(); + +const stream = await client.chat.completions.create({ + messages: [{ content: 'string', role: 'user' }], + model: 'model', + stream: true, +}); +for await (const chatCompletionChunk of stream) { + console.log(chatCompletionChunk); +} +``` + +If you need to cancel a stream, you can `break` from the loop +or call `stream.controller.abort()`. + ### Request & Response types This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: @@ -42,7 +64,13 @@ import LlamaStackClient from 'llama-stack-client'; const client = new LlamaStackClient(); -const toolGroups: LlamaStackClient.ToolgroupListResponse = await client.toolgroups.list(); +const params: LlamaStackClient.Chat.CompletionCreateParams = { + messages: [{ content: 'string', role: 'user' }], + model: 'model', +}; +const completion: LlamaStackClient.Chat.CompletionCreateResponse = await client.chat.completions.create( + params, +); ``` Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. @@ -85,15 +113,17 @@ a subclass of `APIError` will be thrown: ```ts -const toolGroups = await client.toolgroups.list().catch(async (err) => { - if (err instanceof LlamaStackClient.APIError) { - console.log(err.status); // 400 - console.log(err.name); // BadRequestError - console.log(err.headers); // {server: 'nginx', ...} - } else { - throw err; - } -}); +const completion = await client.chat.completions + .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }) + .catch(async (err) => { + if (err instanceof LlamaStackClient.APIError) { + console.log(err.status); // 400 + console.log(err.name); // BadRequestError + console.log(err.headers); // {server: 'nginx', ...} + } else { + throw err; + } + }); ``` Error codes are as follows: @@ -125,7 +155,7 @@ const client = new LlamaStackClient({ }); // Or, configure per-request: -await client.toolgroups.list({ +await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, { maxRetries: 5, }); ``` @@ -142,7 +172,7 @@ const client = new LlamaStackClient({ }); // Override per-request: -await client.toolgroups.list({ +await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, { timeout: 5 * 1000, }); ``` @@ -163,13 +193,17 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new LlamaStackClient(); -const response = await client.toolgroups.list().asResponse(); +const response = await client.chat.completions + .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }) + .asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object -const { data: toolGroups, response: raw } = await client.toolgroups.list().withResponse(); +const { data: completion, response: raw } = await client.chat.completions + .create({ messages: [{ 
content: 'string', role: 'user' }], model: 'model' }) + .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(toolGroups); +console.log(completion); ``` ### Making custom/undocumented requests @@ -273,9 +307,12 @@ const client = new LlamaStackClient({ }); // Override per-request: -await client.toolgroups.list({ - httpAgent: new http.Agent({ keepAlive: false }), -}); +await client.chat.completions.create( + { messages: [{ content: 'string', role: 'user' }], model: 'model' }, + { + httpAgent: new http.Agent({ keepAlive: false }), + }, +); ``` ## Semantic versioning From a38809dd8446af7fa9ceb373e3479d87c1416366 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 03:39:35 +0000 Subject: [PATCH 09/26] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 36fa92d..755df45 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 105 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-d7bea816190382a93511491e33d1f37f707620926ab133ae8ce0883d763df741.yml openapi_spec_hash: f73b3af77108625edae3f25972b9e665 -config_hash: 06f95bf1b7786cfe2470af8f238fc36d +config_hash: 548f336ac1b68ab1dfe385b79df764dd From b0676c837bbd835276fea3fe12f435afdbb75ef7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 17:55:56 +0000 Subject: [PATCH 10/26] feat(api): SDKs for vector store file batches --- .stats.yml | 8 +-- api.md | 6 +-- src/resources/files.ts | 14 ----- src/resources/vector-stores/file-batches.ts | 5 ++ src/resources/vector-stores/files.ts | 54 ------------------- src/resources/vector-stores/index.ts | 2 +- src/resources/vector-stores/vector-stores.ts | 8 ++- tests/api-resources/files.test.ts | 36 ------------- .../api-resources/vector-stores/files.test.ts | 18 ------- 9 files changed, 18 insertions(+), 133 deletions(-) create mode 100644 src/resources/vector-stores/file-batches.ts diff --git a/.stats.yml b/.stats.yml index 755df45..cbb0181 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 105 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-d7bea816190382a93511491e33d1f37f707620926ab133ae8ce0883d763df741.yml -openapi_spec_hash: f73b3af77108625edae3f25972b9e665 -config_hash: 548f336ac1b68ab1dfe385b79df764dd +configured_endpoints: 102 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-3c569913f686c852ab392d470b9d905cc944d5a46a2324a21aa1376fb24e6714.yml +openapi_spec_hash: 455f397c5f401ea425a4064bb39c6801 +config_hash: 53c09ba1fdae5045de1860c479a51dc7 diff --git a/api.md b/api.md index 619a82c..cb56b9d 100644 --- a/api.md +++ b/api.md @@ -313,7 +313,6 @@ Types: - VectorStoreFile - FileDeleteResponse -- FileContentResponse Methods: @@ -322,7 +321,8 @@ Methods: - client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile - client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage - client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse -- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse + +## FileBatches # Models @@ -524,7 +524,5 @@ Types: Methods: - client.files.create({ ...params }) -> File -- 
client.files.retrieve(fileId) -> File - client.files.list({ ...params }) -> FilesOpenAICursorPage -- client.files.delete(fileId) -> DeleteFileResponse - client.files.content(fileId) -> unknown diff --git a/src/resources/files.ts b/src/resources/files.ts index 077487a..c7fa624 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -18,13 +18,6 @@ export class Files extends APIResource { return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options })); } - /** - * Returns information about a specific file. - */ - retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/files/${fileId}`, options); - } - /** * Returns a list of files that belong to the user's organization. */ @@ -40,13 +33,6 @@ export class Files extends APIResource { return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options }); } - /** - * Delete a file. - */ - delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/files/${fileId}`, options); - } - /** * Returns the contents of the specified file. */ diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts new file mode 100644 index 0000000..7608d7e --- /dev/null +++ b/src/resources/vector-stores/file-batches.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; + +export class FileBatches extends APIResource {} diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts index 9af2869..4dd6450 100644 --- a/src/resources/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -77,17 +77,6 @@ export class Files extends APIResource { ): Core.APIPromise { return this._client.delete(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); } - - /** - * Retrieves the contents of a vector store file. - */ - content( - vectorStoreId: string, - fileId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); - } } export class VectorStoreFilesOpenAICursorPage extends OpenAICursorPage {} @@ -223,48 +212,6 @@ export interface FileDeleteResponse { object: string; } -/** - * Response from retrieving the contents of a vector store file. - */ -export interface FileContentResponse { - /** - * Key-value attributes associated with the file - */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * List of content items from the file - */ - content: Array; - - /** - * Unique identifier for the file - */ - file_id: string; - - /** - * Name of the file - */ - filename: string; -} - -export namespace FileContentResponse { - /** - * Content item from a vector store file or search result. - */ - export interface Content { - /** - * The actual text content - */ - text: string; - - /** - * Content type, currently only "text" is supported - */ - type: 'text'; - } -} - export interface FileCreateParams { /** * The ID of the file to attach to the vector store. 
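Even with the per-file `content` endpoint and the top-level `files.retrieve`/`files.delete` methods removed in this patch, attaching files to a vector store keeps the shape documented just above. A minimal sketch using the surviving endpoints:

```ts
// assumes: const client = new LlamaStackClient();
const vectorStoreFile = await client.vectorStores.files.create('vector_store_id', {
  file_id: 'file_id',
});
console.log(vectorStoreFile);

// Detaching still goes through the vector-store-scoped delete:
await client.vectorStores.files.delete('vector_store_id', 'file_id');
```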
@@ -360,7 +307,6 @@ export declare namespace Files { export { type VectorStoreFile as VectorStoreFile, type FileDeleteResponse as FileDeleteResponse, - type FileContentResponse as FileContentResponse, VectorStoreFilesOpenAICursorPage as VectorStoreFilesOpenAICursorPage, type FileCreateParams as FileCreateParams, type FileUpdateParams as FileUpdateParams, diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index 4b35bbb..75b4414 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +export { FileBatches } from './file-batches'; export { VectorStoreFilesOpenAICursorPage, Files, type VectorStoreFile, type FileDeleteResponse, - type FileContentResponse, type FileCreateParams, type FileUpdateParams, type FileListParams, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 459fb54..1b3a675 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -3,9 +3,10 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; +import * as FileBatchesAPI from './file-batches'; +import { FileBatches } from './file-batches'; import * as FilesAPI from './files'; import { - FileContentResponse, FileCreateParams, FileDeleteResponse, FileListParams, @@ -18,6 +19,7 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../../pagination' export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); + fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client); /** * Creates a vector store. 
@@ -432,6 +434,7 @@ export namespace VectorStoreSearchParams { VectorStores.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage; VectorStores.Files = Files; VectorStores.VectorStoreFilesOpenAICursorPage = VectorStoreFilesOpenAICursorPage; +VectorStores.FileBatches = FileBatches; export declare namespace VectorStores { export { @@ -450,10 +453,11 @@ export declare namespace VectorStores { Files as Files, type VectorStoreFile as VectorStoreFile, type FileDeleteResponse as FileDeleteResponse, - type FileContentResponse as FileContentResponse, VectorStoreFilesOpenAICursorPage as VectorStoreFilesOpenAICursorPage, type FileCreateParams as FileCreateParams, type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; + + export { FileBatches as FileBatches }; } diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index e3eec3d..34831cb 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -28,24 +28,6 @@ describe('resource files', () => { }); }); - test('retrieve', async () => { - const responsePromise = client.files.retrieve('file_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('retrieve: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.files.retrieve('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); - }); - test('list', async () => { const responsePromise = client.files.list(); const rawResponse = await responsePromise.asResponse(); @@ -74,24 +56,6 @@ describe('resource files', () => { ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - test('delete', async () => { - const responsePromise = client.files.delete('file_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('delete: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.files.delete('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); - }); - test('content', async () => { const responsePromise = client.files.content('file_id'); const rawResponse = await responsePromise.asResponse(); diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts index 1f6c452..3a9268b 100644 --- a/tests/api-resources/vector-stores/files.test.ts +++ b/tests/api-resources/vector-stores/files.test.ts @@ -108,22 +108,4 @@ describe('resource files', () => { client.vectorStores.files.delete('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - - test('content', 
async () => { - const responsePromise = client.vectorStores.files.content('vector_store_id', 'file_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('content: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.vectorStores.files.content('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); }); From 88731bfecd6f548ae79cbe2a1125620e488c42a3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 17:59:02 +0000 Subject: [PATCH 11/26] feat(api): SDKs for vector store file batches apis --- .stats.yml | 6 +- api.md | 16 ++ src/resources/files.ts | 14 + src/resources/vector-stores/file-batches.ts | 260 +++++++++++++++++- src/resources/vector-stores/files.ts | 54 ++++ src/resources/vector-stores/index.ts | 9 +- src/resources/vector-stores/vector-stores.ts | 18 +- tests/api-resources/files.test.ts | 36 +++ .../vector-stores/file-batches.test.ts | 101 +++++++ .../api-resources/vector-stores/files.test.ts | 18 ++ 10 files changed, 525 insertions(+), 7 deletions(-) create mode 100644 tests/api-resources/vector-stores/file-batches.test.ts diff --git a/.stats.yml b/.stats.yml index cbb0181..b453267 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 102 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-3c569913f686c852ab392d470b9d905cc944d5a46a2324a21aa1376fb24e6714.yml -openapi_spec_hash: 455f397c5f401ea425a4064bb39c6801 +configured_endpoints: 109 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-05bb7b0636a86ad0b485a5f2abfbd6b9e1873e802235f340af291f9ad9fb03b3.yml +openapi_spec_hash: a78c30e308bc39473ea8e9ae9d0b726c config_hash: 53c09ba1fdae5045de1860c479a51dc7 diff --git a/api.md b/api.md index cb56b9d..928c554 100644 --- a/api.md +++ b/api.md @@ -313,6 +313,7 @@ Types: - VectorStoreFile - FileDeleteResponse +- FileContentResponse Methods: @@ -321,9 +322,22 @@ Methods: - client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile - client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage - client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse ## FileBatches +Types: + +- ListVectorStoreFilesInBatchResponse +- VectorStoreFileBatches + +Methods: + +- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatches +- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatches +- client.vectorStores.fileBatches.list(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage +- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatches + # Models Types: @@ -524,5 +538,7 @@ Types: Methods: - client.files.create({ ...params }) -> File +- 
client.files.retrieve(fileId) -> File - client.files.list({ ...params }) -> FilesOpenAICursorPage +- client.files.delete(fileId) -> DeleteFileResponse - client.files.content(fileId) -> unknown diff --git a/src/resources/files.ts b/src/resources/files.ts index c7fa624..077487a 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -18,6 +18,13 @@ export class Files extends APIResource { return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options })); } + /** + * Returns information about a specific file. + */ + retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/files/${fileId}`, options); + } + /** * Returns a list of files that belong to the user's organization. */ @@ -33,6 +40,13 @@ export class Files extends APIResource { return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options }); } + /** + * Delete a file. + */ + delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/files/${fileId}`, options); + } + /** * Returns the contents of the specified file. */ diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts index 7608d7e..532bd36 100644 --- a/src/resources/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,5 +1,263 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as FilesAPI from './files'; +import { VectorStoreFilesOpenAICursorPage } from './files'; +import { type OpenAICursorPageParams } from '../../pagination'; -export class FileBatches extends APIResource {} +export class FileBatches extends APIResource { + /** + * Create a vector store file batch. + */ + create( + vectorStoreId: string, + body: FileBatchCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches`, { body, ...options }); + } + + /** + * Retrieve a vector store file batch. + */ + retrieve( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}`, options); + } + + /** + * Returns a list of vector store files in a batch. + */ + list( + vectorStoreId: string, + batchId: string, + query?: FileBatchListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + vectorStoreId: string, + batchId: string, + query: FileBatchListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(vectorStoreId, batchId, {}, query); + } + return this._client.getAPIList( + `/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`, + VectorStoreFilesOpenAICursorPage, + { query, ...options }, + ); + } + + /** + * Cancels a vector store file batch. + */ + cancel( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, options); + } +} + +/** + * Response from listing files in a vector store file batch. 
+ */ +export interface ListVectorStoreFilesInBatchResponse { + /** + * List of vector store file objects in the batch + */ + data: Array; + + /** + * Whether there are more files available beyond this page + */ + has_more: boolean; + + /** + * Object type identifier, always "list" + */ + object: string; + + /** + * (Optional) ID of the first file in the list for pagination + */ + first_id?: string; + + /** + * (Optional) ID of the last file in the list for pagination + */ + last_id?: string; +} + +/** + * OpenAI Vector Store File Batch object. + */ +export interface VectorStoreFileBatches { + /** + * Unique identifier for the file batch + */ + id: string; + + /** + * Timestamp when the file batch was created + */ + created_at: number; + + /** + * File processing status counts for the batch + */ + file_counts: VectorStoreFileBatches.FileCounts; + + /** + * Object type identifier, always "vector_store.file_batch" + */ + object: string; + + /** + * Current processing status of the file batch + */ + status: 'completed' | 'in_progress' | 'cancelled' | 'failed'; + + /** + * ID of the vector store containing the file batch + */ + vector_store_id: string; +} + +export namespace VectorStoreFileBatches { + /** + * File processing status counts for the batch + */ + export interface FileCounts { + /** + * Number of files that had their processing cancelled + */ + cancelled: number; + + /** + * Number of files that have been successfully processed + */ + completed: number; + + /** + * Number of files that failed to process + */ + failed: number; + + /** + * Number of files currently being processed + */ + in_progress: number; + + /** + * Total number of files in the vector store + */ + total: number; + } +} + +export interface FileBatchCreateParams { + /** + * A list of File IDs that the vector store should use. + */ + file_ids: Array; + + /** + * (Optional) Key-value attributes to store with the files. + */ + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) The chunking strategy used to chunk the file(s). Defaults to auto. + */ + chunking_strategy?: + | FileBatchCreateParams.VectorStoreChunkingStrategyAuto + | FileBatchCreateParams.VectorStoreChunkingStrategyStatic; +} + +export namespace FileBatchCreateParams { + /** + * Automatic chunking strategy for vector store files. + */ + export interface VectorStoreChunkingStrategyAuto { + /** + * Strategy type, always "auto" for automatic chunking + */ + type: 'auto'; + } + + /** + * Static chunking strategy with configurable parameters. + */ + export interface VectorStoreChunkingStrategyStatic { + /** + * Configuration parameters for the static chunking strategy + */ + static: VectorStoreChunkingStrategyStatic.Static; + + /** + * Strategy type, always "static" for static chunking + */ + type: 'static'; + } + + export namespace VectorStoreChunkingStrategyStatic { + /** + * Configuration parameters for the static chunking strategy + */ + export interface Static { + /** + * Number of tokens to overlap between adjacent chunks + */ + chunk_overlap_tokens: number; + + /** + * Maximum number of tokens per chunk, must be between 100 and 4096 + */ + max_chunk_size_tokens: number; + } + } +} + +export interface FileBatchListParams extends OpenAICursorPageParams { + /** + * A cursor for use in pagination. `before` is an object ID that defines your place + * in the list. + */ + before?: string; + + /** + * Filter by file status. One of in_progress, completed, failed, cancelled. 
+ */ + filter?: string; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: string; +} + +export declare namespace FileBatches { + export { + type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse, + type VectorStoreFileBatches as VectorStoreFileBatches, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListParams as FileBatchListParams, + }; +} + +export { VectorStoreFilesOpenAICursorPage }; diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts index 4dd6450..9af2869 100644 --- a/src/resources/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -77,6 +77,17 @@ export class Files extends APIResource { ): Core.APIPromise { return this._client.delete(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); } + + /** + * Retrieves the contents of a vector store file. + */ + content( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); + } } export class VectorStoreFilesOpenAICursorPage extends OpenAICursorPage {} @@ -212,6 +223,48 @@ export interface FileDeleteResponse { object: string; } +/** + * Response from retrieving the contents of a vector store file. + */ +export interface FileContentResponse { + /** + * Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * List of content items from the file + */ + content: Array; + + /** + * Unique identifier for the file + */ + file_id: string; + + /** + * Name of the file + */ + filename: string; +} + +export namespace FileContentResponse { + /** + * Content item from a vector store file or search result. + */ + export interface Content { + /** + * The actual text content + */ + text: string; + + /** + * Content type, currently only "text" is supported + */ + type: 'text'; + } +} + export interface FileCreateParams { /** * The ID of the file to attach to the vector store. @@ -307,6 +360,7 @@ export declare namespace Files { export { type VectorStoreFile as VectorStoreFile, type FileDeleteResponse as FileDeleteResponse, + type FileContentResponse as FileContentResponse, VectorStoreFilesOpenAICursorPage as VectorStoreFilesOpenAICursorPage, type FileCreateParams as FileCreateParams, type FileUpdateParams as FileUpdateParams, diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index 75b4414..59545d6 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -1,11 +1,18 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
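// Editor's sketch (not part of the generated patch): the batch lifecycle the types
// above describe — create a batch, poll until it leaves `in_progress`, then page
// through the batch's files. IDs are placeholders, the chunk sizes respect the
// documented 100-4096 token bound, and `for await` assumes the SDK's standard
// cursor-page iteration helpers.
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

// Kick off a batch over several already-uploaded files with a static chunking strategy.
let batch = await client.vectorStores.fileBatches.create('vs_123', {
  file_ids: ['file_1', 'file_2'],
  chunking_strategy: { type: 'static', static: { chunk_overlap_tokens: 400, max_chunk_size_tokens: 800 } },
});

// Poll the batch until processing settles on completed / failed / cancelled.
while (batch.status === 'in_progress') {
  await new Promise((resolve) => setTimeout(resolve, 1000));
  batch = await client.vectorStores.fileBatches.retrieve('vs_123', batch.id);
}

// Page through the files that were successfully processed in this batch.
for await (const file of client.vectorStores.fileBatches.list('vs_123', batch.id, { filter: 'completed' })) {
  console.log(file);
}
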
-export { FileBatches } from './file-batches'; +export { + FileBatches, + type ListVectorStoreFilesInBatchResponse, + type VectorStoreFileBatches, + type FileBatchCreateParams, + type FileBatchListParams, +} from './file-batches'; export { VectorStoreFilesOpenAICursorPage, Files, type VectorStoreFile, type FileDeleteResponse, + type FileContentResponse, type FileCreateParams, type FileUpdateParams, type FileListParams, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 1b3a675..01afd40 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -4,9 +4,16 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as FileBatchesAPI from './file-batches'; -import { FileBatches } from './file-batches'; +import { + FileBatchCreateParams, + FileBatchListParams, + FileBatches, + ListVectorStoreFilesInBatchResponse, + VectorStoreFileBatches, +} from './file-batches'; import * as FilesAPI from './files'; import { + FileContentResponse, FileCreateParams, FileDeleteResponse, FileListParams, @@ -453,11 +460,18 @@ export declare namespace VectorStores { Files as Files, type VectorStoreFile as VectorStoreFile, type FileDeleteResponse as FileDeleteResponse, + type FileContentResponse as FileContentResponse, VectorStoreFilesOpenAICursorPage as VectorStoreFilesOpenAICursorPage, type FileCreateParams as FileCreateParams, type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; - export { FileBatches as FileBatches }; + export { + FileBatches as FileBatches, + type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse, + type VectorStoreFileBatches as VectorStoreFileBatches, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListParams as FileBatchListParams, + }; } diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 34831cb..e3eec3d 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -28,6 +28,24 @@ describe('resource files', () => { }); }); + test('retrieve', async () => { + const responsePromise = client.files.retrieve('file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.files.retrieve('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + test('list', async () => { const responsePromise = client.files.list(); const rawResponse = await responsePromise.asResponse(); @@ -56,6 +74,24 @@ describe('resource files', () => { ).rejects.toThrow(LlamaStackClient.NotFoundError); }); + test('delete', async () => { + const responsePromise = client.files.delete('file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const 
dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.files.delete('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + test('content', async () => { const responsePromise = client.files.content('file_id'); const rawResponse = await responsePromise.asResponse(); diff --git a/tests/api-resources/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts new file mode 100644 index 0000000..bc5018c --- /dev/null +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -0,0 +1,101 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource fileBatches', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.fileBatches.create('vector_store_id', { + file_ids: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.fileBatches.create('vector_store_id', { + file_ids: ['string'], + attributes: { foo: true }, + chunking_strategy: { type: 'auto' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.fileBatches.retrieve('vector_store_id', 'batch_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.fileBatches.retrieve('vector_store_id', 'batch_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.fileBatches.list('vector_store_id', 'batch_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure 
the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.fileBatches.list('vector_store_id', 'batch_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.fileBatches.list( + 'vector_store_id', + 'batch_id', + { after: 'after', before: 'before', filter: 'filter', limit: 0, order: 'order' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('cancel', async () => { + const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts index 3a9268b..1f6c452 100644 --- a/tests/api-resources/vector-stores/files.test.ts +++ b/tests/api-resources/vector-stores/files.test.ts @@ -108,4 +108,22 @@ describe('resource files', () => { client.vectorStores.files.delete('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); + + test('content', async () => { + const responsePromise = client.vectorStores.files.content('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.content('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); }); From 793e0694d75c2af4535bf991d5858cd1f21300b4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 19:35:49 +0000 Subject: [PATCH 12/26] feat(api): moving { rerank, agents } to `client.alpha.` --- .stats.yml | 6 +- api.md | 143 +++++++++--------- src/index.ts | 46 +----- src/resources/alpha.ts | 3 + src/resources/{ => alpha}/agents.ts | 0 src/resources/{ => alpha}/agents/agents.ts | 8 +- src/resources/{ => alpha}/agents/index.ts | 0 
src/resources/{ => alpha}/agents/session.ts | 6 +- src/resources/{ => alpha}/agents/steps.ts | 4 +- src/resources/{ => alpha}/agents/turn.ts | 26 ++-- src/resources/alpha/alpha.ts | 49 ++++++ src/resources/alpha/index.ts | 17 +++ src/resources/{ => alpha}/inference.ts | 4 +- src/resources/index.ts | 15 +- src/resources/shared.ts | 66 -------- .../{ => alpha}/agents/agents.test.ts | 26 ++-- .../{ => alpha}/agents/session.test.ts | 20 +-- .../{ => alpha}/agents/steps.test.ts | 9 +- .../{ => alpha}/agents/turn.test.ts | 14 +- .../{ => alpha}/inference.test.ts | 8 +- 20 files changed, 217 insertions(+), 253 deletions(-) create mode 100644 src/resources/alpha.ts rename src/resources/{ => alpha}/agents.ts (100%) rename src/resources/{ => alpha}/agents/agents.ts (97%) rename src/resources/{ => alpha}/agents/index.ts (100%) rename src/resources/{ => alpha}/agents/session.ts (96%) rename src/resources/{ => alpha}/agents/steps.ts (91%) rename src/resources/{ => alpha}/agents/turn.ts (95%) create mode 100644 src/resources/alpha/alpha.ts create mode 100644 src/resources/alpha/index.ts rename src/resources/{ => alpha}/inference.ts (98%) rename tests/api-resources/{ => alpha}/agents/agents.test.ts (84%) rename tests/api-resources/{ => alpha}/agents/session.test.ts (81%) rename tests/api-resources/{ => alpha}/agents/steps.test.ts (82%) rename tests/api-resources/{ => alpha}/agents/turn.test.ts (81%) rename tests/api-resources/{ => alpha}/inference.test.ts (82%) diff --git a/.stats.yml b/.stats.yml index b453267..35d7077 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-05bb7b0636a86ad0b485a5f2abfbd6b9e1873e802235f340af291f9ad9fb03b3.yml -openapi_spec_hash: a78c30e308bc39473ea8e9ae9d0b726c -config_hash: 53c09ba1fdae5045de1860c479a51dc7 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml +openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3 +config_hash: 03aab396899c7d9aa3fba867ce54824b diff --git a/api.md b/api.md index 928c554..ba8c552 100644 --- a/api.md +++ b/api.md @@ -3,13 +3,11 @@ Types: - AgentConfig -- ChatCompletionResponse - CompletionMessage - Document - InterleavedContent - InterleavedContentItem - Message -- Metric - ParamType - QueryConfig - QueryResult @@ -19,7 +17,6 @@ Types: - ScoringResult - SystemMessage - ToolCall -- ToolParamDefinition - ToolResponseMessage - UserMessage @@ -97,65 +94,6 @@ Methods: - client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse -# Agents - -Types: - -- InferenceStep -- MemoryRetrievalStep -- ShieldCallStep -- ToolExecutionStep -- ToolResponse -- AgentCreateResponse -- AgentRetrieveResponse -- AgentListResponse - -Methods: - -- client.agents.create({ ...params }) -> AgentCreateResponse -- client.agents.retrieve(agentId) -> AgentRetrieveResponse -- client.agents.list({ ...params }) -> AgentListResponse -- client.agents.delete(agentId) -> void - -## Session - -Types: - -- Session -- SessionCreateResponse -- SessionListResponse - -Methods: - -- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse -- client.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session -- client.agents.session.list(agentId, { ...params }) -> SessionListResponse -- client.agents.session.delete(agentId, sessionId) -> void - -## Steps - -Types: - -- 
StepRetrieveResponse - -Methods: - -- client.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse - -## Turn - -Types: - -- AgentTurnResponseStreamChunk -- Turn -- TurnResponseEvent - -Methods: - -- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn -- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn -- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn - # Datasets Types: @@ -212,16 +150,6 @@ Methods: - client.inspect.health() -> HealthInfo - client.inspect.version() -> VersionInfo -# Inference - -Types: - -- InferenceRerankResponse - -Methods: - -- client.inference.rerank({ ...params }) -> InferenceRerankResponse - # Embeddings Types: @@ -542,3 +470,74 @@ Methods: - client.files.list({ ...params }) -> FilesOpenAICursorPage - client.files.delete(fileId) -> DeleteFileResponse - client.files.content(fileId) -> unknown + +# Alpha + +## Inference + +Types: + +- InferenceRerankResponse + +Methods: + +- client.alpha.inference.rerank({ ...params }) -> InferenceRerankResponse + +## Agents + +Types: + +- InferenceStep +- MemoryRetrievalStep +- ShieldCallStep +- ToolExecutionStep +- ToolResponse +- AgentCreateResponse +- AgentRetrieveResponse +- AgentListResponse + +Methods: + +- client.alpha.agents.create({ ...params }) -> AgentCreateResponse +- client.alpha.agents.retrieve(agentId) -> AgentRetrieveResponse +- client.alpha.agents.list({ ...params }) -> AgentListResponse +- client.alpha.agents.delete(agentId) -> void + +### Session + +Types: + +- Session +- SessionCreateResponse +- SessionListResponse + +Methods: + +- client.alpha.agents.session.create(agentId, { ...params }) -> SessionCreateResponse +- client.alpha.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session +- client.alpha.agents.session.list(agentId, { ...params }) -> SessionListResponse +- client.alpha.agents.session.delete(agentId, sessionId) -> void + +### Steps + +Types: + +- StepRetrieveResponse + +Methods: + +- client.alpha.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse + +### Turn + +Types: + +- AgentTurnResponseStreamChunk +- Turn +- TurnResponseEvent + +Methods: + +- client.alpha.agents.turn.create(agentId, sessionId, { ...params }) -> Turn +- client.alpha.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn +- client.alpha.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn diff --git a/src/index.ts b/src/index.ts index 9839985..417e834 100644 --- a/src/index.ts +++ b/src/index.ts @@ -49,7 +49,6 @@ import { FilesOpenAICursorPage, ListFilesResponse, } from './resources/files'; -import { Inference, InferenceRerankParams, InferenceRerankResponse } from './resources/inference'; import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect'; import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations'; import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers'; @@ -123,19 +122,7 @@ import { VectorIoInsertParams, VectorIoQueryParams, } from './resources/vector-io'; -import { - AgentCreateParams, - AgentCreateResponse, - AgentListParams, - AgentListResponse, - AgentRetrieveResponse, - Agents, - InferenceStep, - MemoryRetrievalStep, - ShieldCallStep, - ToolExecutionStep, - ToolResponse, -} from './resources/agents/agents'; +import { Alpha } from './resources/alpha/alpha'; import { Chat, ChatCompletionChunk } from './resources/chat/chat'; import { BenchmarkConfig, @@ -309,11 
+296,9 @@ export class LlamaStackClient extends Core.APIClient { tools: API.Tools = new API.Tools(this); toolRuntime: API.ToolRuntime = new API.ToolRuntime(this); responses: API.Responses = new API.Responses(this); - agents: API.Agents = new API.Agents(this); datasets: API.Datasets = new API.Datasets(this); eval: API.Eval = new API.Eval(this); inspect: API.Inspect = new API.Inspect(this); - inference: API.Inference = new API.Inference(this); embeddings: API.Embeddings = new API.Embeddings(this); chat: API.Chat = new API.Chat(this); completions: API.Completions = new API.Completions(this); @@ -333,6 +318,7 @@ export class LlamaStackClient extends Core.APIClient { scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this); benchmarks: API.Benchmarks = new API.Benchmarks(this); files: API.Files = new API.Files(this); + alpha: API.Alpha = new API.Alpha(this); /** * Check whether the base URL is set to its default. @@ -389,11 +375,9 @@ LlamaStackClient.Tools = Tools; LlamaStackClient.ToolRuntime = ToolRuntime; LlamaStackClient.Responses = Responses; LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage; -LlamaStackClient.Agents = Agents; LlamaStackClient.Datasets = Datasets; LlamaStackClient.Eval = Eval; LlamaStackClient.Inspect = Inspect; -LlamaStackClient.Inference = Inference; LlamaStackClient.Embeddings = Embeddings; LlamaStackClient.Chat = Chat; LlamaStackClient.Completions = Completions; @@ -415,6 +399,7 @@ LlamaStackClient.ScoringFunctions = ScoringFunctions; LlamaStackClient.Benchmarks = Benchmarks; LlamaStackClient.Files = Files; LlamaStackClient.FilesOpenAICursorPage = FilesOpenAICursorPage; +LlamaStackClient.Alpha = Alpha; export declare namespace LlamaStackClient { export type RequestOptions = Core.RequestOptions; @@ -469,20 +454,6 @@ export declare namespace LlamaStackClient { type ResponseListParams as ResponseListParams, }; - export { - Agents as Agents, - type InferenceStep as InferenceStep, - type MemoryRetrievalStep as MemoryRetrievalStep, - type ShieldCallStep as ShieldCallStep, - type ToolExecutionStep as ToolExecutionStep, - type ToolResponse as ToolResponse, - type AgentCreateResponse as AgentCreateResponse, - type AgentRetrieveResponse as AgentRetrieveResponse, - type AgentListResponse as AgentListResponse, - type AgentCreateParams as AgentCreateParams, - type AgentListParams as AgentListParams, - }; - export { Datasets as Datasets, type ListDatasetsResponse as ListDatasetsResponse, @@ -514,12 +485,6 @@ export declare namespace LlamaStackClient { type VersionInfo as VersionInfo, }; - export { - Inference as Inference, - type InferenceRerankResponse as InferenceRerankResponse, - type InferenceRerankParams as InferenceRerankParams, - }; - export { Embeddings as Embeddings, type CreateEmbeddingsResponse as CreateEmbeddingsResponse, @@ -676,14 +641,14 @@ export declare namespace LlamaStackClient { type FileListParams as FileListParams, }; + export { Alpha as Alpha }; + export type AgentConfig = API.AgentConfig; - export type ChatCompletionResponse = API.ChatCompletionResponse; export type CompletionMessage = API.CompletionMessage; export type Document = API.Document; export type InterleavedContent = API.InterleavedContent; export type InterleavedContentItem = API.InterleavedContentItem; export type Message = API.Message; - export type Metric = API.Metric; export type ParamType = API.ParamType; export type QueryConfig = API.QueryConfig; export type QueryResult = API.QueryResult; @@ -693,7 +658,6 @@ export declare namespace 
LlamaStackClient { export type ScoringResult = API.ScoringResult; export type SystemMessage = API.SystemMessage; export type ToolCall = API.ToolCall; - export type ToolParamDefinition = API.ToolParamDefinition; export type ToolResponseMessage = API.ToolResponseMessage; export type UserMessage = API.UserMessage; } diff --git a/src/resources/alpha.ts b/src/resources/alpha.ts new file mode 100644 index 0000000..446b643 --- /dev/null +++ b/src/resources/alpha.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './alpha/index'; diff --git a/src/resources/agents.ts b/src/resources/alpha/agents.ts similarity index 100% rename from src/resources/agents.ts rename to src/resources/alpha/agents.ts diff --git a/src/resources/agents/agents.ts b/src/resources/alpha/agents/agents.ts similarity index 97% rename from src/resources/agents/agents.ts rename to src/resources/alpha/agents/agents.ts index 41e5102..c624056 100644 --- a/src/resources/agents/agents.ts +++ b/src/resources/alpha/agents/agents.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { isRequestOptions } from '../../core'; -import * as Core from '../../core'; -import * as Shared from '../shared'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as Shared from '../../shared'; import * as SessionAPI from './session'; import { Session, diff --git a/src/resources/agents/index.ts b/src/resources/alpha/agents/index.ts similarity index 100% rename from src/resources/agents/index.ts rename to src/resources/alpha/agents/index.ts diff --git a/src/resources/agents/session.ts b/src/resources/alpha/agents/session.ts similarity index 96% rename from src/resources/agents/session.ts rename to src/resources/alpha/agents/session.ts index 35c8511..c5d6b99 100644 --- a/src/resources/agents/session.ts +++ b/src/resources/alpha/agents/session.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { isRequestOptions } from '../../core'; -import * as Core from '../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; import * as TurnAPI from './turn'; export class SessionResource extends APIResource { diff --git a/src/resources/agents/steps.ts b/src/resources/alpha/agents/steps.ts similarity index 91% rename from src/resources/agents/steps.ts rename to src/resources/alpha/agents/steps.ts index 8d2d821..b7672bc 100644 --- a/src/resources/agents/steps.ts +++ b/src/resources/alpha/agents/steps.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../resource'; -import * as Core from '../../core'; +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; import * as AgentsAPI from './agents'; export class Steps extends APIResource { diff --git a/src/resources/agents/turn.ts b/src/resources/alpha/agents/turn.ts similarity index 95% rename from src/resources/agents/turn.ts rename to src/resources/alpha/agents/turn.ts index f33db5d..50c6807 100644 --- a/src/resources/agents/turn.ts +++ b/src/resources/alpha/agents/turn.ts @@ -1,12 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { APIPromise } from '../../core'; -import * as Core from '../../core'; +import { APIResource } from '../../../resource'; +import { APIPromise } from '../../../core'; +import * as Core from '../../../core'; import * as TurnAPI from './turn'; -import * as Shared from '../shared'; +import * as Shared from '../../shared'; import * as AgentsAPI from './agents'; -import { Stream } from '../../streaming'; +import { Stream } from '../../../streaming'; export class TurnResource extends APIResource { /** @@ -23,24 +23,24 @@ export class TurnResource extends APIResource { sessionId: string, body: TurnCreateParamsStreaming, options?: Core.RequestOptions, - ): APIPromise>; + ): APIPromise>; create( agentId: string, sessionId: string, body: TurnCreateParamsBase, options?: Core.RequestOptions, - ): APIPromise | Turn>; + ): APIPromise | Turn>; create( agentId: string, sessionId: string, body: TurnCreateParams, options?: Core.RequestOptions, - ): APIPromise | APIPromise> { + ): APIPromise | APIPromise> { return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, { body, ...options, stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + }) as APIPromise | APIPromise>; } /** @@ -74,26 +74,26 @@ export class TurnResource extends APIResource { turnId: string, body: TurnResumeParamsStreaming, options?: Core.RequestOptions, - ): APIPromise>; + ): APIPromise>; resume( agentId: string, sessionId: string, turnId: string, body: TurnResumeParamsBase, options?: Core.RequestOptions, - ): APIPromise | Turn>; + ): APIPromise | Turn>; resume( agentId: string, sessionId: string, turnId: string, body: TurnResumeParams, options?: Core.RequestOptions, - ): APIPromise | APIPromise> { + ): APIPromise | APIPromise> { return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, { body, ...options, stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + }) as APIPromise | APIPromise>; } } diff --git a/src/resources/alpha/alpha.ts b/src/resources/alpha/alpha.ts new file mode 100644 index 0000000..4f7df9f --- /dev/null +++ b/src/resources/alpha/alpha.ts @@ -0,0 +1,49 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
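// Editor's sketch (not part of the generated patch): what the relocated streaming
// surface looks like from user code after this move. With `stream: true`, the
// overloads above resolve to a Stream<AgentTurnResponseStreamChunk>. The IDs and
// message content are placeholders taken from the updated tests.
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

const stream = await client.alpha.agents.turn.create('agent_id', 'session_id', {
  messages: [{ content: 'string', role: 'user' }],
  stream: true,
});
for await (const chunk of stream) {
  console.log(chunk); // each chunk is an AgentTurnResponseStreamChunk
}
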
+ +import { APIResource } from '../../resource'; +import * as InferenceAPI from './inference'; +import { Inference, InferenceRerankParams, InferenceRerankResponse } from './inference'; +import * as AgentsAPI from './agents/agents'; +import { + AgentCreateParams, + AgentCreateResponse, + AgentListParams, + AgentListResponse, + AgentRetrieveResponse, + Agents, + InferenceStep, + MemoryRetrievalStep, + ShieldCallStep, + ToolExecutionStep, + ToolResponse, +} from './agents/agents'; + +export class Alpha extends APIResource { + inference: InferenceAPI.Inference = new InferenceAPI.Inference(this._client); + agents: AgentsAPI.Agents = new AgentsAPI.Agents(this._client); +} + +Alpha.Inference = Inference; +Alpha.Agents = Agents; + +export declare namespace Alpha { + export { + Inference as Inference, + type InferenceRerankResponse as InferenceRerankResponse, + type InferenceRerankParams as InferenceRerankParams, + }; + + export { + Agents as Agents, + type InferenceStep as InferenceStep, + type MemoryRetrievalStep as MemoryRetrievalStep, + type ShieldCallStep as ShieldCallStep, + type ToolExecutionStep as ToolExecutionStep, + type ToolResponse as ToolResponse, + type AgentCreateResponse as AgentCreateResponse, + type AgentRetrieveResponse as AgentRetrieveResponse, + type AgentListResponse as AgentListResponse, + type AgentCreateParams as AgentCreateParams, + type AgentListParams as AgentListParams, + }; +} diff --git a/src/resources/alpha/index.ts b/src/resources/alpha/index.ts new file mode 100644 index 0000000..a941ba0 --- /dev/null +++ b/src/resources/alpha/index.ts @@ -0,0 +1,17 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + Agents, + type InferenceStep, + type MemoryRetrievalStep, + type ShieldCallStep, + type ToolExecutionStep, + type ToolResponse, + type AgentCreateResponse, + type AgentRetrieveResponse, + type AgentListResponse, + type AgentCreateParams, + type AgentListParams, +} from './agents/index'; +export { Alpha } from './alpha'; +export { Inference, type InferenceRerankResponse, type InferenceRerankParams } from './inference'; diff --git a/src/resources/inference.ts b/src/resources/alpha/inference.ts similarity index 98% rename from src/resources/inference.ts rename to src/resources/alpha/inference.ts index 055d133..ca6db21 100644 --- a/src/resources/inference.ts +++ b/src/resources/alpha/inference.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../resource'; -import * as Core from '../core'; +import { APIResource } from '../../resource'; +import * as Core from '../../core'; export class Inference extends APIResource { /** diff --git a/src/resources/index.ts b/src/resources/index.ts index 9d7792c..9b14171 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -1,19 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
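// Editor's sketch (not part of the generated patch): the `Alpha` aggregation above
// means rerank and agents now hang off `client.alpha` rather than the client root.
// Parameter values mirror the updated tests; the base URL is a placeholder.
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

const ranked = await client.alpha.inference.rerank({ items: ['string'], model: 'model', query: 'string' });
const agent = await client.alpha.agents.create({
  agent_config: { instructions: 'instructions', model: 'model' },
});
console.log(ranked, agent);
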
export * from './shared'; -export { - Agents, - type InferenceStep, - type MemoryRetrievalStep, - type ShieldCallStep, - type ToolExecutionStep, - type ToolResponse, - type AgentCreateResponse, - type AgentRetrieveResponse, - type AgentListResponse, - type AgentCreateParams, - type AgentListParams, -} from './agents/agents'; +export { Alpha } from './alpha/alpha'; export { Benchmarks, type Benchmark, @@ -61,7 +49,6 @@ export { type FileCreateParams, type FileListParams, } from './files'; -export { Inference, type InferenceRerankResponse, type InferenceRerankParams } from './inference'; export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect'; export { Models, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 764bb87..35b0da3 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -103,38 +103,6 @@ export namespace AgentConfig { } } -/** - * Response from a chat completion request. - */ -export interface ChatCompletionResponse { - /** - * The complete response message - */ - completion_message: CompletionMessage; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - /** - * (Optional) List of metrics associated with the API response - */ - metrics?: Array; -} - -export namespace ChatCompletionResponse { - /** - * Log probabilities for generated tokens. - */ - export interface Logprob { - /** - * Dictionary mapping tokens to their log probabilities - */ - logprobs_by_token: { [key: string]: number }; - } -} - /** * A message containing the model's (assistant) response in a chat conversation. */ @@ -415,26 +383,6 @@ export namespace InterleavedContentItem { */ export type Message = UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage; -/** - * A metric value included in API responses. - */ -export interface Metric { - /** - * The name of the metric - */ - metric: string; - - /** - * The numeric value of the metric - */ - value: number; - - /** - * (Optional) The unit of measurement for the metric value - */ - unit?: string; -} - /** * Parameter type for string values. */ @@ -867,20 +815,6 @@ export interface ToolCall { arguments_json?: string; } -export interface ToolParamDefinition { - param_type: string; - - default?: boolean | number | string | Array | unknown | null; - - description?: string; - - items?: boolean | number | string | Array | unknown | null; - - required?: boolean; - - title?: string; -} - /** * A message representing the result of a tool invocation. 
*/ diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/alpha/agents/agents.test.ts similarity index 84% rename from tests/api-resources/agents/agents.test.ts rename to tests/api-resources/alpha/agents/agents.test.ts index 7fbf1d7..0f26b3a 100644 --- a/tests/api-resources/agents/agents.test.ts +++ b/tests/api-resources/alpha/agents/agents.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource agents', () => { test('create: only required params', async () => { - const responsePromise = client.agents.create({ + const responsePromise = client.alpha.agents.create({ agent_config: { instructions: 'instructions', model: 'model' }, }); const rawResponse = await responsePromise.asResponse(); @@ -20,7 +20,7 @@ describe('resource agents', () => { }); test('create: required and optional params', async () => { - const response = await client.agents.create({ + const response = await client.alpha.agents.create({ agent_config: { instructions: 'instructions', model: 'model', @@ -63,7 +63,7 @@ describe('resource agents', () => { }); test('retrieve', async () => { - const responsePromise = client.agents.retrieve('agent_id'); + const responsePromise = client.alpha.agents.retrieve('agent_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -75,13 +75,13 @@ describe('resource agents', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.agents.retrieve('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); + await expect( + client.alpha.agents.retrieve('agent_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('list', async () => { - const responsePromise = client.agents.list(); + const responsePromise = client.alpha.agents.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -93,7 +93,7 @@ describe('resource agents', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.agents.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.alpha.agents.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( LlamaStackClient.NotFoundError, ); }); @@ -101,12 +101,12 @@ describe('resource agents', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.list({ limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }), + client.alpha.agents.list({ limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('delete', async () => { - const responsePromise = client.agents.delete('agent_id'); + const responsePromise = client.alpha.agents.delete('agent_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -118,8 
+118,8 @@ describe('resource agents', () => { test('delete: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.agents.delete('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); + await expect( + client.alpha.agents.delete('agent_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); }); }); diff --git a/tests/api-resources/agents/session.test.ts b/tests/api-resources/alpha/agents/session.test.ts similarity index 81% rename from tests/api-resources/agents/session.test.ts rename to tests/api-resources/alpha/agents/session.test.ts index efcf0e7..6a21a85 100644 --- a/tests/api-resources/agents/session.test.ts +++ b/tests/api-resources/alpha/agents/session.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource session', () => { test('create: only required params', async () => { - const responsePromise = client.agents.session.create('agent_id', { session_name: 'session_name' }); + const responsePromise = client.alpha.agents.session.create('agent_id', { session_name: 'session_name' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -18,11 +18,11 @@ describe('resource session', () => { }); test('create: required and optional params', async () => { - const response = await client.agents.session.create('agent_id', { session_name: 'session_name' }); + const response = await client.alpha.agents.session.create('agent_id', { session_name: 'session_name' }); }); test('retrieve', async () => { - const responsePromise = client.agents.session.retrieve('agent_id', 'session_id'); + const responsePromise = client.alpha.agents.session.retrieve('agent_id', 'session_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -35,14 +35,14 @@ describe('resource session', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.session.retrieve('agent_id', 'session_id', { path: '/_stainless_unknown_path' }), + client.alpha.agents.session.retrieve('agent_id', 'session_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('retrieve: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.session.retrieve( + client.alpha.agents.session.retrieve( 'agent_id', 'session_id', { turn_ids: ['string'] }, @@ -52,7 +52,7 @@ describe('resource session', () => { }); test('list', async () => { - const responsePromise = client.agents.session.list('agent_id'); + const responsePromise = client.alpha.agents.session.list('agent_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -65,14 +65,14 @@ describe('resource session', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are 
being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.session.list('agent_id', { path: '/_stainless_unknown_path' }), + client.alpha.agents.session.list('agent_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.session.list( + client.alpha.agents.session.list( 'agent_id', { limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }, @@ -81,7 +81,7 @@ describe('resource session', () => { }); test('delete', async () => { - const responsePromise = client.agents.session.delete('agent_id', 'session_id'); + const responsePromise = client.alpha.agents.session.delete('agent_id', 'session_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -94,7 +94,7 @@ describe('resource session', () => { test('delete: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.session.delete('agent_id', 'session_id', { path: '/_stainless_unknown_path' }), + client.alpha.agents.session.delete('agent_id', 'session_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); }); diff --git a/tests/api-resources/agents/steps.test.ts b/tests/api-resources/alpha/agents/steps.test.ts similarity index 82% rename from tests/api-resources/agents/steps.test.ts rename to tests/api-resources/alpha/agents/steps.test.ts index 0696783..ef3a136 100644 --- a/tests/api-resources/agents/steps.test.ts +++ b/tests/api-resources/alpha/agents/steps.test.ts @@ -7,7 +7,12 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource steps', () => { test('retrieve', async () => { - const responsePromise = client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id'); + const responsePromise = client.alpha.agents.steps.retrieve( + 'agent_id', + 'session_id', + 'turn_id', + 'step_id', + ); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,7 +25,7 @@ describe('resource steps', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id', { + client.alpha.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(LlamaStackClient.NotFoundError); diff --git a/tests/api-resources/agents/turn.test.ts b/tests/api-resources/alpha/agents/turn.test.ts similarity index 81% rename from tests/api-resources/agents/turn.test.ts rename to tests/api-resources/alpha/agents/turn.test.ts index dd4e3de..fc36021 100644 --- a/tests/api-resources/agents/turn.test.ts +++ b/tests/api-resources/alpha/agents/turn.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource turn', () => { test('create: only required params', 
async () => { - const responsePromise = client.agents.turn.create('agent_id', 'session_id', { + const responsePromise = client.alpha.agents.turn.create('agent_id', 'session_id', { messages: [{ content: 'string', role: 'user' }], }); const rawResponse = await responsePromise.asResponse(); @@ -20,7 +20,7 @@ describe('resource turn', () => { }); test('create: required and optional params', async () => { - const response = await client.agents.turn.create('agent_id', 'session_id', { + const response = await client.alpha.agents.turn.create('agent_id', 'session_id', { messages: [{ content: 'string', role: 'user', context: 'string' }], documents: [{ content: 'string', mime_type: 'mime_type' }], stream: false, @@ -30,7 +30,7 @@ describe('resource turn', () => { }); test('retrieve', async () => { - const responsePromise = client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id'); + const responsePromise = client.alpha.agents.turn.retrieve('agent_id', 'session_id', 'turn_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,12 +43,14 @@ describe('resource turn', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id', { path: '/_stainless_unknown_path' }), + client.alpha.agents.turn.retrieve('agent_id', 'session_id', 'turn_id', { + path: '/_stainless_unknown_path', + }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('resume: only required params', async () => { - const responsePromise = client.agents.turn.resume('agent_id', 'session_id', 'turn_id', { + const responsePromise = client.alpha.agents.turn.resume('agent_id', 'session_id', 'turn_id', { tool_responses: [{ call_id: 'call_id', content: 'string', tool_name: 'brave_search' }], }); const rawResponse = await responsePromise.asResponse(); @@ -61,7 +63,7 @@ describe('resource turn', () => { }); test('resume: required and optional params', async () => { - const response = await client.agents.turn.resume('agent_id', 'session_id', 'turn_id', { + const response = await client.alpha.agents.turn.resume('agent_id', 'session_id', 'turn_id', { tool_responses: [ { call_id: 'call_id', content: 'string', tool_name: 'brave_search', metadata: { foo: true } }, ], diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/alpha/inference.test.ts similarity index 82% rename from tests/api-resources/inference.test.ts rename to tests/api-resources/alpha/inference.test.ts index 6eef337..0d353cc 100644 --- a/tests/api-resources/inference.test.ts +++ b/tests/api-resources/alpha/inference.test.ts @@ -7,7 +7,11 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource inference', () => { test('rerank: only required params', async () => { - const responsePromise = client.inference.rerank({ items: ['string'], model: 'model', query: 'string' }); + const responsePromise = client.alpha.inference.rerank({ + items: ['string'], + model: 'model', + query: 'string', + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -18,7 +22,7 @@ describe('resource inference', () => { }); test('rerank: required and optional params', async () => { - const response = await 
client.inference.rerank({ + const response = await client.alpha.inference.rerank({ items: ['string'], model: 'model', query: 'string', From a71b421152a609e49e76d01c6e4dd46eb3dbfae0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 19:37:30 +0000 Subject: [PATCH 13/26] fix: fix stream event model reference --- .stats.yml | 2 +- src/resources/alpha/agents/turn.ts | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.stats.yml b/.stats.yml index 35d7077..f7df1a9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3 -config_hash: 03aab396899c7d9aa3fba867ce54824b +config_hash: 47ef2eb62d188340f22eb6dea3693f15 diff --git a/src/resources/alpha/agents/turn.ts b/src/resources/alpha/agents/turn.ts index 50c6807..86646f7 100644 --- a/src/resources/alpha/agents/turn.ts +++ b/src/resources/alpha/agents/turn.ts @@ -23,24 +23,24 @@ export class TurnResource extends APIResource { sessionId: string, body: TurnCreateParamsStreaming, options?: Core.RequestOptions, - ): APIPromise>; + ): APIPromise>; create( agentId: string, sessionId: string, body: TurnCreateParamsBase, options?: Core.RequestOptions, - ): APIPromise | Turn>; + ): APIPromise | Turn>; create( agentId: string, sessionId: string, body: TurnCreateParams, options?: Core.RequestOptions, - ): APIPromise | APIPromise> { + ): APIPromise | APIPromise> { return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, { body, ...options, stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + }) as APIPromise | APIPromise>; } /** @@ -74,26 +74,26 @@ export class TurnResource extends APIResource { turnId: string, body: TurnResumeParamsStreaming, options?: Core.RequestOptions, - ): APIPromise>; + ): APIPromise>; resume( agentId: string, sessionId: string, turnId: string, body: TurnResumeParamsBase, options?: Core.RequestOptions, - ): APIPromise | Turn>; + ): APIPromise | Turn>; resume( agentId: string, sessionId: string, turnId: string, body: TurnResumeParams, options?: Core.RequestOptions, - ): APIPromise | APIPromise> { + ): APIPromise | APIPromise> { return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, { body, ...options, stream: body.stream ?? 
false, - }) as APIPromise | APIPromise>; + }) as APIPromise | APIPromise>; } } From aec1d5ff198473ba736bf543ad00c6626cab9b81 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 19:56:55 +0000 Subject: [PATCH 14/26] feat(api): move post_training and eval under alpha namespace --- .stats.yml | 2 +- api.md | 126 +++++++++--------- src/index.ts | 42 ------ src/resources/alpha/agents/agents.ts | 8 +- src/resources/alpha/agents/session.ts | 8 +- src/resources/alpha/agents/steps.ts | 2 +- src/resources/alpha/agents/turn.ts | 6 +- src/resources/alpha/alpha.ts | 44 ++++++ src/resources/{ => alpha}/eval.ts | 0 src/resources/{ => alpha}/eval/eval.ts | 16 +-- src/resources/{ => alpha}/eval/index.ts | 0 src/resources/{ => alpha}/eval/jobs.ts | 10 +- src/resources/alpha/index.ts | 18 +++ src/resources/{ => alpha}/post-training.ts | 0 .../{ => alpha}/post-training/index.ts | 0 .../{ => alpha}/post-training/job.ts | 12 +- .../post-training/post-training.ts | 8 +- src/resources/index.ts | 18 --- .../{ => alpha}/eval/eval.test.ts | 16 +-- .../{ => alpha}/eval/jobs.test.ts | 12 +- .../{ => alpha}/post-training/job.test.ts | 16 +-- .../post-training/post-training.test.ts | 8 +- 22 files changed, 187 insertions(+), 185 deletions(-) rename src/resources/{ => alpha}/eval.ts (100%) rename src/resources/{ => alpha}/eval/eval.ts (87%) rename src/resources/{ => alpha}/eval/index.ts (100%) rename src/resources/{ => alpha}/eval/jobs.ts (65%) rename src/resources/{ => alpha}/post-training.ts (100%) rename src/resources/{ => alpha}/post-training/index.ts (100%) rename src/resources/{ => alpha}/post-training/job.ts (93%) rename src/resources/{ => alpha}/post-training/post-training.ts (97%) rename tests/api-resources/{ => alpha}/eval/eval.test.ts (92%) rename tests/api-resources/{ => alpha}/eval/jobs.test.ts (81%) rename tests/api-resources/{ => alpha}/post-training/job.test.ts (77%) rename tests/api-resources/{ => alpha}/post-training/post-training.test.ts (93%) diff --git a/.stats.yml b/.stats.yml index f7df1a9..448f905 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3 -config_hash: 47ef2eb62d188340f22eb6dea3693f15 +config_hash: d8706905bf16d9e4141e88d5a778263b diff --git a/api.md b/api.md index ba8c552..6efdf3f 100644 --- a/api.md +++ b/api.md @@ -113,29 +113,6 @@ Methods: - client.datasets.register({ ...params }) -> DatasetRegisterResponse - client.datasets.unregister(datasetId) -> void -# Eval - -Types: - -- BenchmarkConfig -- EvaluateResponse -- Job - -Methods: - -- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse -- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse -- client.eval.runEval(benchmarkId, { ...params }) -> Job -- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job - -## Jobs - -Methods: - -- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse -- client.eval.jobs.cancel(benchmarkId, jobId) -> void -- client.eval.jobs.status(benchmarkId, jobId) -> Job - # Inspect Types: @@ -291,34 +268,6 @@ Methods: - client.models.openai.list() -> ModelListResponse -# PostTraining - -Types: - -- AlgorithmConfig -- ListPostTrainingJobsResponse -- PostTrainingJob - -Methods: - -- 
client.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob -- client.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob - -## Job - -Types: - -- JobListResponse -- JobArtifactsResponse -- JobStatusResponse - -Methods: - -- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data> -- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse -- client.postTraining.job.cancel({ ...params }) -> void -- client.postTraining.job.status({ ...params }) -> JobStatusResponse - # Providers Types: @@ -483,6 +432,57 @@ Methods: - client.alpha.inference.rerank({ ...params }) -> InferenceRerankResponse +## PostTraining + +Types: + +- AlgorithmConfig +- ListPostTrainingJobsResponse +- PostTrainingJob + +Methods: + +- client.alpha.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob +- client.alpha.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob + +### Job + +Types: + +- JobListResponse +- JobArtifactsResponse +- JobStatusResponse + +Methods: + +- client.alpha.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data> +- client.alpha.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse +- client.alpha.postTraining.job.cancel({ ...params }) -> void +- client.alpha.postTraining.job.status({ ...params }) -> JobStatusResponse + +## Eval + +Types: + +- BenchmarkConfig +- EvaluateResponse +- Job + +Methods: + +- client.alpha.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse +- client.alpha.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse +- client.alpha.eval.runEval(benchmarkId, { ...params }) -> Job +- client.alpha.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job + +### Jobs + +Methods: + +- client.alpha.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse +- client.alpha.eval.jobs.cancel(benchmarkId, jobId) -> void +- client.alpha.eval.jobs.status(benchmarkId, jobId) -> Job + ## Agents Types: @@ -498,10 +498,10 @@ Types: Methods: -- client.alpha.agents.create({ ...params }) -> AgentCreateResponse -- client.alpha.agents.retrieve(agentId) -> AgentRetrieveResponse -- client.alpha.agents.list({ ...params }) -> AgentListResponse -- client.alpha.agents.delete(agentId) -> void +- client.alpha.agents.create({ ...params }) -> AgentCreateResponse +- client.alpha.agents.retrieve(agentId) -> AgentRetrieveResponse +- client.alpha.agents.list({ ...params }) -> AgentListResponse +- client.alpha.agents.delete(agentId) -> void ### Session @@ -513,10 +513,10 @@ Types: Methods: -- client.alpha.agents.session.create(agentId, { ...params }) -> SessionCreateResponse -- client.alpha.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session -- client.alpha.agents.session.list(agentId, { ...params }) -> SessionListResponse -- client.alpha.agents.session.delete(agentId, sessionId) -> void +- client.alpha.agents.session.create(agentId, { ...params }) -> SessionCreateResponse +- client.alpha.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session +- client.alpha.agents.session.list(agentId, { ...params }) -> SessionListResponse +- client.alpha.agents.session.delete(agentId, sessionId) -> void ### Steps @@ -526,7 +526,7 @@ Types: Methods: -- client.alpha.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse +- client.alpha.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse ### Turn @@ -538,6 +538,6 @@ Types: Methods: -- client.alpha.agents.turn.create(agentId, sessionId, { ...params }) -> Turn -- 
client.alpha.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn -- client.alpha.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn +- client.alpha.agents.turn.create(agentId, sessionId, { ...params }) -> Turn +- client.alpha.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn +- client.alpha.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn diff --git a/src/index.ts b/src/index.ts index 417e834..5411297 100644 --- a/src/index.ts +++ b/src/index.ts @@ -124,16 +124,6 @@ import { } from './resources/vector-io'; import { Alpha } from './resources/alpha/alpha'; import { Chat, ChatCompletionChunk } from './resources/chat/chat'; -import { - BenchmarkConfig, - Eval, - EvalEvaluateRowsAlphaParams, - EvalEvaluateRowsParams, - EvalRunEvalAlphaParams, - EvalRunEvalParams, - EvaluateResponse, - Job, -} from './resources/eval/eval'; import { ListModelsResponse, Model, @@ -141,14 +131,6 @@ import { ModelRegisterParams, Models, } from './resources/models/models'; -import { - AlgorithmConfig, - ListPostTrainingJobsResponse, - PostTraining, - PostTrainingJob, - PostTrainingPreferenceOptimizeParams, - PostTrainingSupervisedFineTuneParams, -} from './resources/post-training/post-training'; import { ResponseCreateParams, ResponseCreateParamsNonStreaming, @@ -297,7 +279,6 @@ export class LlamaStackClient extends Core.APIClient { toolRuntime: API.ToolRuntime = new API.ToolRuntime(this); responses: API.Responses = new API.Responses(this); datasets: API.Datasets = new API.Datasets(this); - eval: API.Eval = new API.Eval(this); inspect: API.Inspect = new API.Inspect(this); embeddings: API.Embeddings = new API.Embeddings(this); chat: API.Chat = new API.Chat(this); @@ -306,7 +287,6 @@ export class LlamaStackClient extends Core.APIClient { vectorDBs: API.VectorDBs = new API.VectorDBs(this); vectorStores: API.VectorStores = new API.VectorStores(this); models: API.Models = new API.Models(this); - postTraining: API.PostTraining = new API.PostTraining(this); providers: API.Providers = new API.Providers(this); routes: API.Routes = new API.Routes(this); moderations: API.Moderations = new API.Moderations(this); @@ -376,7 +356,6 @@ LlamaStackClient.ToolRuntime = ToolRuntime; LlamaStackClient.Responses = Responses; LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage; LlamaStackClient.Datasets = Datasets; -LlamaStackClient.Eval = Eval; LlamaStackClient.Inspect = Inspect; LlamaStackClient.Embeddings = Embeddings; LlamaStackClient.Chat = Chat; @@ -386,7 +365,6 @@ LlamaStackClient.VectorDBs = VectorDBs; LlamaStackClient.VectorStores = VectorStores; LlamaStackClient.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage; LlamaStackClient.Models = Models; -LlamaStackClient.PostTraining = PostTraining; LlamaStackClient.Providers = Providers; LlamaStackClient.Routes = Routes; LlamaStackClient.Moderations = Moderations; @@ -466,17 +444,6 @@ export declare namespace LlamaStackClient { type DatasetRegisterParams as DatasetRegisterParams, }; - export { - Eval as Eval, - type BenchmarkConfig as BenchmarkConfig, - type EvaluateResponse as EvaluateResponse, - type Job as Job, - type EvalEvaluateRowsParams as EvalEvaluateRowsParams, - type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams, - type EvalRunEvalParams as EvalRunEvalParams, - type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams, - }; - export { Inspect as Inspect, type HealthInfo as HealthInfo, @@ -538,15 +505,6 @@ export declare namespace LlamaStackClient { type 
ModelRegisterParams as ModelRegisterParams, }; - export { - PostTraining as PostTraining, - type AlgorithmConfig as AlgorithmConfig, - type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse, - type PostTrainingJob as PostTrainingJob, - type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams, - type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams, - }; - export { Providers as Providers, type ListProvidersResponse as ListProvidersResponse, diff --git a/src/resources/alpha/agents/agents.ts b/src/resources/alpha/agents/agents.ts index c624056..6c6a147 100644 --- a/src/resources/alpha/agents/agents.ts +++ b/src/resources/alpha/agents/agents.ts @@ -39,14 +39,14 @@ export class Agents extends APIResource { * Create an agent with the given configuration. */ create(body: AgentCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/agents', { body, ...options }); + return this._client.post('/v1alpha/agents', { body, ...options }); } /** * Describe an agent by its ID. */ retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/agents/${agentId}`, options); + return this._client.get(`/v1alpha/agents/${agentId}`, options); } /** @@ -61,14 +61,14 @@ export class Agents extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.get('/v1/agents', { query, ...options }); + return this._client.get('/v1alpha/agents', { query, ...options }); } /** * Delete an agent by its ID and its associated sessions and turns. */ delete(agentId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/agents/${agentId}`, { + return this._client.delete(`/v1alpha/agents/${agentId}`, { ...options, headers: { Accept: '*/*', ...options?.headers }, }); diff --git a/src/resources/alpha/agents/session.ts b/src/resources/alpha/agents/session.ts index c5d6b99..53fa1c8 100644 --- a/src/resources/alpha/agents/session.ts +++ b/src/resources/alpha/agents/session.ts @@ -14,7 +14,7 @@ export class SessionResource extends APIResource { body: SessionCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/agents/${agentId}/session`, { body, ...options }); + return this._client.post(`/v1alpha/agents/${agentId}/session`, { body, ...options }); } /** @@ -36,7 +36,7 @@ export class SessionResource extends APIResource { if (isRequestOptions(query)) { return this.retrieve(agentId, sessionId, {}, query); } - return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options }); + return this._client.get(`/v1alpha/agents/${agentId}/session/${sessionId}`, { query, ...options }); } /** @@ -56,14 +56,14 @@ export class SessionResource extends APIResource { if (isRequestOptions(query)) { return this.list(agentId, {}, query); } - return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options }); + return this._client.get(`/v1alpha/agents/${agentId}/sessions`, { query, ...options }); } /** * Delete an agent session by its ID and its associated turns. 
*/ delete(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/agents/${agentId}/session/${sessionId}`, { + return this._client.delete(`/v1alpha/agents/${agentId}/session/${sessionId}`, { ...options, headers: { Accept: '*/*', ...options?.headers }, }); diff --git a/src/resources/alpha/agents/steps.ts b/src/resources/alpha/agents/steps.ts index b7672bc..76c48c3 100644 --- a/src/resources/alpha/agents/steps.ts +++ b/src/resources/alpha/agents/steps.ts @@ -16,7 +16,7 @@ export class Steps extends APIResource { options?: Core.RequestOptions, ): Core.APIPromise { return this._client.get( - `/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`, + `/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`, options, ); } diff --git a/src/resources/alpha/agents/turn.ts b/src/resources/alpha/agents/turn.ts index 86646f7..33c4065 100644 --- a/src/resources/alpha/agents/turn.ts +++ b/src/resources/alpha/agents/turn.ts @@ -36,7 +36,7 @@ export class TurnResource extends APIResource { body: TurnCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, { + return this._client.post(`/v1alpha/agents/${agentId}/session/${sessionId}/turn`, { body, ...options, stream: body.stream ?? false, @@ -52,7 +52,7 @@ export class TurnResource extends APIResource { turnId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.get(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options); + return this._client.get(`/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options); } /** @@ -89,7 +89,7 @@ export class TurnResource extends APIResource { body: TurnResumeParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, { + return this._client.post(`/v1alpha/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, { body, ...options, stream: body.stream ?? 
false, diff --git a/src/resources/alpha/alpha.ts b/src/resources/alpha/alpha.ts index 4f7df9f..3565610 100644 --- a/src/resources/alpha/alpha.ts +++ b/src/resources/alpha/alpha.ts @@ -17,13 +17,37 @@ import { ToolExecutionStep, ToolResponse, } from './agents/agents'; +import * as EvalAPI from './eval/eval'; +import { + BenchmarkConfig, + Eval, + EvalEvaluateRowsAlphaParams, + EvalEvaluateRowsParams, + EvalRunEvalAlphaParams, + EvalRunEvalParams, + EvaluateResponse, + Job, +} from './eval/eval'; +import * as PostTrainingAPI from './post-training/post-training'; +import { + AlgorithmConfig, + ListPostTrainingJobsResponse, + PostTraining, + PostTrainingJob, + PostTrainingPreferenceOptimizeParams, + PostTrainingSupervisedFineTuneParams, +} from './post-training/post-training'; export class Alpha extends APIResource { inference: InferenceAPI.Inference = new InferenceAPI.Inference(this._client); + postTraining: PostTrainingAPI.PostTraining = new PostTrainingAPI.PostTraining(this._client); + eval: EvalAPI.Eval = new EvalAPI.Eval(this._client); agents: AgentsAPI.Agents = new AgentsAPI.Agents(this._client); } Alpha.Inference = Inference; +Alpha.PostTraining = PostTraining; +Alpha.Eval = Eval; Alpha.Agents = Agents; export declare namespace Alpha { @@ -33,6 +57,26 @@ export declare namespace Alpha { type InferenceRerankParams as InferenceRerankParams, }; + export { + PostTraining as PostTraining, + type AlgorithmConfig as AlgorithmConfig, + type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse, + type PostTrainingJob as PostTrainingJob, + type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams, + type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams, + }; + + export { + Eval as Eval, + type BenchmarkConfig as BenchmarkConfig, + type EvaluateResponse as EvaluateResponse, + type Job as Job, + type EvalEvaluateRowsParams as EvalEvaluateRowsParams, + type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams, + type EvalRunEvalParams as EvalRunEvalParams, + type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams, + }; + export { Agents as Agents, type InferenceStep as InferenceStep, diff --git a/src/resources/eval.ts b/src/resources/alpha/eval.ts similarity index 100% rename from src/resources/eval.ts rename to src/resources/alpha/eval.ts diff --git a/src/resources/eval/eval.ts b/src/resources/alpha/eval/eval.ts similarity index 87% rename from src/resources/eval/eval.ts rename to src/resources/alpha/eval/eval.ts index 765c88d..97f0cfb 100644 --- a/src/resources/eval/eval.ts +++ b/src/resources/alpha/eval/eval.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
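For orientation: after this move the eval and post-training surfaces hang off client.alpha, and the underlying routes shift from /v1 to /v1alpha, as the surrounding hunks show. A minimal usage sketch, assuming the package's default export and an async context, with placeholder values taken from the test fixtures:

  import LlamaStackClient from 'llama-stack-client';

  const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] });

  // Old paths were client.postTraining.* / client.eval.*
  const jobs = await client.alpha.postTraining.job.list();
  const status = await client.alpha.postTraining.job.status({ job_uuid: 'job_uuid' });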
-import { APIResource } from '../../resource'; -import * as Core from '../../core'; -import * as ScoringFunctionsAPI from '../scoring-functions'; -import * as Shared from '../shared'; +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; +import * as ScoringFunctionsAPI from '../../scoring-functions'; +import * as Shared from '../../shared'; import * as JobsAPI from './jobs'; import { Jobs } from './jobs'; @@ -18,7 +18,7 @@ export class Eval extends APIResource { body: EvalEvaluateRowsParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); + return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); } /** @@ -29,14 +29,14 @@ export class Eval extends APIResource { body: EvalEvaluateRowsAlphaParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); + return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); } /** * Run an evaluation on a benchmark. */ runEval(benchmarkId: string, body: EvalRunEvalParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); + return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); } /** @@ -47,7 +47,7 @@ export class Eval extends APIResource { body: EvalRunEvalAlphaParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); + return this._client.post(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); } } diff --git a/src/resources/eval/index.ts b/src/resources/alpha/eval/index.ts similarity index 100% rename from src/resources/eval/index.ts rename to src/resources/alpha/eval/index.ts diff --git a/src/resources/eval/jobs.ts b/src/resources/alpha/eval/jobs.ts similarity index 65% rename from src/resources/eval/jobs.ts rename to src/resources/alpha/eval/jobs.ts index 13d4a4d..3a830bd 100644 --- a/src/resources/eval/jobs.ts +++ b/src/resources/alpha/eval/jobs.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import * as Core from '../../core'; +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; import * as EvalAPI from './eval'; export class Jobs extends APIResource { @@ -13,14 +13,14 @@ export class Jobs extends APIResource { jobId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options); + return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options); } /** * Cancel a job. */ cancel(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, { + return this._client.delete(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, { ...options, headers: { Accept: '*/*', ...options?.headers }, }); @@ -30,6 +30,6 @@ export class Jobs extends APIResource { * Get the status of a job. 
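 *
 * Sketch (placeholder IDs, assuming a configured client):
 *   const job = await client.alpha.eval.jobs.status('benchmark_id', 'job_id');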
*/ status(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options); + return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options); } } diff --git a/src/resources/alpha/index.ts b/src/resources/alpha/index.ts index a941ba0..082839d 100644 --- a/src/resources/alpha/index.ts +++ b/src/resources/alpha/index.ts @@ -14,4 +14,22 @@ export { type AgentListParams, } from './agents/index'; export { Alpha } from './alpha'; +export { + Eval, + type BenchmarkConfig, + type EvaluateResponse, + type Job, + type EvalEvaluateRowsParams, + type EvalEvaluateRowsAlphaParams, + type EvalRunEvalParams, + type EvalRunEvalAlphaParams, +} from './eval/index'; export { Inference, type InferenceRerankResponse, type InferenceRerankParams } from './inference'; +export { + PostTraining, + type AlgorithmConfig, + type ListPostTrainingJobsResponse, + type PostTrainingJob, + type PostTrainingPreferenceOptimizeParams, + type PostTrainingSupervisedFineTuneParams, +} from './post-training/index'; diff --git a/src/resources/post-training.ts b/src/resources/alpha/post-training.ts similarity index 100% rename from src/resources/post-training.ts rename to src/resources/alpha/post-training.ts diff --git a/src/resources/post-training/index.ts b/src/resources/alpha/post-training/index.ts similarity index 100% rename from src/resources/post-training/index.ts rename to src/resources/alpha/post-training/index.ts diff --git a/src/resources/post-training/job.ts b/src/resources/alpha/post-training/job.ts similarity index 93% rename from src/resources/post-training/job.ts rename to src/resources/alpha/post-training/job.ts index a250ac9..3f77ceb 100644 --- a/src/resources/post-training/job.ts +++ b/src/resources/alpha/post-training/job.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import * as Core from '../../core'; +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; import * as PostTrainingAPI from './post-training'; export class Job extends APIResource { @@ -12,7 +12,7 @@ export class Job extends APIResource { options?: Core.RequestOptions, ): Core.APIPromise> { return ( - this._client.get('/v1/post-training/jobs', options) as Core.APIPromise<{ + this._client.get('/v1alpha/post-training/jobs', options) as Core.APIPromise<{ data: Array; }> )._thenUnwrap((obj) => obj.data); @@ -22,14 +22,14 @@ export class Job extends APIResource { * Get the artifacts of a training job. */ artifacts(query: JobArtifactsParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get('/v1/post-training/job/artifacts', { query, ...options }); + return this._client.get('/v1alpha/post-training/job/artifacts', { query, ...options }); } /** * Cancel a training job. */ cancel(body: JobCancelParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/post-training/job/cancel', { + return this._client.post('/v1alpha/post-training/job/cancel', { body, ...options, headers: { Accept: '*/*', ...options?.headers }, @@ -40,7 +40,7 @@ export class Job extends APIResource { * Get the status of a training job. 
*/ status(query: JobStatusParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get('/v1/post-training/job/status', { query, ...options }); + return this._client.get('/v1alpha/post-training/job/status', { query, ...options }); } } diff --git a/src/resources/post-training/post-training.ts b/src/resources/alpha/post-training/post-training.ts similarity index 97% rename from src/resources/post-training/post-training.ts rename to src/resources/alpha/post-training/post-training.ts index 8f6eb3f..e8bf24a 100644 --- a/src/resources/post-training/post-training.ts +++ b/src/resources/alpha/post-training/post-training.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import * as Core from '../../core'; +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; import * as JobAPI from './job'; import { Job, @@ -23,7 +23,7 @@ export class PostTraining extends APIResource { body: PostTrainingPreferenceOptimizeParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/v1/post-training/preference-optimize', { body, ...options }); + return this._client.post('/v1alpha/post-training/preference-optimize', { body, ...options }); } /** @@ -33,7 +33,7 @@ export class PostTraining extends APIResource { body: PostTrainingSupervisedFineTuneParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/v1/post-training/supervised-fine-tune', { body, ...options }); + return this._client.post('/v1alpha/post-training/supervised-fine-tune', { body, ...options }); } } diff --git a/src/resources/index.ts b/src/resources/index.ts index 9b14171..4c3de4e 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -29,16 +29,6 @@ export { type DatasetRegisterParams, } from './datasets'; export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings'; -export { - Eval, - type BenchmarkConfig, - type EvaluateResponse, - type Job, - type EvalEvaluateRowsParams, - type EvalEvaluateRowsAlphaParams, - type EvalRunEvalParams, - type EvalRunEvalAlphaParams, -} from './eval/eval'; export { FilesOpenAICursorPage, Files, @@ -58,14 +48,6 @@ export { type ModelRegisterParams, } from './models/models'; export { Moderations, type CreateResponse, type ModerationCreateParams } from './moderations'; -export { - PostTraining, - type AlgorithmConfig, - type ListPostTrainingJobsResponse, - type PostTrainingJob, - type PostTrainingPreferenceOptimizeParams, - type PostTrainingSupervisedFineTuneParams, -} from './post-training/post-training'; export { Providers, type ListProvidersResponse, type ProviderListResponse } from './providers'; export { ResponseListResponsesOpenAICursorPage, diff --git a/tests/api-resources/eval/eval.test.ts b/tests/api-resources/alpha/eval/eval.test.ts similarity index 92% rename from tests/api-resources/eval/eval.test.ts rename to tests/api-resources/alpha/eval/eval.test.ts index 9f3e461..af16560 100644 --- a/tests/api-resources/eval/eval.test.ts +++ b/tests/api-resources/alpha/eval/eval.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource eval', () => { test('evaluateRows: only required params', async () => { - const responsePromise = client.eval.evaluateRows('benchmark_id', { + const responsePromise = client.alpha.eval.evaluateRows('benchmark_id', { benchmark_config: { eval_candidate: { model: 
'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, scoring_params: { @@ -32,7 +32,7 @@ describe('resource eval', () => { }); test('evaluateRows: required and optional params', async () => { - const response = await client.eval.evaluateRows('benchmark_id', { + const response = await client.alpha.eval.evaluateRows('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', @@ -62,7 +62,7 @@ describe('resource eval', () => { }); test('evaluateRowsAlpha: only required params', async () => { - const responsePromise = client.eval.evaluateRowsAlpha('benchmark_id', { + const responsePromise = client.alpha.eval.evaluateRowsAlpha('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, scoring_params: { @@ -87,7 +87,7 @@ describe('resource eval', () => { }); test('evaluateRowsAlpha: required and optional params', async () => { - const response = await client.eval.evaluateRowsAlpha('benchmark_id', { + const response = await client.alpha.eval.evaluateRowsAlpha('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', @@ -117,7 +117,7 @@ describe('resource eval', () => { }); test('runEval: only required params', async () => { - const responsePromise = client.eval.runEval('benchmark_id', { + const responsePromise = client.alpha.eval.runEval('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, scoring_params: { @@ -140,7 +140,7 @@ describe('resource eval', () => { }); test('runEval: required and optional params', async () => { - const response = await client.eval.runEval('benchmark_id', { + const response = await client.alpha.eval.runEval('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', @@ -168,7 +168,7 @@ describe('resource eval', () => { }); test('runEvalAlpha: only required params', async () => { - const responsePromise = client.eval.runEvalAlpha('benchmark_id', { + const responsePromise = client.alpha.eval.runEvalAlpha('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' }, scoring_params: { @@ -191,7 +191,7 @@ describe('resource eval', () => { }); test('runEvalAlpha: required and optional params', async () => { - const response = await client.eval.runEvalAlpha('benchmark_id', { + const response = await client.alpha.eval.runEvalAlpha('benchmark_id', { benchmark_config: { eval_candidate: { model: 'model', diff --git a/tests/api-resources/eval/jobs.test.ts b/tests/api-resources/alpha/eval/jobs.test.ts similarity index 81% rename from tests/api-resources/eval/jobs.test.ts rename to tests/api-resources/alpha/eval/jobs.test.ts index cad4ebd..c18a9f2 100644 --- a/tests/api-resources/eval/jobs.test.ts +++ b/tests/api-resources/alpha/eval/jobs.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource jobs', () => { test('retrieve', async () => { - const responsePromise = client.eval.jobs.retrieve('benchmark_id', 'job_id'); + const responsePromise = client.alpha.eval.jobs.retrieve('benchmark_id', 'job_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -20,12 +20,12 @@ describe('resource jobs', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are 
being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.eval.jobs.retrieve('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), + client.alpha.eval.jobs.retrieve('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('cancel', async () => { - const responsePromise = client.eval.jobs.cancel('benchmark_id', 'job_id'); + const responsePromise = client.alpha.eval.jobs.cancel('benchmark_id', 'job_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -38,12 +38,12 @@ describe('resource jobs', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.eval.jobs.cancel('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), + client.alpha.eval.jobs.cancel('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); test('status', async () => { - const responsePromise = client.eval.jobs.status('benchmark_id', 'job_id'); + const responsePromise = client.alpha.eval.jobs.status('benchmark_id', 'job_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -56,7 +56,7 @@ describe('resource jobs', () => { test('status: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.eval.jobs.status('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), + client.alpha.eval.jobs.status('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); }); diff --git a/tests/api-resources/post-training/job.test.ts b/tests/api-resources/alpha/post-training/job.test.ts similarity index 77% rename from tests/api-resources/post-training/job.test.ts rename to tests/api-resources/alpha/post-training/job.test.ts index 0cb1ebb..3f79918 100644 --- a/tests/api-resources/post-training/job.test.ts +++ b/tests/api-resources/alpha/post-training/job.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource job', () => { test('list', async () => { - const responsePromise = client.postTraining.job.list(); + const responsePromise = client.alpha.postTraining.job.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -19,13 +19,13 @@ describe('resource job', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.postTraining.job.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.alpha.postTraining.job.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( LlamaStackClient.NotFoundError, ); }); test('artifacts: only required params', async () => { - const responsePromise = client.postTraining.job.artifacts({ job_uuid: 'job_uuid' }); + const responsePromise = 
client.alpha.postTraining.job.artifacts({ job_uuid: 'job_uuid' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -36,11 +36,11 @@ describe('resource job', () => { }); test('artifacts: required and optional params', async () => { - const response = await client.postTraining.job.artifacts({ job_uuid: 'job_uuid' }); + const response = await client.alpha.postTraining.job.artifacts({ job_uuid: 'job_uuid' }); }); test('cancel: only required params', async () => { - const responsePromise = client.postTraining.job.cancel({ job_uuid: 'job_uuid' }); + const responsePromise = client.alpha.postTraining.job.cancel({ job_uuid: 'job_uuid' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -51,11 +51,11 @@ describe('resource job', () => { }); test('cancel: required and optional params', async () => { - const response = await client.postTraining.job.cancel({ job_uuid: 'job_uuid' }); + const response = await client.alpha.postTraining.job.cancel({ job_uuid: 'job_uuid' }); }); test('status: only required params', async () => { - const responsePromise = client.postTraining.job.status({ job_uuid: 'job_uuid' }); + const responsePromise = client.alpha.postTraining.job.status({ job_uuid: 'job_uuid' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -66,6 +66,6 @@ describe('resource job', () => { }); test('status: required and optional params', async () => { - const response = await client.postTraining.job.status({ job_uuid: 'job_uuid' }); + const response = await client.alpha.postTraining.job.status({ job_uuid: 'job_uuid' }); }); }); diff --git a/tests/api-resources/post-training/post-training.test.ts b/tests/api-resources/alpha/post-training/post-training.test.ts similarity index 93% rename from tests/api-resources/post-training/post-training.test.ts rename to tests/api-resources/alpha/post-training/post-training.test.ts index ac7a53b..6069666 100644 --- a/tests/api-resources/post-training/post-training.test.ts +++ b/tests/api-resources/alpha/post-training/post-training.test.ts @@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] describe('resource postTraining', () => { test('preferenceOptimize: only required params', async () => { - const responsePromise = client.postTraining.preferenceOptimize({ + const responsePromise = client.alpha.postTraining.preferenceOptimize({ algorithm_config: { beta: 0, loss_type: 'sigmoid' }, finetuned_model: 'finetuned_model', hyperparam_search_config: { foo: true }, @@ -25,7 +25,7 @@ describe('resource postTraining', () => { }); test('preferenceOptimize: required and optional params', async () => { - const response = await client.postTraining.preferenceOptimize({ + const response = await client.alpha.postTraining.preferenceOptimize({ algorithm_config: { beta: 0, loss_type: 'sigmoid' }, finetuned_model: 'finetuned_model', hyperparam_search_config: { foo: true }, @@ -58,7 +58,7 @@ describe('resource postTraining', () => { }); test('supervisedFineTune: only required params', async () => { - const responsePromise = client.postTraining.supervisedFineTune({ + const responsePromise = client.alpha.postTraining.supervisedFineTune({ hyperparam_search_config: { foo: true }, job_uuid: 'job_uuid', logger_config: { foo: true }, @@ -74,7 +74,7 @@ describe('resource 
postTraining', () => { }); test('supervisedFineTune: required and optional params', async () => { - const response = await client.postTraining.supervisedFineTune({ + const response = await client.alpha.postTraining.supervisedFineTune({ hyperparam_search_config: { foo: true }, job_uuid: 'job_uuid', logger_config: { foo: true }, From 25a0f10cffa7de7f1457d65c97259911bc70ab0a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 30 Sep 2025 22:45:45 +0000 Subject: [PATCH 15/26] feat(api): fix file batches SDK to list_files --- .stats.yml | 2 +- api.md | 2 +- src/resources/vector-stores/file-batches.ts | 38 +++++++++--------- src/resources/vector-stores/index.ts | 2 +- src/resources/vector-stores/vector-stores.ts | 4 +- .../vector-stores/file-batches.test.ts | 40 +++++++++---------- 6 files changed, 44 insertions(+), 44 deletions(-) diff --git a/.stats.yml b/.stats.yml index 448f905..436151e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3 -config_hash: d8706905bf16d9e4141e88d5a778263b +config_hash: 0412cd40c0609550c1a47c69dd104e4f diff --git a/api.md b/api.md index 6efdf3f..dfca1d9 100644 --- a/api.md +++ b/api.md @@ -240,8 +240,8 @@ Methods: - client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatches - client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatches -- client.vectorStores.fileBatches.list(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage - client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatches +- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage # Models diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts index 532bd36..54bce95 100644 --- a/src/resources/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -30,28 +30,39 @@ export class FileBatches extends APIResource { return this._client.get(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}`, options); } + /** + * Cancels a vector store file batch. + */ + cancel( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, options); + } + /** * Returns a list of vector store files in a batch. 
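 *
 * Usage sketch (illustrative IDs; iteration assumes the returned cursor page
 * is async-iterable, as this SDK's other PagePromise list endpoints are):
 *   for await (const file of client.vectorStores.fileBatches.listFiles('vs_id', 'batch_id')) {
 *     console.log(file.id);
 *   }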
*/ - list( + listFiles( vectorStoreId: string, batchId: string, - query?: FileBatchListParams, + query?: FileBatchListFilesParams, options?: Core.RequestOptions, ): Core.PagePromise; - list( + listFiles( vectorStoreId: string, batchId: string, options?: Core.RequestOptions, ): Core.PagePromise; - list( + listFiles( vectorStoreId: string, batchId: string, - query: FileBatchListParams | Core.RequestOptions = {}, + query: FileBatchListFilesParams | Core.RequestOptions = {}, options?: Core.RequestOptions, ): Core.PagePromise { if (isRequestOptions(query)) { - return this.list(vectorStoreId, batchId, {}, query); + return this.listFiles(vectorStoreId, batchId, {}, query); } return this._client.getAPIList( `/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`, @@ -59,17 +70,6 @@ export class FileBatches extends APIResource { { query, ...options }, ); } - - /** - * Cancels a vector store file batch. - */ - cancel( - vectorStoreId: string, - batchId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, options); - } } /** @@ -232,7 +232,7 @@ export namespace FileBatchCreateParams { } } -export interface FileBatchListParams extends OpenAICursorPageParams { +export interface FileBatchListFilesParams extends OpenAICursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. @@ -256,7 +256,7 @@ export declare namespace FileBatches { type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse, type VectorStoreFileBatches as VectorStoreFileBatches, type FileBatchCreateParams as FileBatchCreateParams, - type FileBatchListParams as FileBatchListParams, + type FileBatchListFilesParams as FileBatchListFilesParams, }; } diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index 59545d6..0f53c8f 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -5,7 +5,7 @@ export { type ListVectorStoreFilesInBatchResponse, type VectorStoreFileBatches, type FileBatchCreateParams, - type FileBatchListParams, + type FileBatchListFilesParams, } from './file-batches'; export { VectorStoreFilesOpenAICursorPage, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 01afd40..7163cf9 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -6,7 +6,7 @@ import * as Core from '../../core'; import * as FileBatchesAPI from './file-batches'; import { FileBatchCreateParams, - FileBatchListParams, + FileBatchListFilesParams, FileBatches, ListVectorStoreFilesInBatchResponse, VectorStoreFileBatches, @@ -472,6 +472,6 @@ export declare namespace VectorStores { type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse, type VectorStoreFileBatches as VectorStoreFileBatches, type FileBatchCreateParams as FileBatchCreateParams, - type FileBatchListParams as FileBatchListParams, + type FileBatchListFilesParams as FileBatchListFilesParams, }; } diff --git a/tests/api-resources/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts index bc5018c..98e8964 100644 --- a/tests/api-resources/vector-stores/file-batches.test.ts +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -47,8 +47,8 @@ describe('resource fileBatches', () => { ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - test('list', 
async () => { - const responsePromise = client.vectorStores.fileBatches.list('vector_store_id', 'batch_id'); + test('cancel', async () => { + const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -58,29 +58,17 @@ describe('resource fileBatches', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('list: request options instead of params are passed correctly', async () => { + test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.vectorStores.fileBatches.list('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); - test('list: request options and params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.vectorStores.fileBatches.list( - 'vector_store_id', - 'batch_id', - { after: 'after', before: 'before', filter: 'filter', limit: 0, order: 'order' }, - { path: '/_stainless_unknown_path' }, - ), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('cancel', async () => { - const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + test('listFiles', async () => { + const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,12 +78,24 @@ describe('resource fileBatches', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('cancel: request options instead of params are passed correctly', async () => { + test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(LlamaStackClient.NotFoundError); }); + + test('listFiles: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.fileBatches.listFiles( + 'vector_store_id', + 'batch_id', + { after: 'after', before: 'before', filter: 'filter', limit: 0, order: 'order' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); }); From 8910a121146aeddcb8f400101e6a2232245097e0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 1 Oct 2025 21:28:11 +0000 Subject: [PATCH 16/26] feat(api)!: use input_schema instead of parameters for tools --- .stats.yml | 4 +- src/resources/responses/input-items.ts | 32 +++++ src/resources/responses/responses.ts | 132 +++++++++++++++++- src/resources/tool-runtime/tool-runtime.ts | 49 +------ 
src/resources/tools.ts | 55 ++------ .../api-resources/alpha/agents/agents.test.ts | 13 +- 6 files changed, 181 insertions(+), 104 deletions(-) diff --git a/.stats.yml b/.stats.yml index 436151e..d9b62ff 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml -openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-5f0f0b99d1b0bf40e00e11f5d134ed13de97799cf2dfea0c8612e2f003584505.yml +openapi_spec_hash: 5f51544cb340c37aba54b93a526c536e config_hash: 0412cd40c0609550c1a47c69dd104e4f diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index 5ad384d..398022b 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -38,6 +38,8 @@ export interface InputItemListResponse { | InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall | InputItemListResponse.OpenAIResponseOutputMessageFunctionToolCall | InputItemListResponse.OpenAIResponseInputFunctionToolCallOutput + | InputItemListResponse.OpenAIResponseMcpApprovalRequest + | InputItemListResponse.OpenAIResponseMcpApprovalResponse | InputItemListResponse.OpenAIResponseMessage >; @@ -181,6 +183,36 @@ export namespace InputItemListResponse { status?: string; } + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + /** * Corresponds to the various Message types in the Responses API. They are all * under one type because the Responses API gives them all the same "type" value, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index a186b01..05fe120 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -108,6 +108,7 @@ export interface ResponseObject { | ResponseObject.OpenAIResponseOutputMessageFunctionToolCall | ResponseObject.OpenAIResponseOutputMessageMcpCall | ResponseObject.OpenAIResponseOutputMessageMcpListTools + | ResponseObject.OpenAIResponseMcpApprovalRequest >; /** @@ -508,6 +509,21 @@ export namespace ResponseObject { } } + /** + * A request for human approval of a tool invocation. 
+ */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + /** * Text formatting configuration for the response */ @@ -623,7 +639,8 @@ export namespace ResponseObjectStream { | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools; + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMcpApprovalRequest; /** * Index position of this item in the output list @@ -1002,6 +1019,21 @@ export namespace ResponseObjectStream { description?: string; } } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } } /** @@ -1017,7 +1049,8 @@ export namespace ResponseObjectStream { | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFileSearchToolCall | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFunctionToolCall | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall - | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools; + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMcpApprovalRequest; /** * Index position of this item in the output list @@ -1396,6 +1429,21 @@ export namespace ResponseObjectStream { description?: string; } } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } } /** @@ -1815,6 +1863,8 @@ export interface ResponseListResponse { | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput + | ResponseListResponse.OpenAIResponseMcpApprovalRequest + | ResponseListResponse.OpenAIResponseMcpApprovalResponse | ResponseListResponse.OpenAIResponseMessage >; @@ -1838,6 +1888,7 @@ export interface ResponseListResponse { | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall | ResponseListResponse.OpenAIResponseOutputMessageMcpCall | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools + | ResponseListResponse.OpenAIResponseMcpApprovalRequest >; /** @@ -2015,6 +2066,36 @@ export namespace ResponseListResponse { status?: string; } + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. 
+ */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + /** * Corresponds to the various Message types in the Responses API. They are all * under one type because the Responses API gives them all the same "type" value, @@ -2523,6 +2604,21 @@ export namespace ResponseListResponse { } } + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + /** * Text formatting configuration for the response */ @@ -2616,6 +2712,8 @@ export interface ResponseCreateParamsBase { | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput + | ResponseCreateParams.OpenAIResponseMcpApprovalRequest + | ResponseCreateParams.OpenAIResponseMcpApprovalResponse | ResponseCreateParams.OpenAIResponseMessage >; @@ -2793,6 +2891,36 @@ export namespace ResponseCreateParams { status?: string; } + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + /** * Corresponds to the various Message types in the Responses API. They are all * under one type because the Responses API gives them all the same "type" value, diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts index 058779e..64996ff 100644 --- a/src/resources/tool-runtime/tool-runtime.ts +++ b/src/resources/tool-runtime/tool-runtime.ts @@ -58,56 +58,19 @@ export interface ToolDef { description?: string; /** - * (Optional) Additional metadata about the tool + * (Optional) JSON Schema for tool inputs (MCP inputSchema) */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + input_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) List of parameters this tool accepts + * (Optional) Additional metadata about the tool */ - parameters?: Array; -} + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; -export namespace ToolDef { /** - * Parameter definition for a tool. 
+ * (Optional) JSON Schema for tool outputs (MCP outputSchema) */ - export interface Parameter { - /** - * Human-readable description of what the parameter does - */ - description: string; - - /** - * Name of the parameter - */ - name: string; - - /** - * Type of the parameter (e.g., string, integer) - */ - parameter_type: string; - - /** - * Whether this parameter is required for tool invocation - */ - required: boolean; - - /** - * (Optional) Default value for the parameter if not provided - */ - default?: boolean | number | string | Array | unknown | null; - - /** - * Type of the elements when parameter_type is array - */ - items?: unknown; - - /** - * (Optional) Title of the parameter - */ - title?: string; - } + output_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; } /** diff --git a/src/resources/tools.ts b/src/resources/tools.ts index ab05a60..efc2626 100644 --- a/src/resources/tools.ts +++ b/src/resources/tools.ts @@ -51,11 +51,6 @@ export interface Tool { identifier: string; - /** - * List of parameters this tool accepts - */ - parameters: Array; - provider_id: string; /** @@ -68,54 +63,22 @@ export interface Tool { */ type: 'tool'; + /** + * JSON Schema for the tool's input parameters + */ + input_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + /** * (Optional) Additional metadata about the tool */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - provider_resource_id?: string; -} - -export namespace Tool { /** - * Parameter definition for a tool. + * JSON Schema for the tool's output */ - export interface Parameter { - /** - * Human-readable description of what the parameter does - */ - description: string; - - /** - * Name of the parameter - */ - name: string; - - /** - * Type of the parameter (e.g., string, integer) - */ - parameter_type: string; - - /** - * Whether this parameter is required for tool invocation - */ - required: boolean; - - /** - * (Optional) Default value for the parameter if not provided - */ - default?: boolean | number | string | Array | unknown | null; - - /** - * Type of the elements when parameter_type is array - */ - items?: unknown; - - /** - * (Optional) Title of the parameter - */ - title?: string; - } + output_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_resource_id?: string; } /** diff --git a/tests/api-resources/alpha/agents/agents.test.ts b/tests/api-resources/alpha/agents/agents.test.ts index 0f26b3a..8beec68 100644 --- a/tests/api-resources/alpha/agents/agents.test.ts +++ b/tests/api-resources/alpha/agents/agents.test.ts @@ -28,18 +28,9 @@ describe('resource agents', () => { { name: 'name', description: 'description', + input_schema: { foo: true }, metadata: { foo: true }, - parameters: [ - { - description: 'description', - name: 'name', - parameter_type: 'parameter_type', - required: true, - default: true, - items: {}, - title: 'title', - }, - ], + output_schema: { foo: true }, }, ], enable_session_persistence: true, From 06f2bcaf0df2e5d462cbe2d9ef3704ab0cfe9248 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Oct 2025 16:49:28 +0000 Subject: [PATCH 17/26] feat(api): tool api (input_schema, etc.) 
changes --- .stats.yml | 6 +- api.md | 35 +- src/index.ts | 58 +- src/resources/benchmarks.ts | 73 +- src/resources/datasets.ts | 339 +-- src/resources/index.ts | 32 +- src/resources/responses/index.ts | 2 +- src/resources/responses/responses.ts | 1831 ++++++++++------- src/resources/telemetry.ts | 417 +--- src/resources/tool-runtime/tool-runtime.ts | 49 +- src/resources/tools.ts | 55 +- .../api-resources/alpha/agents/agents.test.ts | 13 +- tests/api-resources/benchmarks.test.ts | 70 - tests/api-resources/datasets.test.ts | 129 -- .../api-resources/responses/responses.test.ts | 28 +- tests/api-resources/telemetry.test.ts | 133 -- 16 files changed, 1243 insertions(+), 2027 deletions(-) delete mode 100644 tests/api-resources/benchmarks.test.ts delete mode 100644 tests/api-resources/datasets.test.ts diff --git a/.stats.yml b/.stats.yml index d9b62ff..cbd436b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-5f0f0b99d1b0bf40e00e11f5d134ed13de97799cf2dfea0c8612e2f003584505.yml -openapi_spec_hash: 5f51544cb340c37aba54b93a526c536e +configured_endpoints: 93 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-41cb5d8049e6ffd933a7ad6bbbb76b2fef2e864d0d857c91799ee16e9a796883.yml +openapi_spec_hash: 5e0bdf64563e020ef14b968ab724d2db config_hash: 0412cd40c0609550c1a47c69dd104e4f diff --git a/api.md b/api.md index dfca1d9..63895d0 100644 --- a/api.md +++ b/api.md @@ -74,12 +74,13 @@ Types: - ResponseObject - ResponseObjectStream +- ResponseCreateResponse - ResponseListResponse - ResponseDeleteResponse Methods: -- client.responses.create({ ...params }) -> ResponseObject +- client.responses.create({ ...params }) -> ResponseCreateResponse - client.responses.retrieve(responseId) -> ResponseObject - client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage - client.responses.delete(responseId) -> ResponseDeleteResponse @@ -99,19 +100,6 @@ Methods: Types: - ListDatasetsResponse -- DatasetRetrieveResponse -- DatasetListResponse -- DatasetIterrowsResponse -- DatasetRegisterResponse - -Methods: - -- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse -- client.datasets.list() -> DatasetListResponse -- client.datasets.appendrows(datasetId, { ...params }) -> void -- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse -- client.datasets.register({ ...params }) -> DatasetRegisterResponse -- client.datasets.unregister(datasetId) -> void # Inspect @@ -345,22 +333,10 @@ Types: - QuerySpansResponse - SpanWithStatus - Trace -- TelemetryGetSpanResponse -- TelemetryGetSpanTreeResponse -- TelemetryQueryMetricsResponse -- TelemetryQuerySpansResponse -- TelemetryQueryTracesResponse Methods: -- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse -- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse -- client.telemetry.getTrace(traceId) -> Trace - client.telemetry.logEvent({ ...params }) -> void -- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse -- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse -- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse -- client.telemetry.saveSpansToDataset({ ...params }) -> void # Scoring @@ -395,13 +371,6 @@ Types: - Benchmark - ListBenchmarksResponse -- BenchmarkListResponse - -Methods: - -- 
client.benchmarks.retrieve(benchmarkId) -> Benchmark -- client.benchmarks.list() -> BenchmarkListResponse -- client.benchmarks.register({ ...params }) -> void # Files diff --git a/src/index.ts b/src/index.ts index 5411297..8212d27 100644 --- a/src/index.ts +++ b/src/index.ts @@ -13,13 +13,7 @@ import { } from './pagination'; import * as Uploads from './uploads'; import * as API from './resources/index'; -import { - Benchmark, - BenchmarkListResponse, - BenchmarkRegisterParams, - Benchmarks, - ListBenchmarksResponse, -} from './resources/benchmarks'; +import { Benchmark, Benchmarks, ListBenchmarksResponse } from './resources/benchmarks'; import { CompletionCreateParams, CompletionCreateParamsNonStreaming, @@ -27,17 +21,7 @@ import { CompletionCreateResponse, Completions, } from './resources/completions'; -import { - DatasetAppendrowsParams, - DatasetIterrowsParams, - DatasetIterrowsResponse, - DatasetListResponse, - DatasetRegisterParams, - DatasetRegisterResponse, - DatasetRetrieveResponse, - Datasets, - ListDatasetsResponse, -} from './resources/datasets'; +import { Datasets, ListDatasetsResponse } from './resources/datasets'; import { CreateEmbeddingsResponse, EmbeddingCreateParams, Embeddings } from './resources/embeddings'; import { DeleteFileResponse, @@ -87,17 +71,7 @@ import { QuerySpansResponse, SpanWithStatus, Telemetry, - TelemetryGetSpanResponse, - TelemetryGetSpanTreeParams, - TelemetryGetSpanTreeResponse, TelemetryLogEventParams, - TelemetryQueryMetricsParams, - TelemetryQueryMetricsResponse, - TelemetryQuerySpansParams, - TelemetryQuerySpansResponse, - TelemetryQueryTracesParams, - TelemetryQueryTracesResponse, - TelemetrySaveSpansToDatasetParams, Trace, } from './resources/telemetry'; import { @@ -134,7 +108,7 @@ import { import { ResponseCreateParams, ResponseCreateParamsNonStreaming, - ResponseCreateParamsStreaming, + ResponseCreateResponse, ResponseDeleteResponse, ResponseListParams, ResponseListResponse, @@ -423,26 +397,16 @@ export declare namespace LlamaStackClient { Responses as Responses, type ResponseObject as ResponseObject, type ResponseObjectStream as ResponseObjectStream, + type ResponseCreateResponse as ResponseCreateResponse, type ResponseListResponse as ResponseListResponse, type ResponseDeleteResponse as ResponseDeleteResponse, ResponseListResponsesOpenAICursorPage as ResponseListResponsesOpenAICursorPage, type ResponseCreateParams as ResponseCreateParams, type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, - type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, type ResponseListParams as ResponseListParams, }; - export { - Datasets as Datasets, - type ListDatasetsResponse as ListDatasetsResponse, - type DatasetRetrieveResponse as DatasetRetrieveResponse, - type DatasetListResponse as DatasetListResponse, - type DatasetIterrowsResponse as DatasetIterrowsResponse, - type DatasetRegisterResponse as DatasetRegisterResponse, - type DatasetAppendrowsParams as DatasetAppendrowsParams, - type DatasetIterrowsParams as DatasetIterrowsParams, - type DatasetRegisterParams as DatasetRegisterParams, - }; + export { Datasets as Datasets, type ListDatasetsResponse as ListDatasetsResponse }; export { Inspect as Inspect, @@ -550,17 +514,7 @@ export declare namespace LlamaStackClient { type QuerySpansResponse as QuerySpansResponse, type SpanWithStatus as SpanWithStatus, type Trace as Trace, - type TelemetryGetSpanResponse as TelemetryGetSpanResponse, - type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, - type 
TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse, - type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, - type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, - type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, type TelemetryLogEventParams as TelemetryLogEventParams, - type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams, - type TelemetryQuerySpansParams as TelemetryQuerySpansParams, - type TelemetryQueryTracesParams as TelemetryQueryTracesParams, - type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, }; export { @@ -584,8 +538,6 @@ export declare namespace LlamaStackClient { Benchmarks as Benchmarks, type Benchmark as Benchmark, type ListBenchmarksResponse as ListBenchmarksResponse, - type BenchmarkListResponse as BenchmarkListResponse, - type BenchmarkRegisterParams as BenchmarkRegisterParams, }; export { diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts index b6b8363..a5659ed 100644 --- a/src/resources/benchmarks.ts +++ b/src/resources/benchmarks.ts @@ -1,36 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../resource'; -import * as Core from '../core'; -export class Benchmarks extends APIResource { - /** - * Get a benchmark by its ID. - */ - retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/eval/benchmarks/${benchmarkId}`, options); - } - - /** - * List all benchmarks. - */ - list(options?: Core.RequestOptions): Core.APIPromise { - return ( - this._client.get('/v1/eval/benchmarks', options) as Core.APIPromise<{ data: BenchmarkListResponse }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Register a benchmark. - */ - register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/eval/benchmarks', { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } -} +export class Benchmarks extends APIResource {} /** * A benchmark resource for evaluating model performance. @@ -64,48 +36,9 @@ export interface Benchmark { } export interface ListBenchmarksResponse { - data: BenchmarkListResponse; -} - -export type BenchmarkListResponse = Array; - -export interface BenchmarkRegisterParams { - /** - * The ID of the benchmark to register. - */ - benchmark_id: string; - - /** - * The ID of the dataset to use for the benchmark. - */ - dataset_id: string; - - /** - * The scoring functions to use for the benchmark. - */ - scoring_functions: Array; - - /** - * The metadata to use for the benchmark. - */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The ID of the provider benchmark to use for the benchmark. - */ - provider_benchmark_id?: string; - - /** - * The ID of the provider to use for the benchmark. - */ - provider_id?: string; + data: Array; } export declare namespace Benchmarks { - export { - type Benchmark as Benchmark, - type ListBenchmarksResponse as ListBenchmarksResponse, - type BenchmarkListResponse as BenchmarkListResponse, - type BenchmarkRegisterParams as BenchmarkRegisterParams, - }; + export { type Benchmark as Benchmark, type ListBenchmarksResponse as ListBenchmarksResponse }; } diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts index 5ed6661..140a8cc 100644 --- a/src/resources/datasets.ts +++ b/src/resources/datasets.ts @@ -1,89 +1,8 @@ // File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. import { APIResource } from '../resource'; -import { isRequestOptions } from '../core'; -import * as Core from '../core'; -export class Datasets extends APIResource { - /** - * Get a dataset by its ID. - */ - retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/datasets/${datasetId}`, options); - } - - /** - * List all datasets. - */ - list(options?: Core.RequestOptions): Core.APIPromise { - return ( - this._client.get('/v1/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Append rows to a dataset. - */ - appendrows( - datasetId: string, - body: DatasetAppendrowsParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } - - /** - * Get a paginated list of rows from a dataset. Uses offset-based pagination where: - * - * - start_index: The starting index (0-based). If None, starts from beginning. - * - limit: Number of items to return. If None or -1, returns all items. - * - * The response includes: - * - * - data: List of items for the current page. - * - has_more: Whether there are more items available after this set. - */ - iterrows( - datasetId: string, - query?: DatasetIterrowsParams, - options?: Core.RequestOptions, - ): Core.APIPromise; - iterrows(datasetId: string, options?: Core.RequestOptions): Core.APIPromise; - iterrows( - datasetId: string, - query: DatasetIterrowsParams | Core.RequestOptions = {}, - options?: Core.RequestOptions, - ): Core.APIPromise { - if (isRequestOptions(query)) { - return this.iterrows(datasetId, {}, query); - } - return this._client.get(`/v1/datasetio/iterrows/${datasetId}`, { query, ...options }); - } - - /** - * Register a new dataset. - */ - register( - body: DatasetRegisterParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/datasets', { body, ...options }); - } - - /** - * Unregister a dataset by its ID. - */ - unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/datasets/${datasetId}`, { - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } -} +export class Datasets extends APIResource {} /** * Response from listing datasets. @@ -92,80 +11,14 @@ export interface ListDatasetsResponse { /** * List of datasets */ - data: DatasetListResponse; + data: Array; } -/** - * Dataset resource for storing and accessing training or evaluation data. - */ -export interface DatasetRetrieveResponse { - identifier: string; - - /** - * Additional metadata for the dataset - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - /** - * Purpose of the dataset indicating its intended use - */ - purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; - - /** - * Data source configuration for the dataset - */ - source: DatasetRetrieveResponse.UriDataSource | DatasetRetrieveResponse.RowsDataSource; - - /** - * Type of resource, always 'dataset' for datasets - */ - type: 'dataset'; - - provider_resource_id?: string; -} - -export namespace DatasetRetrieveResponse { - /** - * A dataset that can be obtained from a URI. - */ - export interface UriDataSource { - type: 'uri'; - - /** - * The dataset can be obtained from a URI. E.g. 
- - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } -} - -/** - * List of datasets - */ -export type DatasetListResponse = Array; - -export namespace DatasetListResponse { +export namespace ListDatasetsResponse { /** * Dataset resource for storing and accessing training or evaluation data. */ - export interface DatasetListResponseItem { + export interface Data { identifier: string; /** @@ -183,7 +36,7 @@ export namespace DatasetListResponse { /** * Data source configuration for the dataset */ - source: DatasetListResponseItem.UriDataSource | DatasetListResponseItem.RowsDataSource; + source: Data.UriDataSource | Data.RowsDataSource; /** * Type of resource, always 'dataset' for datasets @@ -193,7 +46,7 @@ export namespace DatasetListResponse { provider_resource_id?: string; } - export namespace DatasetListResponseItem { + export namespace Data { /** * A dataset that can be obtained from a URI. */ @@ -224,184 +77,6 @@ export namespace DatasetListResponse { } } -/** - * A generic paginated response that follows a simple format. - */ -export interface DatasetIterrowsResponse { - /** - * The list of items for the current page - */ - data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - /** - * Whether there are more items available after this set - */ - has_more: boolean; - - /** - * The URL for accessing this list - */ - url?: string; -} - -/** - * Dataset resource for storing and accessing training or evaluation data. - */ -export interface DatasetRegisterResponse { - identifier: string; - - /** - * Additional metadata for the dataset - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - /** - * Purpose of the dataset indicating its intended use - */ - purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; - - /** - * Data source configuration for the dataset - */ - source: DatasetRegisterResponse.UriDataSource | DatasetRegisterResponse.RowsDataSource; - - /** - * Type of resource, always 'dataset' for datasets - */ - type: 'dataset'; - - provider_resource_id?: string; -} - -export namespace DatasetRegisterResponse { - /** - * A dataset that can be obtained from a URI. - */ - export interface UriDataSource { - type: 'uri'; - - /** - * The dataset can be obtained from a URI. E.g. - - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } -} - -export interface DatasetAppendrowsParams { - /** - * The rows to append to the dataset. - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; -} - -export interface DatasetIterrowsParams { - /** - * The number of rows to get. 
- */ - limit?: number; - - /** - * Index into dataset for the first row to get. Get all rows if None. - */ - start_index?: number; -} - -export interface DatasetRegisterParams { - /** - * The purpose of the dataset. One of: - "post-training/messages": The dataset - * contains a messages column with list of messages for post-training. { - * "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": - * "assistant", "content": "Hello, world!"}, ] } - "eval/question-answer": The - * dataset contains a question column and an answer column for evaluation. { - * "question": "What is the capital of France?", "answer": "Paris" } - - * "eval/messages-answer": The dataset contains a messages column with list of - * messages and an answer column for evaluation. { "messages": [ {"role": "user", - * "content": "Hello, my name is John Doe."}, {"role": "assistant", "content": - * "Hello, John Doe. How can I help you today?"}, {"role": "user", "content": - * "What's my name?"}, ], "answer": "John Doe" } - */ - purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; - - /** - * The data source of the dataset. Ensure that the data source schema is compatible - * with the purpose of the dataset. Examples: - { "type": "uri", "uri": - * "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri": - * "lsfs://mydata.jsonl" } - { "type": "uri", "uri": - * "data:csv;base64,{base64_content}" } - { "type": "uri", "uri": - * "huggingface://llamastack/simpleqa?split=train" } - { "type": "rows", "rows": [ - * { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": - * "assistant", "content": "Hello, world!"}, ] } ] } - */ - source: DatasetRegisterParams.UriDataSource | DatasetRegisterParams.RowsDataSource; - - /** - * The ID of the dataset. If not provided, an ID will be generated. - */ - dataset_id?: string; - - /** - * The metadata for the dataset. - E.g. {"description": "My dataset"}. - */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; -} - -export namespace DatasetRegisterParams { - /** - * A dataset that can be obtained from a URI. - */ - export interface UriDataSource { - type: 'uri'; - - /** - * The dataset can be obtained from a URI. E.g. - - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. 
- [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } -} - export declare namespace Datasets { - export { - type ListDatasetsResponse as ListDatasetsResponse, - type DatasetRetrieveResponse as DatasetRetrieveResponse, - type DatasetListResponse as DatasetListResponse, - type DatasetIterrowsResponse as DatasetIterrowsResponse, - type DatasetRegisterResponse as DatasetRegisterResponse, - type DatasetAppendrowsParams as DatasetAppendrowsParams, - type DatasetIterrowsParams as DatasetIterrowsParams, - type DatasetRegisterParams as DatasetRegisterParams, - }; + export { type ListDatasetsResponse as ListDatasetsResponse }; } diff --git a/src/resources/index.ts b/src/resources/index.ts index 4c3de4e..0fd6c90 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,13 +2,7 @@ export * from './shared'; export { Alpha } from './alpha/alpha'; -export { - Benchmarks, - type Benchmark, - type ListBenchmarksResponse, - type BenchmarkListResponse, - type BenchmarkRegisterParams, -} from './benchmarks'; +export { Benchmarks, type Benchmark, type ListBenchmarksResponse } from './benchmarks'; export { Chat, type ChatCompletionChunk } from './chat/chat'; export { Completions, @@ -17,17 +11,7 @@ export { type CompletionCreateParamsNonStreaming, type CompletionCreateParamsStreaming, } from './completions'; -export { - Datasets, - type ListDatasetsResponse, - type DatasetRetrieveResponse, - type DatasetListResponse, - type DatasetIterrowsResponse, - type DatasetRegisterResponse, - type DatasetAppendrowsParams, - type DatasetIterrowsParams, - type DatasetRegisterParams, -} from './datasets'; +export { Datasets, type ListDatasetsResponse } from './datasets'; export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings'; export { FilesOpenAICursorPage, @@ -54,11 +38,11 @@ export { Responses, type ResponseObject, type ResponseObjectStream, + type ResponseCreateResponse, type ResponseListResponse, type ResponseDeleteResponse, type ResponseCreateParams, type ResponseCreateParamsNonStreaming, - type ResponseCreateParamsStreaming, type ResponseListParams, } from './responses/responses'; export { Routes, type ListRoutesResponse, type RouteListResponse } from './routes'; @@ -97,17 +81,7 @@ export { type QuerySpansResponse, type SpanWithStatus, type Trace, - type TelemetryGetSpanResponse, - type TelemetryGetSpanTreeResponse, - type TelemetryQueryMetricsResponse, - type TelemetryQuerySpansResponse, - type TelemetryQueryTracesResponse, - type TelemetryGetSpanTreeParams, type TelemetryLogEventParams, - type TelemetryQueryMetricsParams, - type TelemetryQuerySpansParams, - type TelemetryQueryTracesParams, - type TelemetrySaveSpansToDatasetParams, } from './telemetry'; export { ToolRuntime, diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts index 1465569..fe54614 100644 --- a/src/resources/responses/index.ts +++ b/src/resources/responses/index.ts @@ -6,10 +6,10 @@ export { Responses, type ResponseObject, type ResponseObjectStream, + type ResponseCreateResponse, type ResponseListResponse, type ResponseDeleteResponse, type ResponseCreateParams, type ResponseCreateParamsNonStreaming, - type ResponseCreateParamsStreaming, type ResponseListParams, } from './responses'; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 
05fe120..2cf8e01 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -14,24 +14,15 @@ export class Responses extends APIResource { inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); /** - * Create a new OpenAI response. + * List all OpenAI responses. */ - create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; - create( - body: ResponseCreateParamsStreaming, - options?: Core.RequestOptions, - ): APIPromise>; - create( - body: ResponseCreateParamsBase, - options?: Core.RequestOptions, - ): APIPromise | ResponseObject>; create( body: ResponseCreateParams, options?: Core.RequestOptions, - ): APIPromise | APIPromise> { - return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as - | APIPromise - | APIPromise>; + ): APIPromise> { + return this._client.post('/v1/responses', { body, ...options, stream: true }) as APIPromise< + Stream + >; } /** @@ -1842,922 +1833,967 @@ export namespace ResponseObjectStream { } /** - * OpenAI response object extended with input context information. + * Paginated list of OpenAI response objects with navigation metadata. */ -export interface ResponseListResponse { - /** - * Unique identifier for this response - */ - id: string; - - /** - * Unix timestamp when the response was created - */ - created_at: number; - - /** - * List of input items that led to this response - */ - input: Array< - | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall - | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput - | ResponseListResponse.OpenAIResponseMcpApprovalRequest - | ResponseListResponse.OpenAIResponseMcpApprovalResponse - | ResponseListResponse.OpenAIResponseMessage - >; - - /** - * Model identifier used for generation - */ - model: string; - - /** - * Object type identifier, always "response" - */ - object: 'response'; - - /** - * List of generated output items (messages, tool calls, etc.) 
- */ - output: Array< - | ResponseListResponse.OpenAIResponseMessage - | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall - | ResponseListResponse.OpenAIResponseOutputMessageMcpCall - | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools - | ResponseListResponse.OpenAIResponseMcpApprovalRequest - >; - - /** - * Whether tool calls can be executed in parallel - */ - parallel_tool_calls: boolean; - - /** - * Current status of the response generation - */ - status: string; - - /** - * Text formatting configuration for the response - */ - text: ResponseListResponse.Text; - +export interface ResponseCreateResponse { /** - * (Optional) Error details if the response generation failed + * List of response objects with their input context */ - error?: ResponseListResponse.Error; + data: Array; /** - * (Optional) ID of the previous response in a conversation + * Identifier of the first item in this page */ - previous_response_id?: string; + first_id: string; /** - * (Optional) Sampling temperature used for generation + * Whether there are more results available beyond this page */ - temperature?: number; + has_more: boolean; /** - * (Optional) Nucleus sampling parameter used for generation + * Identifier of the last item in this page */ - top_p?: number; + last_id: string; /** - * (Optional) Truncation strategy applied to the response + * Object type identifier, always "list" */ - truncation?: string; + object: 'list'; } -export namespace ResponseListResponse { +export namespace ResponseCreateResponse { /** - * Web search tool call output message for OpenAI responses. + * OpenAI response object extended with input context information. */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { + export interface Data { /** - * Unique identifier for this tool call + * Unique identifier for this response */ id: string; /** - * Current status of the web search operation - */ - status: string; - - /** - * Tool call type identifier, always "web_search_call" + * Unix timestamp when the response was created */ - type: 'web_search_call'; - } + created_at: number; - /** - * File search tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageFileSearchToolCall { /** - * Unique identifier for this tool call + * List of input items that led to this response */ - id: string; + input: Array< + | Data.OpenAIResponseOutputMessageWebSearchToolCall + | Data.OpenAIResponseOutputMessageFileSearchToolCall + | Data.OpenAIResponseOutputMessageFunctionToolCall + | Data.OpenAIResponseInputFunctionToolCallOutput + | Data.OpenAIResponseMcpApprovalRequest + | Data.OpenAIResponseMcpApprovalResponse + | Data.OpenAIResponseMessage + >; /** - * List of search queries executed + * Model identifier used for generation */ - queries: Array; + model: string; /** - * Current status of the file search operation + * Object type identifier, always "response" */ - status: string; + object: 'response'; /** - * Tool call type identifier, always "file_search_call" + * List of generated output items (messages, tool calls, etc.) 
*/ - type: 'file_search_call'; + output: Array< + | Data.OpenAIResponseMessage + | Data.OpenAIResponseOutputMessageWebSearchToolCall + | Data.OpenAIResponseOutputMessageFileSearchToolCall + | Data.OpenAIResponseOutputMessageFunctionToolCall + | Data.OpenAIResponseOutputMessageMcpCall + | Data.OpenAIResponseOutputMessageMcpListTools + | Data.OpenAIResponseMcpApprovalRequest + >; /** - * (Optional) Search results returned by the file search operation + * Whether tool calls can be executed in parallel */ - results?: Array; - } + parallel_tool_calls: boolean; - export namespace OpenAIResponseOutputMessageFileSearchToolCall { /** - * Search results returned by the file search operation. + * Current status of the response generation */ - export interface Result { - /** - * (Optional) Key-value attributes associated with the file - */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * Unique identifier of the file containing the result - */ - file_id: string; - - /** - * Name of the file containing the result - */ - filename: string; - - /** - * Relevance score for this search result (between 0 and 1) - */ - score: number; - - /** - * Text content of the search result - */ - text: string; - } - } + status: string; - /** - * Function tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * JSON string containing the function arguments + * Text formatting configuration for the response */ - arguments: string; + text: Data.Text; /** - * Unique identifier for the function call + * (Optional) Error details if the response generation failed */ - call_id: string; + error?: Data.Error; /** - * Name of the function being called + * (Optional) ID of the previous response in a conversation */ - name: string; + previous_response_id?: string; /** - * Tool call type identifier, always "function_call" + * (Optional) Sampling temperature used for generation */ - type: 'function_call'; + temperature?: number; /** - * (Optional) Additional identifier for the tool call + * (Optional) Nucleus sampling parameter used for generation */ - id?: string; + top_p?: number; /** - * (Optional) Current status of the function call execution + * (Optional) Truncation strategy applied to the response */ - status?: string; - } - - /** - * This represents the output of a function call that gets passed back to the - * model. - */ - export interface OpenAIResponseInputFunctionToolCallOutput { - call_id: string; - - output: string; - - type: 'function_call_output'; - - id?: string; - - status?: string; - } - - /** - * A request for human approval of a tool invocation. - */ - export interface OpenAIResponseMcpApprovalRequest { - id: string; - - arguments: string; - - name: string; - - server_label: string; - - type: 'mcp_approval_request'; - } - - /** - * A response to an MCP approval request. - */ - export interface OpenAIResponseMcpApprovalResponse { - approval_request_id: string; - - approve: boolean; - - type: 'mcp_approval_response'; - - id?: string; - - reason?: string; - } - - /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. 
- */ - export interface OpenAIResponseMessage { - content: - | string - | Array< - | OpenAIResponseMessage.OpenAIResponseInputMessageContentText - | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage - > - | Array; - - role: 'system' | 'developer' | 'user' | 'assistant'; - - type: 'message'; - - id?: string; - - status?: string; + truncation?: string; } - export namespace OpenAIResponseMessage { + export namespace Data { /** - * Text content for input messages in OpenAI response format. + * Web search tool call output message for OpenAI responses. */ - export interface OpenAIResponseInputMessageContentText { + export interface OpenAIResponseOutputMessageWebSearchToolCall { /** - * The text content of the input message + * Unique identifier for this tool call */ - text: string; + id: string; /** - * Content type identifier, always "input_text" + * Current status of the web search operation */ - type: 'input_text'; + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; } /** - * Image content for input messages in OpenAI response format. + * File search tool call output message for OpenAI responses. */ - export interface OpenAIResponseInputMessageContentImage { + export interface OpenAIResponseOutputMessageFileSearchToolCall { /** - * Level of detail for image processing, can be "low", "high", or "auto" + * Unique identifier for this tool call */ - detail: 'low' | 'high' | 'auto'; + id: string; /** - * Content type identifier, always "input_image" + * List of search queries executed */ - type: 'input_image'; + queries: Array; /** - * (Optional) URL of the image content + * Current status of the file search operation */ - image_url?: string; - } + status: string; - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; - - text: string; + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; - type: 'output_text'; + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; } - export namespace UnionMember2 { + export namespace OpenAIResponseOutputMessageFileSearchToolCall { /** - * File citation annotation for referencing specific files in response content. + * Search results returned by the file search operation. */ - export interface OpenAIResponseAnnotationFileCitation { + export interface Result { /** - * Unique identifier of the referenced file + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result */ file_id: string; /** - * Name of the referenced file + * Name of the file containing the result */ filename: string; /** - * Position index of the citation within the content + * Relevance score for this search result (between 0 and 1) */ - index: number; + score: number; /** - * Annotation type identifier, always "file_citation" + * Text content of the search result */ - type: 'file_citation'; + text: string; } + } + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * URL citation annotation for referencing external web resources. 
+ * JSON string containing the function arguments */ - export interface OpenAIResponseAnnotationCitation { - /** - * End position of the citation span in the content - */ - end_index: number; - - /** - * Start position of the citation span in the content - */ - start_index: number; - - /** - * Title of the referenced web resource - */ - title: string; - - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; + arguments: string; - /** - * URL of the referenced web resource - */ - url: string; - } + /** + * Unique identifier for the function call + */ + call_id: string; - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; + /** + * Name of the function being called + */ + name: string; - end_index: number; + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; - file_id: string; + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; - filename: string; + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } - start_index: number; + /** + * This represents the output of a function call that gets passed back to the + * model. + */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; - type: 'container_file_citation'; - } + output: string; - export interface OpenAIResponseAnnotationFilePath { - file_id: string; + type: 'function_call_output'; - index: number; + id?: string; - type: 'file_path'; - } + status?: string; } - } - /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. - */ - export interface OpenAIResponseMessage { - content: - | string - | Array< - | OpenAIResponseMessage.OpenAIResponseInputMessageContentText - | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage - > - | Array; + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; - role: 'system' | 'developer' | 'user' | 'assistant'; + arguments: string; - type: 'message'; + name: string; - id?: string; + server_label: string; - status?: string; - } + type: 'mcp_approval_request'; + } - export namespace OpenAIResponseMessage { /** - * Text content for input messages in OpenAI response format. + * A response to an MCP approval request. */ - export interface OpenAIResponseInputMessageContentText { - /** - * The text content of the input message - */ - text: string; + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; - /** - * Content type identifier, always "input_text" - */ - type: 'input_text'; + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; } /** - * Image content for input messages in OpenAI response format. + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
*/ - export interface OpenAIResponseInputMessageContentImage { - /** - * Level of detail for image processing, can be "low", "high", or "auto" - */ - detail: 'low' | 'high' | 'auto'; - - /** - * Content type identifier, always "input_image" - */ - type: 'input_image'; + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; - /** - * (Optional) URL of the image content - */ - image_url?: string; - } + role: 'system' | 'developer' | 'user' | 'assistant'; - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; + type: 'message'; - text: string; + id?: string; - type: 'output_text'; + status?: string; } - export namespace UnionMember2 { + export namespace OpenAIResponseMessage { /** - * File citation annotation for referencing specific files in response content. + * Text content for input messages in OpenAI response format. */ - export interface OpenAIResponseAnnotationFileCitation { - /** - * Unique identifier of the referenced file - */ - file_id: string; - - /** - * Name of the referenced file - */ - filename: string; - + export interface OpenAIResponseInputMessageContentText { /** - * Position index of the citation within the content + * The text content of the input message */ - index: number; + text: string; /** - * Annotation type identifier, always "file_citation" + * Content type identifier, always "input_text" */ - type: 'file_citation'; + type: 'input_text'; } /** - * URL citation annotation for referencing external web resources. + * Image content for input messages in OpenAI response format. */ - export interface OpenAIResponseAnnotationCitation { + export interface OpenAIResponseInputMessageContentImage { /** - * End position of the citation span in the content + * Level of detail for image processing, can be "low", "high", or "auto" */ - end_index: number; + detail: 'low' | 'high' | 'auto'; /** - * Start position of the citation span in the content + * Content type identifier, always "input_image" */ - start_index: number; + type: 'input_image'; /** - * Title of the referenced web resource + * (Optional) URL of the image content */ - title: string; + image_url?: string; + } - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; - /** - * URL of the referenced web resource - */ - url: string; + text: string; + + type: 'output_text'; } - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. 
+ */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; - end_index: number; + /** + * Name of the referenced file + */ + filename: string; - file_id: string; + /** + * Position index of the citation within the content + */ + index: number; - filename: string; + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } - start_index: number; + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; - type: 'container_file_citation'; - } + /** + * Start position of the citation span in the content + */ + start_index: number; - export interface OpenAIResponseAnnotationFilePath { - file_id: string; + /** + * Title of the referenced web resource + */ + title: string; - index: number; + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; - type: 'file_path'; + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } } } - } - /** - * Web search tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { /** - * Unique identifier for this tool call + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. */ - id: string; + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; - /** - * Current status of the web search operation - */ - status: string; + role: 'system' | 'developer' | 'user' | 'assistant'; - /** - * Tool call type identifier, always "web_search_call" - */ - type: 'web_search_call'; - } + type: 'message'; - /** - * File search tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageFileSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; + id?: string; - /** - * List of search queries executed - */ - queries: Array; + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. 
+ */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } /** - * Current status of the file search operation + * Web search tool call output message for OpenAI responses. */ - status: string; + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } /** - * Tool call type identifier, always "file_search_call" + * File search tool call output message for OpenAI responses. */ - type: 'file_search_call'; + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. 
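+       *
+       * A sketch of a single result (all values illustrative):
+       *   { file_id: 'file_1', filename: 'guide.md', score: 0.92, text: '...', attributes: {} }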
+ */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; - /** - * (Optional) Search results returned by the file search operation - */ - results?: Array; - } + /** + * Text content of the search result + */ + text: string; + } + } - export namespace OpenAIResponseOutputMessageFileSearchToolCall { /** - * Search results returned by the file search operation. + * Function tool call output message for OpenAI responses. */ - export interface Result { + export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * (Optional) Key-value attributes associated with the file + * JSON string containing the function arguments */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + arguments: string; /** - * Unique identifier of the file containing the result + * Unique identifier for the function call */ - file_id: string; + call_id: string; /** - * Name of the file containing the result + * Name of the function being called */ - filename: string; + name: string; /** - * Relevance score for this search result (between 0 and 1) + * Tool call type identifier, always "function_call" */ - score: number; + type: 'function_call'; /** - * Text content of the search result + * (Optional) Additional identifier for the tool call */ - text: string; - } - } - - /** - * Function tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageFunctionToolCall { - /** - * JSON string containing the function arguments - */ - arguments: string; - - /** - * Unique identifier for the function call - */ - call_id: string; - - /** - * Name of the function being called - */ - name: string; - - /** - * Tool call type identifier, always "function_call" - */ - type: 'function_call'; - - /** - * (Optional) Additional identifier for the tool call - */ - id?: string; - - /** - * (Optional) Current status of the function call execution - */ - status?: string; - } - - /** - * Model Context Protocol (MCP) call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageMcpCall { - /** - * Unique identifier for this MCP call - */ - id: string; - - /** - * JSON string containing the MCP call arguments - */ - arguments: string; + id?: string; - /** - * Name of the MCP method being called - */ - name: string; + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } /** - * Label identifying the MCP server handling the call + * Model Context Protocol (MCP) call output message for OpenAI responses. 
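+   *
+   * Example of a completed call (all values illustrative; arguments/output are JSON strings):
+   *   { id: 'mcp_1', type: 'mcp_call', name: 'search_docs', server_label: 'docs',
+   *     arguments: '{"query":"setup"}', output: '"ok"' }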
*/ - server_label: string; + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; - /** - * Tool call type identifier, always "mcp_call" - */ - type: 'mcp_call'; + /** + * JSON string containing the MCP call arguments + */ + arguments: string; - /** - * (Optional) Error message if the MCP call failed - */ - error?: string; + /** + * Name of the MCP method being called + */ + name: string; - /** - * (Optional) Output result from the successful MCP call - */ - output?: string; - } + /** + * Label identifying the MCP server handling the call + */ + server_label: string; - /** - * MCP list tools output message containing available tools from an MCP server. - */ - export interface OpenAIResponseOutputMessageMcpListTools { - /** - * Unique identifier for this MCP list tools operation - */ - id: string; + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; - /** - * Label identifying the MCP server providing the tools - */ - server_label: string; + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; - /** - * List of available tools provided by the MCP server - */ - tools: Array; + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } /** - * Tool call type identifier, always "mcp_list_tools" + * MCP list tools output message containing available tools from an MCP server. */ - type: 'mcp_list_tools'; - } + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; - export namespace OpenAIResponseOutputMessageMcpListTools { - /** - * Tool definition returned by MCP list tools operation. - */ - export interface Tool { /** - * JSON schema defining the tool's input parameters + * Label identifying the MCP server providing the tools */ - input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + server_label: string; /** - * Name of the tool + * List of available tools provided by the MCP server */ - name: string; + tools: Array; /** - * (Optional) Description of what the tool does + * Tool call type identifier, always "mcp_list_tools" */ - description?: string; + type: 'mcp_list_tools'; } - } - - /** - * A request for human approval of a tool invocation. - */ - export interface OpenAIResponseMcpApprovalRequest { - id: string; - - arguments: string; - name: string; + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; - server_label: string; + /** + * Name of the tool + */ + name: string; - type: 'mcp_approval_request'; - } + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } - /** - * Text formatting configuration for the response - */ - export interface Text { /** - * (Optional) Text format configuration specifying output format requirements + * A request for human approval of a tool invocation. 
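+   *
+   * Example shape (identifiers and arguments are illustrative):
+   *   { id: 'appr_1', type: 'mcp_approval_request', name: 'delete_file',
+   *     server_label: 'fs', arguments: '{"path":"/tmp/a"}' }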
*/ - format?: Text.Format; - } + export interface OpenAIResponseMcpApprovalRequest { + id: string; - export namespace Text { - /** - * (Optional) Text format configuration specifying output format requirements - */ - export interface Format { - /** - * Must be "text", "json_schema", or "json_object" to identify the format type - */ - type: 'text' | 'json_schema' | 'json_object'; + arguments: string; - /** - * (Optional) A description of the response format. Only used for json_schema. - */ - description?: string; + name: string; - /** - * The name of the response format. Only used for json_schema. - */ - name?: string; + server_label: string; - /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. Only used for json_schema. + type: 'mcp_approval_request'; + } + + /** + * Text formatting configuration for the response + */ + export interface Text { + /** + * (Optional) Text format configuration specifying output format requirements */ - schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + format?: Text.Format; + } + export namespace Text { /** - * (Optional) Whether to strictly enforce the JSON schema. If true, the response - * must match the schema exactly. Only used for json_schema. + * (Optional) Text format configuration specifying output format requirements */ - strict?: boolean; + export interface Format { + /** + * Must be "text", "json_schema", or "json_object" to identify the format type + */ + type: 'text' | 'json_schema' | 'json_object'; + + /** + * (Optional) A description of the response format. Only used for json_schema. + */ + description?: string; + + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; + + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. + */ + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. + */ + strict?: boolean; + } } - } - /** - * (Optional) Error details if the response generation failed - */ - export interface Error { /** - * Error code identifying the type of failure + * (Optional) Error details if the response generation failed */ - code: string; + export interface Error { + /** + * Error code identifying the type of failure + */ + code: string; - /** - * Human-readable error message describing the failure - */ - message: string; + /** + * Human-readable error message describing the failure + */ + message: string; + } } } /** - * Response object confirming deletion of an OpenAI response. + * OpenAI response object extended with input context information. 
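+ *
+ * A hedged usage sketch (cursor pages in this SDK are async-iterable):
+ *   for await (const resp of client.responses.list()) console.log(resp.id, resp.status);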
*/ -export interface ResponseDeleteResponse { +export interface ResponseListResponse { /** - * Unique identifier of the deleted response + * Unique identifier for this response */ id: string; /** - * Deletion confirmation flag, always True + * Unix timestamp when the response was created */ - deleted: boolean; + created_at: number; + + /** + * List of input items that led to this response + */ + input: Array< + | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput + | ResponseListResponse.OpenAIResponseMcpApprovalRequest + | ResponseListResponse.OpenAIResponseMcpApprovalResponse + | ResponseListResponse.OpenAIResponseMessage + >; + + /** + * Model identifier used for generation + */ + model: string; /** * Object type identifier, always "response" */ object: 'response'; -} - -export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming; -export interface ResponseCreateParamsBase { /** - * Input message(s) to create the response. + * List of generated output items (messages, tool calls, etc.) */ - input: - | string - | Array< - | ResponseCreateParams.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall - | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput - | ResponseCreateParams.OpenAIResponseMcpApprovalRequest - | ResponseCreateParams.OpenAIResponseMcpApprovalResponse - | ResponseCreateParams.OpenAIResponseMessage - >; + output: Array< + | ResponseListResponse.OpenAIResponseMessage + | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools + | ResponseListResponse.OpenAIResponseMcpApprovalRequest + >; /** - * The underlying LLM used for completions. + * Whether tool calls can be executed in parallel */ - model: string; + parallel_tool_calls: boolean; /** - * (Optional) Additional fields to include in the response. + * Current status of the response generation */ - include?: Array; + status: string; - instructions?: string; + /** + * Text formatting configuration for the response + */ + text: ResponseListResponse.Text; - max_infer_iters?: number; + /** + * (Optional) Error details if the response generation failed + */ + error?: ResponseListResponse.Error; /** - * (Optional) if specified, the new response will be a continuation of the previous - * response. This can be used to easily fork-off new responses from existing - * responses. + * (Optional) ID of the previous response in a conversation */ previous_response_id?: string; - store?: boolean; - - stream?: boolean; - + /** + * (Optional) Sampling temperature used for generation + */ temperature?: number; /** - * Text response configuration for OpenAI responses. 
+ * (Optional) Nucleus sampling parameter used for generation */ - text?: ResponseCreateParams.Text; + top_p?: number; - tools?: Array< - | ResponseCreateParams.OpenAIResponseInputToolWebSearch - | ResponseCreateParams.OpenAIResponseInputToolFileSearch - | ResponseCreateParams.OpenAIResponseInputToolFunction - | ResponseCreateParams.OpenAIResponseInputToolMcp - >; + /** + * (Optional) Truncation strategy applied to the response + */ + truncation?: string; } -export namespace ResponseCreateParams { +export namespace ResponseListResponse { /** * Web search tool call output message for OpenAI responses. */ @@ -2918,7 +2954,159 @@ export namespace ResponseCreateParams { id?: string; - reason?: string; + reason?: string; + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
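+       *
+       * Example (URL, title, and offsets are illustrative):
+       *   { type: 'url_citation', url: 'https://example.com/doc', title: 'Example Doc',
+       *     start_index: 10, end_index: 42 }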
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } } /** @@ -3074,213 +3262,350 @@ export namespace ResponseCreateParams { } /** - * Text response configuration for OpenAI responses. + * Web search tool call output message for OpenAI responses. */ - export interface Text { + export interface OpenAIResponseOutputMessageWebSearchToolCall { /** - * (Optional) Text format configuration specifying output format requirements + * Unique identifier for this tool call */ - format?: Text.Format; + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; } - export namespace Text { + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { /** - * (Optional) Text format configuration specifying output format requirements + * Unique identifier for this tool call */ - export interface Format { + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { /** - * Must be "text", "json_schema", or "json_object" to identify the format type + * (Optional) Key-value attributes associated with the file */ - type: 'text' | 'json_schema' | 'json_object'; + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) A description of the response format. Only used for json_schema. + * Unique identifier of the file containing the result */ - description?: string; + file_id: string; /** - * The name of the response format. Only used for json_schema. + * Name of the file containing the result */ - name?: string; + filename: string; /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. Only used for json_schema. + * Relevance score for this search result (between 0 and 1) */ - schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + score: number; /** - * (Optional) Whether to strictly enforce the JSON schema. If true, the response - * must match the schema exactly. Only used for json_schema. + * Text content of the search result */ - strict?: boolean; + text: string; } } /** - * Web search tool configuration for OpenAI response inputs. 
+ * Function tool call output message for OpenAI responses. */ - export interface OpenAIResponseInputToolWebSearch { + export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * Web search tool type variant to use + * JSON string containing the function arguments */ - type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11'; + arguments: string; /** - * (Optional) Size of search context, must be "low", "medium", or "high" + * Unique identifier for the function call */ - search_context_size?: string; - } + call_id: string; - /** - * File search tool configuration for OpenAI response inputs. - */ - export interface OpenAIResponseInputToolFileSearch { /** - * Tool type identifier, always "file_search" + * Name of the function being called */ - type: 'file_search'; + name: string; /** - * List of vector store identifiers to search within + * Tool call type identifier, always "function_call" */ - vector_store_ids: Array; + type: 'function_call'; /** - * (Optional) Additional filters to apply to the search + * (Optional) Additional identifier for the tool call */ - filters?: { [key: string]: boolean | number | string | Array | unknown | null }; + id?: string; /** - * (Optional) Maximum number of search results to return (1-50) + * (Optional) Current status of the function call execution */ - max_num_results?: number; + status?: string; + } + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { /** - * (Optional) Options for ranking and scoring search results + * Unique identifier for this MCP call */ - ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions; - } + id: string; - export namespace OpenAIResponseInputToolFileSearch { /** - * (Optional) Options for ranking and scoring search results + * JSON string containing the MCP call arguments */ - export interface RankingOptions { - /** - * (Optional) Name of the ranking algorithm to use - */ - ranker?: string; - - /** - * (Optional) Minimum relevance score threshold for results - */ - score_threshold?: number; - } - } + arguments: string; - /** - * Function tool configuration for OpenAI response inputs. - */ - export interface OpenAIResponseInputToolFunction { /** - * Name of the function that can be called + * Name of the MCP method being called */ name: string; /** - * Tool type identifier, always "function" + * Label identifying the MCP server handling the call */ - type: 'function'; + server_label: string; /** - * (Optional) Description of what the function does + * Tool call type identifier, always "mcp_call" */ - description?: string; + type: 'mcp_call'; /** - * (Optional) JSON schema defining the function's parameters + * (Optional) Error message if the MCP call failed */ - parameters?: { [key: string]: boolean | number | string | Array | unknown | null }; + error?: string; /** - * (Optional) Whether to enforce strict parameter validation + * (Optional) Output result from the successful MCP call */ - strict?: boolean; + output?: string; } /** - * Model Context Protocol (MCP) tool configuration for OpenAI response inputs. + * MCP list tools output message containing available tools from an MCP server. 
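+   *
+   * Example (server label and tool list are illustrative):
+   *   { id: 'mls_1', type: 'mcp_list_tools', server_label: 'docs',
+   *     tools: [{ name: 'search', input_schema: { type: 'object' } }] }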
*/ - export interface OpenAIResponseInputToolMcp { + export interface OpenAIResponseOutputMessageMcpListTools { /** - * Approval requirement for tool calls ("always", "never", or filter) + * Unique identifier for this MCP list tools operation */ - require_approval: 'always' | 'never' | OpenAIResponseInputToolMcp.ApprovalFilter; + id: string; /** - * Label to identify this MCP server + * Label identifying the MCP server providing the tools */ server_label: string; /** - * URL endpoint of the MCP server + * List of available tools provided by the MCP server */ - server_url: string; + tools: Array; /** - * Tool type identifier, always "mcp" + * Tool call type identifier, always "mcp_list_tools" */ - type: 'mcp'; + type: 'mcp_list_tools'; + } + export namespace OpenAIResponseOutputMessageMcpListTools { /** - * (Optional) Restriction on which tools can be used from this server + * Tool definition returned by MCP list tools operation. */ - allowed_tools?: Array | OpenAIResponseInputToolMcp.AllowedToolsFilter; + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + /** + * Text formatting configuration for the response + */ + export interface Text { /** - * (Optional) HTTP headers to include when connecting to the server + * (Optional) Text format configuration specifying output format requirements */ - headers?: { [key: string]: boolean | number | string | Array | unknown | null }; + format?: Text.Format; } - export namespace OpenAIResponseInputToolMcp { + export namespace Text { /** - * Filter configuration for MCP tool approval requirements. + * (Optional) Text format configuration specifying output format requirements */ - export interface ApprovalFilter { + export interface Format { /** - * (Optional) List of tool names that always require approval + * Must be "text", "json_schema", or "json_object" to identify the format type */ - always?: Array; + type: 'text' | 'json_schema' | 'json_object'; /** - * (Optional) List of tool names that never require approval + * (Optional) A description of the response format. Only used for json_schema. */ - never?: Array; - } + description?: string; + + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; + + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. + */ + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; - /** - * Filter configuration for restricting which MCP tools can be used. - */ - export interface AllowedToolsFilter { /** - * (Optional) List of specific tool names that are allowed + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. 
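+       *
+       * For example, a strict json_schema format (name and schema are illustrative):
+       *   { type: 'json_schema', name: 'answer', schema: { type: 'object' }, strict: true }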
 */
-      tool_names?: Array<string>;
+        strict?: boolean;
+      }
     }
   }
 
-  export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming;
-  export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming;
+  /**
+   * (Optional) Error details if the response generation failed
+   */
+  export interface Error {
+    /**
+     * Error code identifying the type of failure
+     */
+    code: string;
+
+    /**
+     * Human-readable error message describing the failure
+     */
+    message: string;
+  }
 }
 
-export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase {
-  stream?: false;
+/**
+ * Response object confirming deletion of an OpenAI response.
+ */
+export interface ResponseDeleteResponse {
+  /**
+   * Unique identifier of the deleted response
+   */
+  id: string;
+
+  /**
+   * Deletion confirmation flag, always true
+   */
+  deleted: boolean;
+
+  /**
+   * Object type identifier, always "response"
+   */
+  object: 'response';
 }
 
-export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase {
-  stream: true;
+export type ResponseCreateParams = ResponseCreateParamsNonStreaming;
+
+export declare namespace ResponseCreateParams {
+  export interface ResponseCreateParamsNonStreaming {
+    /**
+     * The ID of the last response to return.
+     */
+    after?: string;
+
+    /**
+     * The number of responses to return.
+     */
+    limit?: number;
+
+    /**
+     * The model to filter responses by.
+     */
+    model?: string;
+
+    /**
+     * The order to sort responses by when sorted by created_at ('asc' or 'desc').
+     */
+    order?: 'asc' | 'desc';
+  }
 }
 
 export interface ResponseListParams extends OpenAICursorPageParams {
@@ -3302,12 +3627,12 @@ export declare namespace Responses {
   export {
     type ResponseObject as ResponseObject,
     type ResponseObjectStream as ResponseObjectStream,
+    type ResponseCreateResponse as ResponseCreateResponse,
     type ResponseListResponse as ResponseListResponse,
     type ResponseDeleteResponse as ResponseDeleteResponse,
     ResponseListResponsesOpenAICursorPage as ResponseListResponsesOpenAICursorPage,
     type ResponseCreateParams as ResponseCreateParams,
     type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming,
-    type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming,
     type ResponseListParams as ResponseListParams,
   };
 
diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts
index 8064a72..e554a32 100644
--- a/src/resources/telemetry.ts
+++ b/src/resources/telemetry.ts
@@ -4,39 +4,6 @@ import { APIResource } from '../resource';
 import * as Core from '../core';
 
 export class Telemetry extends APIResource {
-  /**
-   * Get a span by its ID.
-   */
-  getSpan(
-    traceId: string,
-    spanId: string,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<TelemetryGetSpanResponse> {
-    return this._client.get(`/v1/telemetry/traces/${traceId}/spans/${spanId}`, options);
-  }
-
-  /**
-   * Get a span tree by its ID.
- */ - getSpanTree( - spanId: string, - body: TelemetryGetSpanTreeParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post(`/v1/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{ - data: TelemetryGetSpanTreeResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Get a trace by its ID. - */ - getTrace(traceId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/telemetry/traces/${traceId}`, options); - } - /** * Log an event. */ @@ -47,63 +14,6 @@ export class Telemetry extends APIResource { headers: { Accept: '*/*', ...options?.headers }, }); } - - /** - * Query metrics. - */ - queryMetrics( - metricName: string, - body: TelemetryQueryMetricsParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post(`/v1/telemetry/metrics/${metricName}`, { body, ...options }) as Core.APIPromise<{ - data: TelemetryQueryMetricsResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Query spans. - */ - querySpans( - body: TelemetryQuerySpansParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post('/v1/telemetry/spans', { body, ...options }) as Core.APIPromise<{ - data: TelemetryQuerySpansResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Query traces. - */ - queryTraces( - body: TelemetryQueryTracesParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post('/v1/telemetry/traces', { body, ...options }) as Core.APIPromise<{ - data: TelemetryQueryTracesResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Save spans to a dataset. - */ - saveSpansToDataset( - body: TelemetrySaveSpansToDatasetParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/telemetry/spans/export', { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } } /** @@ -297,197 +207,14 @@ export interface QuerySpansResponse { /** * List of spans matching the query criteria */ - data: TelemetryQuerySpansResponse; -} - -/** - * A span that includes status information. - */ -export interface SpanWithStatus { - /** - * Human-readable name describing the operation this span represents - */ - name: string; - - /** - * Unique identifier for the span - */ - span_id: string; - - /** - * Timestamp when the operation began - */ - start_time: string; - - /** - * Unique identifier for the trace this span belongs to - */ - trace_id: string; - - /** - * (Optional) Key-value pairs containing additional metadata about the span - */ - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) Timestamp when the operation finished, if completed - */ - end_time?: string; - - /** - * (Optional) Unique identifier for the parent span, if this is a child span - */ - parent_span_id?: string; - - /** - * (Optional) The current status of the span - */ - status?: 'ok' | 'error'; -} - -/** - * A trace representing the complete execution path of a request across multiple - * operations. 
- */ -export interface Trace { - /** - * Unique identifier for the root span that started this trace - */ - root_span_id: string; - - /** - * Timestamp when the trace began - */ - start_time: string; - - /** - * Unique identifier for the trace - */ - trace_id: string; - - /** - * (Optional) Timestamp when the trace finished, if completed - */ - end_time?: string; -} - -/** - * A span representing a single operation within a trace. - */ -export interface TelemetryGetSpanResponse { - /** - * Human-readable name describing the operation this span represents - */ - name: string; - - /** - * Unique identifier for the span - */ - span_id: string; - - /** - * Timestamp when the operation began - */ - start_time: string; - - /** - * Unique identifier for the trace this span belongs to - */ - trace_id: string; - - /** - * (Optional) Key-value pairs containing additional metadata about the span - */ - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) Timestamp when the operation finished, if completed - */ - end_time?: string; - - /** - * (Optional) Unique identifier for the parent span, if this is a child span - */ - parent_span_id?: string; -} - -/** - * Dictionary mapping span IDs to spans with status information - */ -export type TelemetryGetSpanTreeResponse = { [key: string]: SpanWithStatus }; - -/** - * List of metric series matching the query criteria - */ -export type TelemetryQueryMetricsResponse = - Array; - -export namespace TelemetryQueryMetricsResponse { - /** - * A time series of metric data points. - */ - export interface TelemetryQueryMetricsResponseItem { - /** - * List of labels associated with this metric series - */ - labels: Array; - - /** - * The name of the metric - */ - metric: string; - - /** - * List of data points in chronological order - */ - values: Array; - } - - export namespace TelemetryQueryMetricsResponseItem { - /** - * A label associated with a metric. - */ - export interface Label { - /** - * The name of the label - */ - name: string; - - /** - * The value of the label - */ - value: string; - } - - /** - * A single data point in a metric time series. - */ - export interface Value { - /** - * Unix timestamp when the metric value was recorded - */ - timestamp: number; - - unit: string; - - /** - * The numeric value of the metric at this timestamp - */ - value: number; - } - } + data: Array; } -/** - * List of spans matching the query criteria - */ -export type TelemetryQuerySpansResponse = Array; - -export namespace TelemetryQuerySpansResponse { +export namespace QuerySpansResponse { /** * A span representing a single operation within a trace. */ - export interface TelemetryQuerySpansResponseItem { + export interface Data { /** * Human-readable name describing the operation this span represents */ @@ -526,142 +253,86 @@ export namespace TelemetryQuerySpansResponse { } /** - * List of traces matching the query criteria + * A span that includes status information. */ -export type TelemetryQueryTracesResponse = Array; - -export interface TelemetryGetSpanTreeParams { - /** - * The attributes to return in the tree. - */ - attributes_to_return?: Array; - - /** - * The maximum depth of the tree. - */ - max_depth?: number; -} - -export interface TelemetryLogEventParams { - /** - * The event to log. - */ - event: Event; - - /** - * The time to live of the event. - */ - ttl_seconds: number; -} - -export interface TelemetryQueryMetricsParams { - /** - * The type of query to perform. 
- */ - query_type: 'range' | 'instant'; - +export interface SpanWithStatus { /** - * The start time of the metric to query. + * Human-readable name describing the operation this span represents */ - start_time: number; + name: string; /** - * The end time of the metric to query. + * Unique identifier for the span */ - end_time?: number; + span_id: string; /** - * The granularity of the metric to query. + * Timestamp when the operation began */ - granularity?: string; + start_time: string; /** - * The label matchers to apply to the metric. + * Unique identifier for the trace this span belongs to */ - label_matchers?: Array; -} + trace_id: string; -export namespace TelemetryQueryMetricsParams { /** - * A matcher for filtering metrics by label values. + * (Optional) Key-value pairs containing additional metadata about the span */ - export interface LabelMatcher { - /** - * The name of the label to match - */ - name: string; - - /** - * The comparison operator to use for matching - */ - operator: '=' | '!=' | '=~' | '!~'; - - /** - * The value to match against - */ - value: string; - } -} + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; -export interface TelemetryQuerySpansParams { /** - * The attribute filters to apply to the spans. + * (Optional) Timestamp when the operation finished, if completed */ - attribute_filters: Array; + end_time?: string; /** - * The attributes to return in the spans. + * (Optional) Unique identifier for the parent span, if this is a child span */ - attributes_to_return: Array; + parent_span_id?: string; /** - * The maximum depth of the tree. + * (Optional) The current status of the span */ - max_depth?: number; + status?: 'ok' | 'error'; } -export interface TelemetryQueryTracesParams { +/** + * A trace representing the complete execution path of a request across multiple + * operations. + */ +export interface Trace { /** - * The attribute filters to apply to the traces. + * Unique identifier for the root span that started this trace */ - attribute_filters?: Array; + root_span_id: string; /** - * The limit of traces to return. + * Timestamp when the trace began */ - limit?: number; + start_time: string; /** - * The offset of the traces to return. + * Unique identifier for the trace */ - offset?: number; + trace_id: string; /** - * The order by of the traces to return. + * (Optional) Timestamp when the trace finished, if completed */ - order_by?: Array; + end_time?: string; } -export interface TelemetrySaveSpansToDatasetParams { - /** - * The attribute filters to apply to the spans. - */ - attribute_filters: Array; - - /** - * The attributes to save to the dataset. - */ - attributes_to_save: Array; - +export interface TelemetryLogEventParams { /** - * The ID of the dataset to save the spans to. + * The event to log. */ - dataset_id: string; + event: Event; /** - * The maximum depth of the tree. + * The time to live of the event. 
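+   *
+   * Usage sketch (event payload elided; `3600` keeps the event for one hour):
+   *   await client.telemetry.logEvent({ event, ttl_seconds: 3600 });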
*/ - max_depth?: number; + ttl_seconds: number; } export declare namespace Telemetry { @@ -671,16 +342,6 @@ export declare namespace Telemetry { type QuerySpansResponse as QuerySpansResponse, type SpanWithStatus as SpanWithStatus, type Trace as Trace, - type TelemetryGetSpanResponse as TelemetryGetSpanResponse, - type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, - type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse, - type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, - type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, - type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, type TelemetryLogEventParams as TelemetryLogEventParams, - type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams, - type TelemetryQuerySpansParams as TelemetryQuerySpansParams, - type TelemetryQueryTracesParams as TelemetryQueryTracesParams, - type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, }; } diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts index 64996ff..058779e 100644 --- a/src/resources/tool-runtime/tool-runtime.ts +++ b/src/resources/tool-runtime/tool-runtime.ts @@ -58,19 +58,56 @@ export interface ToolDef { description?: string; /** - * (Optional) JSON Schema for tool inputs (MCP inputSchema) + * (Optional) Additional metadata about the tool */ - input_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) Additional metadata about the tool + * (Optional) List of parameters this tool accepts */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + parameters?: Array; +} +export namespace ToolDef { /** - * (Optional) JSON Schema for tool outputs (MCP outputSchema) + * Parameter definition for a tool. */ - output_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + export interface Parameter { + /** + * Human-readable description of what the parameter does + */ + description: string; + + /** + * Name of the parameter + */ + name: string; + + /** + * Type of the parameter (e.g., string, integer) + */ + parameter_type: string; + + /** + * Whether this parameter is required for tool invocation + */ + required: boolean; + + /** + * (Optional) Default value for the parameter if not provided + */ + default?: boolean | number | string | Array | unknown | null; + + /** + * Type of the elements when parameter_type is array + */ + items?: unknown; + + /** + * (Optional) Title of the parameter + */ + title?: string; + } } /** diff --git a/src/resources/tools.ts b/src/resources/tools.ts index efc2626..ab05a60 100644 --- a/src/resources/tools.ts +++ b/src/resources/tools.ts @@ -51,6 +51,11 @@ export interface Tool { identifier: string; + /** + * List of parameters this tool accepts + */ + parameters: Array; + provider_id: string; /** @@ -63,22 +68,54 @@ export interface Tool { */ type: 'tool'; - /** - * JSON Schema for the tool's input parameters - */ - input_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; - /** * (Optional) Additional metadata about the tool */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + provider_resource_id?: string; +} + +export namespace Tool { /** - * JSON Schema for the tool's output + * Parameter definition for a tool. 
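+   *
+   * Example parameter (values are illustrative):
+   *   { name: 'query', parameter_type: 'string', description: 'Search terms', required: true }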
*/ - output_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_resource_id?: string; + export interface Parameter { + /** + * Human-readable description of what the parameter does + */ + description: string; + + /** + * Name of the parameter + */ + name: string; + + /** + * Type of the parameter (e.g., string, integer) + */ + parameter_type: string; + + /** + * Whether this parameter is required for tool invocation + */ + required: boolean; + + /** + * (Optional) Default value for the parameter if not provided + */ + default?: boolean | number | string | Array | unknown | null; + + /** + * Type of the elements when parameter_type is array + */ + items?: unknown; + + /** + * (Optional) Title of the parameter + */ + title?: string; + } } /** diff --git a/tests/api-resources/alpha/agents/agents.test.ts b/tests/api-resources/alpha/agents/agents.test.ts index 8beec68..0f26b3a 100644 --- a/tests/api-resources/alpha/agents/agents.test.ts +++ b/tests/api-resources/alpha/agents/agents.test.ts @@ -28,9 +28,18 @@ describe('resource agents', () => { { name: 'name', description: 'description', - input_schema: { foo: true }, metadata: { foo: true }, - output_schema: { foo: true }, + parameters: [ + { + description: 'description', + name: 'name', + parameter_type: 'parameter_type', + required: true, + default: true, + items: {}, + title: 'title', + }, + ], }, ], enable_session_persistence: true, diff --git a/tests/api-resources/benchmarks.test.ts b/tests/api-resources/benchmarks.test.ts deleted file mode 100644 index 45bc197..0000000 --- a/tests/api-resources/benchmarks.test.ts +++ /dev/null @@ -1,70 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; -import { Response } from 'node-fetch'; - -const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); - -describe('resource benchmarks', () => { - test('retrieve', async () => { - const responsePromise = client.benchmarks.retrieve('benchmark_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('retrieve: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.benchmarks.retrieve('benchmark_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('list', async () => { - const responsePromise = client.benchmarks.list(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.benchmarks.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); - }); - - test('register: only required params', async () => { - const responsePromise = client.benchmarks.register({ - benchmark_id: 'benchmark_id', - dataset_id: 'dataset_id', - scoring_functions: ['string'], - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('register: required and optional params', async () => { - const response = await client.benchmarks.register({ - benchmark_id: 'benchmark_id', - dataset_id: 'dataset_id', - scoring_functions: ['string'], - metadata: { foo: true }, - provider_benchmark_id: 'provider_benchmark_id', - provider_id: 'provider_id', - }); - }); -}); diff --git a/tests/api-resources/datasets.test.ts b/tests/api-resources/datasets.test.ts deleted file mode 100644 index e0db4c4..0000000 --- a/tests/api-resources/datasets.test.ts +++ /dev/null @@ -1,129 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; -import { Response } from 'node-fetch'; - -const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); - -describe('resource datasets', () => { - test('retrieve', async () => { - const responsePromise = client.datasets.retrieve('dataset_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('retrieve: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.datasets.retrieve('dataset_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('list', async () => { - const responsePromise = client.datasets.list(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.datasets.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); - }); - - test('appendrows: only required params', async () => { - const responsePromise = client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('appendrows: required and optional params', async () => { - const response = await client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] }); - }); - - test('iterrows', async () => { - const responsePromise = client.datasets.iterrows('dataset_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('iterrows: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.datasets.iterrows('dataset_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('iterrows: request options and params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.datasets.iterrows( - 'dataset_id', - { limit: 0, start_index: 0 }, - { path: '/_stainless_unknown_path' }, 
- ), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('register: only required params', async () => { - const responsePromise = client.datasets.register({ - purpose: 'post-training/messages', - source: { type: 'uri', uri: 'uri' }, - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('register: required and optional params', async () => { - const response = await client.datasets.register({ - purpose: 'post-training/messages', - source: { type: 'uri', uri: 'uri' }, - dataset_id: 'dataset_id', - metadata: { foo: true }, - }); - }); - - test('unregister', async () => { - const responsePromise = client.datasets.unregister('dataset_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('unregister: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.datasets.unregister('dataset_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); -}); diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts index f1142d8..fbe25a5 100644 --- a/tests/api-resources/responses/responses.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -6,8 +6,8 @@ import { Response } from 'node-fetch'; const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource responses', () => { - test('create: only required params', async () => { - const responsePromise = client.responses.create({ input: 'string', model: 'model' }); + test('create', async () => { + const responsePromise = client.responses.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -17,30 +17,6 @@ describe('resource responses', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('create: required and optional params', async () => { - const response = await client.responses.create({ - input: 'string', - model: 'model', - include: ['string'], - instructions: 'instructions', - max_infer_iters: 0, - previous_response_id: 'previous_response_id', - store: true, - stream: false, - temperature: 0, - text: { - format: { - type: 'text', - description: 'description', - name: 'name', - schema: { foo: true }, - strict: true, - }, - }, - tools: [{ type: 'web_search', search_context_size: 'search_context_size' }], - }); - }); - test('retrieve', async () => { const responsePromise = client.responses.retrieve('response_id'); const rawResponse = await responsePromise.asResponse(); diff --git a/tests/api-resources/telemetry.test.ts b/tests/api-resources/telemetry.test.ts index e042d08..df88631 100644 --- a/tests/api-resources/telemetry.test.ts +++ b/tests/api-resources/telemetry.test.ts @@ -6,53 +6,6 @@ import { Response } from 'node-fetch'; const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); describe('resource telemetry', () => { - test('getSpan', async () => { - const responsePromise = client.telemetry.getSpan('trace_id', 'span_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('getSpan: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.telemetry.getSpan('trace_id', 'span_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('getSpanTree', async () => { - const responsePromise = client.telemetry.getSpanTree('span_id', {}); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('getTrace', async () => { - const responsePromise = client.telemetry.getTrace('trace_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('getTrace: request options instead of params are passed correctly', async () => { 
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.telemetry.getTrace('trace_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); - }); - test('logEvent: only required params', async () => { const responsePromise = client.telemetry.logEvent({ event: { @@ -88,90 +41,4 @@ describe('resource telemetry', () => { ttl_seconds: 0, }); }); - - // unsupported query params in java / kotlin - test.skip('queryMetrics: only required params', async () => { - const responsePromise = client.telemetry.queryMetrics('metric_name', { - query_type: 'range', - start_time: 0, - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // unsupported query params in java / kotlin - test.skip('queryMetrics: required and optional params', async () => { - const response = await client.telemetry.queryMetrics('metric_name', { - query_type: 'range', - start_time: 0, - end_time: 0, - granularity: 'granularity', - label_matchers: [{ name: 'name', operator: '=', value: 'value' }], - }); - }); - - // unsupported query params in java / kotlin - test.skip('querySpans: only required params', async () => { - const responsePromise = client.telemetry.querySpans({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_return: ['string'], - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - // unsupported query params in java / kotlin - test.skip('querySpans: required and optional params', async () => { - const response = await client.telemetry.querySpans({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_return: ['string'], - max_depth: 0, - }); - }); - - // unsupported query params in java / kotlin - test.skip('queryTraces', async () => { - const responsePromise = client.telemetry.queryTraces({}); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('saveSpansToDataset: only required params', async () => { - const responsePromise = client.telemetry.saveSpansToDataset({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_save: ['string'], - dataset_id: 'dataset_id', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('saveSpansToDataset: 
required and optional params', async () => { - const response = await client.telemetry.saveSpansToDataset({ - attribute_filters: [{ key: 'key', op: 'eq', value: true }], - attributes_to_save: ['string'], - dataset_id: 'dataset_id', - max_depth: 0, - }); - }); }); From 5cee3d69650a4c827e12fc046c1d2ec3b2fa9126 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Oct 2025 18:05:42 +0000 Subject: [PATCH 18/26] fix(api): fix the ToolDefParam updates --- .stats.yml | 4 +- api.md | 4 +- src/index.ts | 10 +- src/resources/index.ts | 8 +- src/resources/shared.ts | 14 +-- src/resources/tool-runtime/tool-runtime.ts | 50 ++------- src/resources/tools.ts | 102 +----------------- .../api-resources/alpha/agents/agents.test.ts | 14 +-- 8 files changed, 24 insertions(+), 182 deletions(-) diff --git a/.stats.yml b/.stats.yml index cbd436b..724604d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 93 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-41cb5d8049e6ffd933a7ad6bbbb76b2fef2e864d0d857c91799ee16e9a796883.yml -openapi_spec_hash: 5e0bdf64563e020ef14b968ab724d2db +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f26df77f0800baeaea40407776f6c1e618756037969411e29de209ce961655dd.yml +openapi_spec_hash: e7c2329edc0f9f5aa1c78b6afb996e1c config_hash: 0412cd40c0609550c1a47c69dd104e4f diff --git a/api.md b/api.md index 63895d0..c6e03b5 100644 --- a/api.md +++ b/api.md @@ -39,14 +39,12 @@ Methods: Types: -- ListToolsResponse -- Tool - ToolListResponse Methods: - client.tools.list({ ...params }) -> ToolListResponse -- client.tools.get(toolName) -> Tool +- client.tools.get(toolName) -> ToolDef # ToolRuntime diff --git a/src/index.ts b/src/index.ts index 8212d27..e504e03 100644 --- a/src/index.ts +++ b/src/index.ts @@ -81,7 +81,7 @@ import { ToolgroupRegisterParams, Toolgroups, } from './resources/toolgroups'; -import { ListToolsResponse, Tool, ToolListParams, ToolListResponse, Tools } from './resources/tools'; +import { ToolListParams, ToolListResponse, Tools } from './resources/tools'; import { ListVectorDBsResponse, VectorDBListResponse, @@ -376,13 +376,7 @@ export declare namespace LlamaStackClient { type ToolgroupRegisterParams as ToolgroupRegisterParams, }; - export { - Tools as Tools, - type ListToolsResponse as ListToolsResponse, - type Tool as Tool, - type ToolListResponse as ToolListResponse, - type ToolListParams as ToolListParams, - }; + export { Tools as Tools, type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams }; export { ToolRuntime as ToolRuntime, diff --git a/src/resources/index.ts b/src/resources/index.ts index 0fd6c90..bc6b6af 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -98,13 +98,7 @@ export { type ToolgroupListResponse, type ToolgroupRegisterParams, } from './toolgroups'; -export { - Tools, - type ListToolsResponse, - type Tool, - type ToolListResponse, - type ToolListParams, -} from './tools'; +export { Tools, type ToolListResponse, type ToolListParams } from './tools'; export { VectorDBs, type ListVectorDBsResponse, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 35b0da3..7a10e74 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -796,23 +796,11 @@ export interface SystemMessage { } export interface ToolCall { - arguments: - | string - | { - [key: string]: - | string - | number - | 
boolean - | Array - | { [key: string]: string | number | boolean | null } - | null; - }; + arguments: string; call_id: string; tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - arguments_json?: string; } /** diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts index 058779e..3324906 100644 --- a/src/resources/tool-runtime/tool-runtime.ts +++ b/src/resources/tool-runtime/tool-runtime.ts @@ -57,57 +57,25 @@ export interface ToolDef { */ description?: string; + /** + * (Optional) JSON Schema for tool inputs (MCP inputSchema) + */ + input_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + /** * (Optional) Additional metadata about the tool */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) List of parameters this tool accepts + * (Optional) JSON Schema for tool outputs (MCP outputSchema) */ - parameters?: Array; -} + output_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; -export namespace ToolDef { /** - * Parameter definition for a tool. + * (Optional) ID of the tool group this tool belongs to */ - export interface Parameter { - /** - * Human-readable description of what the parameter does - */ - description: string; - - /** - * Name of the parameter - */ - name: string; - - /** - * Type of the parameter (e.g., string, integer) - */ - parameter_type: string; - - /** - * Whether this parameter is required for tool invocation - */ - required: boolean; - - /** - * (Optional) Default value for the parameter if not provided - */ - default?: boolean | number | string | Array | unknown | null; - - /** - * Type of the elements when parameter_type is array - */ - items?: unknown; - - /** - * (Optional) Title of the parameter - */ - title?: string; - } + toolgroup_id?: string; } /** diff --git a/src/resources/tools.ts b/src/resources/tools.ts index ab05a60..668d2ce 100644 --- a/src/resources/tools.ts +++ b/src/resources/tools.ts @@ -3,6 +3,7 @@ import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; import * as Core from '../core'; +import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; export class Tools extends APIResource { /** @@ -25,103 +26,15 @@ export class Tools extends APIResource { /** * Get a tool by its name. */ - get(toolName: string, options?: Core.RequestOptions): Core.APIPromise { + get(toolName: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/v1/tools/${toolName}`, options); } } /** - * Response containing a list of tools. + * List of tool definitions */ -export interface ListToolsResponse { - /** - * List of tools - */ - data: ToolListResponse; -} - -/** - * A tool that can be invoked by agents. - */ -export interface Tool { - /** - * Human-readable description of what the tool does - */ - description: string; - - identifier: string; - - /** - * List of parameters this tool accepts - */ - parameters: Array; - - provider_id: string; - - /** - * ID of the tool group this tool belongs to - */ - toolgroup_id: string; - - /** - * Type of resource, always 'tool' - */ - type: 'tool'; - - /** - * (Optional) Additional metadata about the tool - */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_resource_id?: string; -} - -export namespace Tool { - /** - * Parameter definition for a tool. 
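
As a usage-level illustration of this change, not taken from the patch itself: tool definitions now carry MCP-style JSON Schema directly, and ToolCall.arguments arrives as a raw JSON string. A minimal TypeScript sketch, where the get_weather tool and its schema are invented for illustration:

// Sketch only: `get_weather` and its schema are placeholders.
const getWeatherTool = {
  name: 'get_weather',
  description: 'Look up the current weather for a city',
  // JSON Schema replaces the old `parameters` array (MCP inputSchema).
  input_schema: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  },
  // Optional MCP outputSchema.
  output_schema: {
    type: 'object',
    properties: { temp_c: { type: 'number' } },
  },
};

// `arguments` is now a plain JSON string, so callers parse it themselves.
function parseToolArguments(call: { arguments: string }): unknown {
  return JSON.parse(call.arguments);
}
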
- */ - export interface Parameter { - /** - * Human-readable description of what the parameter does - */ - description: string; - - /** - * Name of the parameter - */ - name: string; - - /** - * Type of the parameter (e.g., string, integer) - */ - parameter_type: string; - - /** - * Whether this parameter is required for tool invocation - */ - required: boolean; - - /** - * (Optional) Default value for the parameter if not provided - */ - default?: boolean | number | string | Array | unknown | null; - - /** - * Type of the elements when parameter_type is array - */ - items?: unknown; - - /** - * (Optional) Title of the parameter - */ - title?: string; - } -} - -/** - * List of tools - */ -export type ToolListResponse = Array; +export type ToolListResponse = Array; export interface ToolListParams { /** @@ -131,10 +44,5 @@ export interface ToolListParams { } export declare namespace Tools { - export { - type ListToolsResponse as ListToolsResponse, - type Tool as Tool, - type ToolListResponse as ToolListResponse, - type ToolListParams as ToolListParams, - }; + export { type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams }; } diff --git a/tests/api-resources/alpha/agents/agents.test.ts b/tests/api-resources/alpha/agents/agents.test.ts index 0f26b3a..f06a1d3 100644 --- a/tests/api-resources/alpha/agents/agents.test.ts +++ b/tests/api-resources/alpha/agents/agents.test.ts @@ -28,18 +28,10 @@ describe('resource agents', () => { { name: 'name', description: 'description', + input_schema: { foo: true }, metadata: { foo: true }, - parameters: [ - { - description: 'description', - name: 'name', - parameter_type: 'parameter_type', - required: true, - default: true, - items: {}, - title: 'title', - }, - ], + output_schema: { foo: true }, + toolgroup_id: 'toolgroup_id', }, ], enable_session_persistence: true, From e4f78407f74f3ba7597de355c314e1932dd94761 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Oct 2025 18:18:50 +0000 Subject: [PATCH 19/26] feat(api): fixes to URLs --- .stats.yml | 4 +- api.md | 33 +- src/index.ts | 56 +++- src/resources/benchmarks.ts | 75 ++++- src/resources/datasets.ts | 339 ++++++++++++++++++++- src/resources/index.ts | 31 +- src/resources/telemetry.ts | 401 ++++++++++++++++++++++--- tests/api-resources/benchmarks.test.ts | 70 +++++ tests/api-resources/datasets.test.ts | 129 ++++++++ tests/api-resources/telemetry.test.ts | 143 +++++++-- 10 files changed, 1194 insertions(+), 87 deletions(-) create mode 100644 tests/api-resources/benchmarks.test.ts create mode 100644 tests/api-resources/datasets.test.ts diff --git a/.stats.yml b/.stats.yml index 724604d..a88d26a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 93 +configured_endpoints: 108 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f26df77f0800baeaea40407776f6c1e618756037969411e29de209ce961655dd.yml openapi_spec_hash: e7c2329edc0f9f5aa1c78b6afb996e1c -config_hash: 0412cd40c0609550c1a47c69dd104e4f +config_hash: 8800bdff1a087b9d5211dda2a7b9f66f diff --git a/api.md b/api.md index c6e03b5..41cf8af 100644 --- a/api.md +++ b/api.md @@ -98,6 +98,19 @@ Methods: Types: - ListDatasetsResponse +- DatasetRetrieveResponse +- DatasetListResponse +- DatasetIterrowsResponse +- DatasetRegisterResponse + +Methods: + +- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse +- client.datasets.list() -> DatasetListResponse +- 
client.datasets.appendrows(datasetId, { ...params }) -> void +- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse +- client.datasets.register({ ...params }) -> DatasetRegisterResponse +- client.datasets.unregister(datasetId) -> void # Inspect @@ -331,10 +344,21 @@ Types: - QuerySpansResponse - SpanWithStatus - Trace +- TelemetryGetSpanResponse +- TelemetryGetSpanTreeResponse +- TelemetryQueryMetricsResponse +- TelemetryQuerySpansResponse +- TelemetryQueryTracesResponse Methods: -- client.telemetry.logEvent({ ...params }) -> void +- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse +- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse +- client.telemetry.getTrace(traceId) -> Trace +- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse +- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse +- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse +- client.telemetry.saveSpansToDataset({ ...params }) -> void # Scoring @@ -369,6 +393,13 @@ Types: - Benchmark - ListBenchmarksResponse +- BenchmarkListResponse + +Methods: + +- client.benchmarks.retrieve(benchmarkId) -> Benchmark +- client.benchmarks.list() -> BenchmarkListResponse +- client.benchmarks.register({ ...params }) -> void # Files diff --git a/src/index.ts b/src/index.ts index e504e03..bd16e4b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -13,7 +13,13 @@ import { } from './pagination'; import * as Uploads from './uploads'; import * as API from './resources/index'; -import { Benchmark, Benchmarks, ListBenchmarksResponse } from './resources/benchmarks'; +import { + Benchmark, + BenchmarkListResponse, + BenchmarkRegisterParams, + Benchmarks, + ListBenchmarksResponse, +} from './resources/benchmarks'; import { CompletionCreateParams, CompletionCreateParamsNonStreaming, @@ -21,7 +27,17 @@ import { CompletionCreateResponse, Completions, } from './resources/completions'; -import { Datasets, ListDatasetsResponse } from './resources/datasets'; +import { + DatasetAppendrowsParams, + DatasetIterrowsParams, + DatasetIterrowsResponse, + DatasetListResponse, + DatasetRegisterParams, + DatasetRegisterResponse, + DatasetRetrieveResponse, + Datasets, + ListDatasetsResponse, +} from './resources/datasets'; import { CreateEmbeddingsResponse, EmbeddingCreateParams, Embeddings } from './resources/embeddings'; import { DeleteFileResponse, @@ -71,7 +87,16 @@ import { QuerySpansResponse, SpanWithStatus, Telemetry, - TelemetryLogEventParams, + TelemetryGetSpanResponse, + TelemetryGetSpanTreeParams, + TelemetryGetSpanTreeResponse, + TelemetryQueryMetricsParams, + TelemetryQueryMetricsResponse, + TelemetryQuerySpansParams, + TelemetryQuerySpansResponse, + TelemetryQueryTracesParams, + TelemetryQueryTracesResponse, + TelemetrySaveSpansToDatasetParams, Trace, } from './resources/telemetry'; import { @@ -400,7 +425,17 @@ export declare namespace LlamaStackClient { type ResponseListParams as ResponseListParams, }; - export { Datasets as Datasets, type ListDatasetsResponse as ListDatasetsResponse }; + export { + Datasets as Datasets, + type ListDatasetsResponse as ListDatasetsResponse, + type DatasetRetrieveResponse as DatasetRetrieveResponse, + type DatasetListResponse as DatasetListResponse, + type DatasetIterrowsResponse as DatasetIterrowsResponse, + type DatasetRegisterResponse as DatasetRegisterResponse, + type DatasetAppendrowsParams as DatasetAppendrowsParams, + type DatasetIterrowsParams as 
DatasetIterrowsParams, + type DatasetRegisterParams as DatasetRegisterParams, + }; export { Inspect as Inspect, @@ -508,7 +543,16 @@ export declare namespace LlamaStackClient { type QuerySpansResponse as QuerySpansResponse, type SpanWithStatus as SpanWithStatus, type Trace as Trace, - type TelemetryLogEventParams as TelemetryLogEventParams, + type TelemetryGetSpanResponse as TelemetryGetSpanResponse, + type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, + type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse, + type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, + type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, + type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, + type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams, + type TelemetryQuerySpansParams as TelemetryQuerySpansParams, + type TelemetryQueryTracesParams as TelemetryQueryTracesParams, + type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, }; export { @@ -532,6 +576,8 @@ export declare namespace LlamaStackClient { Benchmarks as Benchmarks, type Benchmark as Benchmark, type ListBenchmarksResponse as ListBenchmarksResponse, + type BenchmarkListResponse as BenchmarkListResponse, + type BenchmarkRegisterParams as BenchmarkRegisterParams, }; export { diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts index a5659ed..3b33eab 100644 --- a/src/resources/benchmarks.ts +++ b/src/resources/benchmarks.ts @@ -1,8 +1,38 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../resource'; +import * as Core from '../core'; -export class Benchmarks extends APIResource {} +export class Benchmarks extends APIResource { + /** + * Get a benchmark by its ID. + */ + retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1alpha/eval/benchmarks/${benchmarkId}`, options); + } + + /** + * List all benchmarks. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1alpha/eval/benchmarks', options) as Core.APIPromise<{ + data: BenchmarkListResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Register a benchmark. + */ + register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1alpha/eval/benchmarks', { + body, + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } +} /** * A benchmark resource for evaluating model performance. @@ -36,9 +66,48 @@ export interface Benchmark { } export interface ListBenchmarksResponse { - data: Array; + data: BenchmarkListResponse; +} + +export type BenchmarkListResponse = Array; + +export interface BenchmarkRegisterParams { + /** + * The ID of the benchmark to register. + */ + benchmark_id: string; + + /** + * The ID of the dataset to use for the benchmark. + */ + dataset_id: string; + + /** + * The scoring functions to use for the benchmark. + */ + scoring_functions: Array; + + /** + * The metadata to use for the benchmark. + */ + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * The ID of the provider benchmark to use for the benchmark. + */ + provider_benchmark_id?: string; + + /** + * The ID of the provider to use for the benchmark. 
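
With benchmarks now served from the v1alpha eval routes, a registration round trip might look like the sketch below; the benchmark and dataset IDs and the scoring function name are placeholders, not values from this patch:

import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // placeholder URL

// register() resolves to void; retrieve() and list() return the benchmark resources.
await client.benchmarks.register({
  benchmark_id: 'my-benchmark',         // placeholder
  dataset_id: 'my-eval-dataset',        // assumed to be registered already
  scoring_functions: ['my-scoring-fn'], // placeholder scoring function ID
});

const benchmark = await client.benchmarks.retrieve('my-benchmark');
const benchmarks = await client.benchmarks.list(); // the SDK unwraps the { data: [...] } envelope
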
+ */ + provider_id?: string; } export declare namespace Benchmarks { - export { type Benchmark as Benchmark, type ListBenchmarksResponse as ListBenchmarksResponse }; + export { + type Benchmark as Benchmark, + type ListBenchmarksResponse as ListBenchmarksResponse, + type BenchmarkListResponse as BenchmarkListResponse, + type BenchmarkRegisterParams as BenchmarkRegisterParams, + }; } diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts index 140a8cc..7a33c4f 100644 --- a/src/resources/datasets.ts +++ b/src/resources/datasets.ts @@ -1,8 +1,89 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; +import * as Core from '../core'; -export class Datasets extends APIResource {} +export class Datasets extends APIResource { + /** + * Get a dataset by its ID. + */ + retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1beta/datasets/${datasetId}`, options); + } + + /** + * List all datasets. + */ + list(options?: Core.RequestOptions): Core.APIPromise { + return ( + this._client.get('/v1beta/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Append rows to a dataset. + */ + appendrows( + datasetId: string, + body: DatasetAppendrowsParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1beta/datasetio/append-rows/${datasetId}`, { + body, + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } + + /** + * Get a paginated list of rows from a dataset. Uses offset-based pagination where: + * + * - start_index: The starting index (0-based). If None, starts from beginning. + * - limit: Number of items to return. If None or -1, returns all items. + * + * The response includes: + * + * - data: List of items for the current page. + * - has_more: Whether there are more items available after this set. + */ + iterrows( + datasetId: string, + query?: DatasetIterrowsParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + iterrows(datasetId: string, options?: Core.RequestOptions): Core.APIPromise; + iterrows( + datasetId: string, + query: DatasetIterrowsParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.iterrows(datasetId, {}, query); + } + return this._client.get(`/v1beta/datasetio/iterrows/${datasetId}`, { query, ...options }); + } + + /** + * Register a new dataset. + */ + register( + body: DatasetRegisterParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1beta/datasets', { body, ...options }); + } + + /** + * Unregister a dataset by its ID. + */ + unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1beta/datasets/${datasetId}`, { + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } +} /** * Response from listing datasets. @@ -11,14 +92,80 @@ export interface ListDatasetsResponse { /** * List of datasets */ - data: Array; + data: DatasetListResponse; } -export namespace ListDatasetsResponse { +/** + * Dataset resource for storing and accessing training or evaluation data. 
+ */ +export interface DatasetRetrieveResponse { + identifier: string; + + /** + * Additional metadata for the dataset + */ + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_id: string; + + /** + * Purpose of the dataset indicating its intended use + */ + purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; + + /** + * Data source configuration for the dataset + */ + source: DatasetRetrieveResponse.UriDataSource | DatasetRetrieveResponse.RowsDataSource; + + /** + * Type of resource, always 'dataset' for datasets + */ + type: 'dataset'; + + provider_resource_id?: string; +} + +export namespace DatasetRetrieveResponse { + /** + * A dataset that can be obtained from a URI. + */ + export interface UriDataSource { + type: 'uri'; + + /** + * The dataset can be obtained from a URI. E.g. - + * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - + * "data:csv;base64,{base64_content}" + */ + uri: string; + } + + /** + * A dataset stored in rows. + */ + export interface RowsDataSource { + /** + * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", + * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} + * ] + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + type: 'rows'; + } +} + +/** + * List of datasets + */ +export type DatasetListResponse = Array; + +export namespace DatasetListResponse { /** * Dataset resource for storing and accessing training or evaluation data. */ - export interface Data { + export interface DatasetListResponseItem { identifier: string; /** @@ -36,7 +183,7 @@ export namespace ListDatasetsResponse { /** * Data source configuration for the dataset */ - source: Data.UriDataSource | Data.RowsDataSource; + source: DatasetListResponseItem.UriDataSource | DatasetListResponseItem.RowsDataSource; /** * Type of resource, always 'dataset' for datasets @@ -46,7 +193,7 @@ export namespace ListDatasetsResponse { provider_resource_id?: string; } - export namespace Data { + export namespace DatasetListResponseItem { /** * A dataset that can be obtained from a URI. */ @@ -77,6 +224,184 @@ export namespace ListDatasetsResponse { } } +/** + * A generic paginated response that follows a simple format. + */ +export interface DatasetIterrowsResponse { + /** + * The list of items for the current page + */ + data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + /** + * Whether there are more items available after this set + */ + has_more: boolean; + + /** + * The URL for accessing this list + */ + url?: string; +} + +/** + * Dataset resource for storing and accessing training or evaluation data. + */ +export interface DatasetRegisterResponse { + identifier: string; + + /** + * Additional metadata for the dataset + */ + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; + + provider_id: string; + + /** + * Purpose of the dataset indicating its intended use + */ + purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; + + /** + * Data source configuration for the dataset + */ + source: DatasetRegisterResponse.UriDataSource | DatasetRegisterResponse.RowsDataSource; + + /** + * Type of resource, always 'dataset' for datasets + */ + type: 'dataset'; + + provider_resource_id?: string; +} + +export namespace DatasetRegisterResponse { + /** + * A dataset that can be obtained from a URI. 
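
A sketch of the new dataset flow, registering a rows-backed dataset and paging through it with iterrows; the row contents and IDs are invented for illustration:

const dataset = await client.datasets.register({
  purpose: 'eval/question-answer',
  source: {
    type: 'rows',
    rows: [{ question: 'What is the capital of France?', answer: 'Paris' }],
  },
  dataset_id: 'capitals-qa', // optional; the server generates one if omitted
});

// Offset-based pagination: keep reading until has_more turns false.
let startIndex = 0;
let hasMore = true;
while (hasMore) {
  const page = await client.datasets.iterrows(dataset.identifier, {
    start_index: startIndex,
    limit: 100,
  });
  // page.data holds the rows for this page.
  startIndex += page.data.length;
  hasMore = page.has_more;
}
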
+ */ + export interface UriDataSource { + type: 'uri'; + + /** + * The dataset can be obtained from a URI. E.g. - + * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - + * "data:csv;base64,{base64_content}" + */ + uri: string; + } + + /** + * A dataset stored in rows. + */ + export interface RowsDataSource { + /** + * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", + * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} + * ] + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + type: 'rows'; + } +} + +export interface DatasetAppendrowsParams { + /** + * The rows to append to the dataset. + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; +} + +export interface DatasetIterrowsParams { + /** + * The number of rows to get. + */ + limit?: number; + + /** + * Index into dataset for the first row to get. Get all rows if None. + */ + start_index?: number; +} + +export interface DatasetRegisterParams { + /** + * The purpose of the dataset. One of: - "post-training/messages": The dataset + * contains a messages column with list of messages for post-training. { + * "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": + * "assistant", "content": "Hello, world!"}, ] } - "eval/question-answer": The + * dataset contains a question column and an answer column for evaluation. { + * "question": "What is the capital of France?", "answer": "Paris" } - + * "eval/messages-answer": The dataset contains a messages column with list of + * messages and an answer column for evaluation. { "messages": [ {"role": "user", + * "content": "Hello, my name is John Doe."}, {"role": "assistant", "content": + * "Hello, John Doe. How can I help you today?"}, {"role": "user", "content": + * "What's my name?"}, ], "answer": "John Doe" } + */ + purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; + + /** + * The data source of the dataset. Ensure that the data source schema is compatible + * with the purpose of the dataset. Examples: - { "type": "uri", "uri": + * "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri": + * "lsfs://mydata.jsonl" } - { "type": "uri", "uri": + * "data:csv;base64,{base64_content}" } - { "type": "uri", "uri": + * "huggingface://llamastack/simpleqa?split=train" } - { "type": "rows", "rows": [ + * { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": + * "assistant", "content": "Hello, world!"}, ] } ] } + */ + source: DatasetRegisterParams.UriDataSource | DatasetRegisterParams.RowsDataSource; + + /** + * The ID of the dataset. If not provided, an ID will be generated. + */ + dataset_id?: string; + + /** + * The metadata for the dataset. - E.g. {"description": "My dataset"}. + */ + metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; +} + +export namespace DatasetRegisterParams { + /** + * A dataset that can be obtained from a URI. + */ + export interface UriDataSource { + type: 'uri'; + + /** + * The dataset can be obtained from a URI. E.g. - + * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - + * "data:csv;base64,{base64_content}" + */ + uri: string; + } + + /** + * A dataset stored in rows. + */ + export interface RowsDataSource { + /** + * The dataset is stored in rows. E.g. 
- [ {"messages": [{"role": "user", + * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} + * ] + */ + rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + + type: 'rows'; + } +} + export declare namespace Datasets { - export { type ListDatasetsResponse as ListDatasetsResponse }; + export { + type ListDatasetsResponse as ListDatasetsResponse, + type DatasetRetrieveResponse as DatasetRetrieveResponse, + type DatasetListResponse as DatasetListResponse, + type DatasetIterrowsResponse as DatasetIterrowsResponse, + type DatasetRegisterResponse as DatasetRegisterResponse, + type DatasetAppendrowsParams as DatasetAppendrowsParams, + type DatasetIterrowsParams as DatasetIterrowsParams, + type DatasetRegisterParams as DatasetRegisterParams, + }; } diff --git a/src/resources/index.ts b/src/resources/index.ts index bc6b6af..df5cd34 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,7 +2,13 @@ export * from './shared'; export { Alpha } from './alpha/alpha'; -export { Benchmarks, type Benchmark, type ListBenchmarksResponse } from './benchmarks'; +export { + Benchmarks, + type Benchmark, + type ListBenchmarksResponse, + type BenchmarkListResponse, + type BenchmarkRegisterParams, +} from './benchmarks'; export { Chat, type ChatCompletionChunk } from './chat/chat'; export { Completions, @@ -11,7 +17,17 @@ export { type CompletionCreateParamsNonStreaming, type CompletionCreateParamsStreaming, } from './completions'; -export { Datasets, type ListDatasetsResponse } from './datasets'; +export { + Datasets, + type ListDatasetsResponse, + type DatasetRetrieveResponse, + type DatasetListResponse, + type DatasetIterrowsResponse, + type DatasetRegisterResponse, + type DatasetAppendrowsParams, + type DatasetIterrowsParams, + type DatasetRegisterParams, +} from './datasets'; export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings'; export { FilesOpenAICursorPage, @@ -81,7 +97,16 @@ export { type QuerySpansResponse, type SpanWithStatus, type Trace, - type TelemetryLogEventParams, + type TelemetryGetSpanResponse, + type TelemetryGetSpanTreeResponse, + type TelemetryQueryMetricsResponse, + type TelemetryQuerySpansResponse, + type TelemetryQueryTracesResponse, + type TelemetryGetSpanTreeParams, + type TelemetryQueryMetricsParams, + type TelemetryQuerySpansParams, + type TelemetryQueryTracesParams, + type TelemetrySaveSpansToDatasetParams, } from './telemetry'; export { ToolRuntime, diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts index e554a32..5c711ae 100644 --- a/src/resources/telemetry.ts +++ b/src/resources/telemetry.ts @@ -5,10 +5,89 @@ import * as Core from '../core'; export class Telemetry extends APIResource { /** - * Log an event. + * Get a span by its ID. */ - logEvent(body: TelemetryLogEventParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/telemetry/events', { + getSpan( + traceId: string, + spanId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1alpha/telemetry/traces/${traceId}/spans/${spanId}`, options); + } + + /** + * Get a span tree by its ID. 
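
Rounding out that lifecycle (continuing the register/iterrows sketch above, with the same placeholder ID), rows can be appended and the dataset removed:

await client.datasets.appendrows('capitals-qa', {
  rows: [{ question: 'What is the capital of Japan?', answer: 'Tokyo' }],
});

await client.datasets.unregister('capitals-qa'); // both calls resolve to void
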
+ */ + getSpanTree( + spanId: string, + body: TelemetryGetSpanTreeParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post(`/v1alpha/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{ + data: TelemetryGetSpanTreeResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Get a trace by its ID. + */ + getTrace(traceId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1alpha/telemetry/traces/${traceId}`, options); + } + + /** + * Query metrics. + */ + queryMetrics( + metricName: string, + body: TelemetryQueryMetricsParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post(`/v1alpha/telemetry/metrics/${metricName}`, { body, ...options }) as Core.APIPromise<{ + data: TelemetryQueryMetricsResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Query spans. + */ + querySpans( + body: TelemetryQuerySpansParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post('/v1alpha/telemetry/spans', { body, ...options }) as Core.APIPromise<{ + data: TelemetryQuerySpansResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Query traces. + */ + queryTraces( + body: TelemetryQueryTracesParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return ( + this._client.post('/v1alpha/telemetry/traces', { body, ...options }) as Core.APIPromise<{ + data: TelemetryQueryTracesResponse; + }> + )._thenUnwrap((obj) => obj.data); + } + + /** + * Save spans to a dataset. + */ + saveSpansToDataset( + body: TelemetrySaveSpansToDatasetParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/v1alpha/telemetry/spans/export', { body, ...options, headers: { Accept: '*/*', ...options?.headers }, @@ -207,14 +286,197 @@ export interface QuerySpansResponse { /** * List of spans matching the query criteria */ - data: Array; + data: TelemetryQuerySpansResponse; } -export namespace QuerySpansResponse { +/** + * A span that includes status information. + */ +export interface SpanWithStatus { + /** + * Human-readable name describing the operation this span represents + */ + name: string; + + /** + * Unique identifier for the span + */ + span_id: string; + + /** + * Timestamp when the operation began + */ + start_time: string; + + /** + * Unique identifier for the trace this span belongs to + */ + trace_id: string; + + /** + * (Optional) Key-value pairs containing additional metadata about the span + */ + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Timestamp when the operation finished, if completed + */ + end_time?: string; + + /** + * (Optional) Unique identifier for the parent span, if this is a child span + */ + parent_span_id?: string; + + /** + * (Optional) The current status of the span + */ + status?: 'ok' | 'error'; +} + +/** + * A trace representing the complete execution path of a request across multiple + * operations. + */ +export interface Trace { + /** + * Unique identifier for the root span that started this trace + */ + root_span_id: string; + + /** + * Timestamp when the trace began + */ + start_time: string; + + /** + * Unique identifier for the trace + */ + trace_id: string; + + /** + * (Optional) Timestamp when the trace finished, if completed + */ + end_time?: string; +} + +/** + * A span representing a single operation within a trace. 
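
A sketch of walking a trace through the relocated v1alpha telemetry routes; the trace ID is a placeholder and the tree depth is arbitrary:

const trace = await client.telemetry.getTrace('trace-123'); // placeholder ID
const rootSpan = await client.telemetry.getSpan(trace.trace_id, trace.root_span_id);

// The span tree is returned as a map of span ID to SpanWithStatus.
const tree = await client.telemetry.getSpanTree(rootSpan.span_id, { max_depth: 3 });
for (const [spanId, span] of Object.entries(tree)) {
  console.log(spanId, span.name, span.status ?? 'unknown');
}
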
+ */ +export interface TelemetryGetSpanResponse { + /** + * Human-readable name describing the operation this span represents + */ + name: string; + + /** + * Unique identifier for the span + */ + span_id: string; + + /** + * Timestamp when the operation began + */ + start_time: string; + + /** + * Unique identifier for the trace this span belongs to + */ + trace_id: string; + + /** + * (Optional) Key-value pairs containing additional metadata about the span + */ + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Timestamp when the operation finished, if completed + */ + end_time?: string; + + /** + * (Optional) Unique identifier for the parent span, if this is a child span + */ + parent_span_id?: string; +} + +/** + * Dictionary mapping span IDs to spans with status information + */ +export type TelemetryGetSpanTreeResponse = { [key: string]: SpanWithStatus }; + +/** + * List of metric series matching the query criteria + */ +export type TelemetryQueryMetricsResponse = + Array; + +export namespace TelemetryQueryMetricsResponse { + /** + * A time series of metric data points. + */ + export interface TelemetryQueryMetricsResponseItem { + /** + * List of labels associated with this metric series + */ + labels: Array; + + /** + * The name of the metric + */ + metric: string; + + /** + * List of data points in chronological order + */ + values: Array; + } + + export namespace TelemetryQueryMetricsResponseItem { + /** + * A label associated with a metric. + */ + export interface Label { + /** + * The name of the label + */ + name: string; + + /** + * The value of the label + */ + value: string; + } + + /** + * A single data point in a metric time series. + */ + export interface Value { + /** + * Unix timestamp when the metric value was recorded + */ + timestamp: number; + + unit: string; + + /** + * The numeric value of the metric at this timestamp + */ + value: number; + } + } +} + +/** + * List of spans matching the query criteria + */ +export type TelemetryQuerySpansResponse = Array; + +export namespace TelemetryQuerySpansResponse { /** * A span representing a single operation within a trace. */ - export interface Data { + export interface TelemetryQuerySpansResponseItem { /** * Human-readable name describing the operation this span represents */ @@ -253,86 +515,130 @@ export namespace QuerySpansResponse { } /** - * A span that includes status information. + * List of traces matching the query criteria */ -export interface SpanWithStatus { +export type TelemetryQueryTracesResponse = Array; + +export interface TelemetryGetSpanTreeParams { /** - * Human-readable name describing the operation this span represents + * The attributes to return in the tree. */ - name: string; + attributes_to_return?: Array; /** - * Unique identifier for the span + * The maximum depth of the tree. */ - span_id: string; + max_depth?: number; +} +export interface TelemetryQueryMetricsParams { /** - * Timestamp when the operation began + * The type of query to perform. */ - start_time: string; + query_type: 'range' | 'instant'; /** - * Unique identifier for the trace this span belongs to + * The start time of the metric to query. */ - trace_id: string; + start_time: number; /** - * (Optional) Key-value pairs containing additional metadata about the span + * The end time of the metric to query. 
*/ - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + end_time?: number; /** - * (Optional) Timestamp when the operation finished, if completed + * The granularity of the metric to query. */ - end_time?: string; + granularity?: string; /** - * (Optional) Unique identifier for the parent span, if this is a child span + * The label matchers to apply to the metric. */ - parent_span_id?: string; + label_matchers?: Array; +} +export namespace TelemetryQueryMetricsParams { /** - * (Optional) The current status of the span + * A matcher for filtering metrics by label values. */ - status?: 'ok' | 'error'; + export interface LabelMatcher { + /** + * The name of the label to match + */ + name: string; + + /** + * The comparison operator to use for matching + */ + operator: '=' | '!=' | '=~' | '!~'; + + /** + * The value to match against + */ + value: string; + } } -/** - * A trace representing the complete execution path of a request across multiple - * operations. - */ -export interface Trace { +export interface TelemetryQuerySpansParams { /** - * Unique identifier for the root span that started this trace + * The attribute filters to apply to the spans. */ - root_span_id: string; + attribute_filters: Array; /** - * Timestamp when the trace began + * The attributes to return in the spans. */ - start_time: string; + attributes_to_return: Array; /** - * Unique identifier for the trace + * The maximum depth of the tree. */ - trace_id: string; + max_depth?: number; +} +export interface TelemetryQueryTracesParams { /** - * (Optional) Timestamp when the trace finished, if completed + * The attribute filters to apply to the traces. */ - end_time?: string; + attribute_filters?: Array; + + /** + * The limit of traces to return. + */ + limit?: number; + + /** + * The offset of the traces to return. + */ + offset?: number; + + /** + * The order by of the traces to return. + */ + order_by?: Array; } -export interface TelemetryLogEventParams { +export interface TelemetrySaveSpansToDatasetParams { + /** + * The attribute filters to apply to the spans. + */ + attribute_filters: Array; + + /** + * The attributes to save to the dataset. + */ + attributes_to_save: Array; + /** - * The event to log. + * The ID of the dataset to save the spans to. */ - event: Event; + dataset_id: string; /** - * The time to live of the event. + * The maximum depth of the tree. 
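
The query endpoints follow the same pattern; a hedged sketch, where the metric name, label matcher, and attribute filter values are placeholders rather than values defined by this patch:

// Range query over the last hour (start_time is epoch seconds).
const series = await client.telemetry.queryMetrics('prompt_tokens', {
  query_type: 'range',
  start_time: Math.floor(Date.now() / 1000) - 3600,
  label_matchers: [{ name: 'model_id', operator: '=', value: 'my-model' }],
});

const traces = await client.telemetry.queryTraces({
  attribute_filters: [{ key: 'session_id', op: 'eq', value: 'my-session' }],
  limit: 10,
});
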
*/ - ttl_seconds: number; + max_depth?: number; } export declare namespace Telemetry { @@ -342,6 +648,15 @@ export declare namespace Telemetry { type QuerySpansResponse as QuerySpansResponse, type SpanWithStatus as SpanWithStatus, type Trace as Trace, - type TelemetryLogEventParams as TelemetryLogEventParams, + type TelemetryGetSpanResponse as TelemetryGetSpanResponse, + type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, + type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse, + type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, + type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, + type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, + type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams, + type TelemetryQuerySpansParams as TelemetryQuerySpansParams, + type TelemetryQueryTracesParams as TelemetryQueryTracesParams, + type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, }; } diff --git a/tests/api-resources/benchmarks.test.ts b/tests/api-resources/benchmarks.test.ts new file mode 100644 index 0000000..45bc197 --- /dev/null +++ b/tests/api-resources/benchmarks.test.ts @@ -0,0 +1,70 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource benchmarks', () => { + test('retrieve', async () => { + const responsePromise = client.benchmarks.retrieve('benchmark_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.benchmarks.retrieve('benchmark_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.benchmarks.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.benchmarks.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('register: only required params', async () => { + const responsePromise = client.benchmarks.register({ + benchmark_id: 'benchmark_id', + dataset_id: 'dataset_id', + scoring_functions: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + 
expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('register: required and optional params', async () => { + const response = await client.benchmarks.register({ + benchmark_id: 'benchmark_id', + dataset_id: 'dataset_id', + scoring_functions: ['string'], + metadata: { foo: true }, + provider_benchmark_id: 'provider_benchmark_id', + provider_id: 'provider_id', + }); + }); +}); diff --git a/tests/api-resources/datasets.test.ts b/tests/api-resources/datasets.test.ts new file mode 100644 index 0000000..e0db4c4 --- /dev/null +++ b/tests/api-resources/datasets.test.ts @@ -0,0 +1,129 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); + +describe('resource datasets', () => { + test('retrieve', async () => { + const responsePromise = client.datasets.retrieve('dataset_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.retrieve('dataset_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.datasets.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.datasets.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + test('appendrows: only required params', async () => { + const responsePromise = client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('appendrows: required and optional params', async () => { + const response = await client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] }); + }); + + test('iterrows', async () => { + const responsePromise = client.datasets.iterrows('dataset_id'); + const rawResponse = await 
responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('iterrows: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.iterrows('dataset_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('iterrows: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.iterrows( + 'dataset_id', + { limit: 0, start_index: 0 }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('register: only required params', async () => { + const responsePromise = client.datasets.register({ + purpose: 'post-training/messages', + source: { type: 'uri', uri: 'uri' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('register: required and optional params', async () => { + const response = await client.datasets.register({ + purpose: 'post-training/messages', + source: { type: 'uri', uri: 'uri' }, + dataset_id: 'dataset_id', + metadata: { foo: true }, + }); + }); + + test('unregister', async () => { + const responsePromise = client.datasets.unregister('dataset_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('unregister: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.datasets.unregister('dataset_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/telemetry.test.ts b/tests/api-resources/telemetry.test.ts index df88631..5653ebb 100644 --- a/tests/api-resources/telemetry.test.ts +++ b/tests/api-resources/telemetry.test.ts @@ -6,17 +6,120 @@ import { Response } from 'node-fetch'; const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource telemetry', () => { - test('logEvent: only required params', async () => { - const responsePromise = client.telemetry.logEvent({ - event: { - message: 'message', - severity: 'verbose', - span_id: 'span_id', - timestamp: '2019-12-27T18:11:19.117Z', - trace_id: 'trace_id', - type: 'unstructured_log', - }, - ttl_seconds: 0, + test('getSpan', async () => { + const responsePromise = client.telemetry.getSpan('trace_id', 'span_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('getSpan: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.telemetry.getSpan('trace_id', 'span_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('getSpanTree', async () => { + const responsePromise = client.telemetry.getSpanTree('span_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('getTrace', async () => { + const responsePromise = client.telemetry.getTrace('trace_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('getTrace: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.telemetry.getTrace('trace_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + LlamaStackClient.NotFoundError, + ); + }); + + // unsupported query params in java / kotlin + test.skip('queryMetrics: only required params', async () => { + const responsePromise = client.telemetry.queryMetrics('metric_name', { + query_type: 'range', + start_time: 0, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + // unsupported query params in java / kotlin + test.skip('queryMetrics: required and optional params', async () => { + const response = await client.telemetry.queryMetrics('metric_name', { + query_type: 'range', + start_time: 0, + end_time: 0, + granularity: 'granularity', + label_matchers: [{ name: 'name', operator: '=', value: 'value' }], + }); + }); + + // unsupported query params in java / kotlin + 
test.skip('querySpans: only required params', async () => { + const responsePromise = client.telemetry.querySpans({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_return: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + // unsupported query params in java / kotlin + test.skip('querySpans: required and optional params', async () => { + const response = await client.telemetry.querySpans({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_return: ['string'], + max_depth: 0, + }); + }); + + // unsupported query params in java / kotlin + test.skip('queryTraces', async () => { + const responsePromise = client.telemetry.queryTraces({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('saveSpansToDataset: only required params', async () => { + const responsePromise = client.telemetry.saveSpansToDataset({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_save: ['string'], + dataset_id: 'dataset_id', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -27,18 +130,12 @@ describe('resource telemetry', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('logEvent: required and optional params', async () => { - const response = await client.telemetry.logEvent({ - event: { - message: 'message', - severity: 'verbose', - span_id: 'span_id', - timestamp: '2019-12-27T18:11:19.117Z', - trace_id: 'trace_id', - type: 'unstructured_log', - attributes: { foo: 'string' }, - }, - ttl_seconds: 0, + test('saveSpansToDataset: required and optional params', async () => { + const response = await client.telemetry.saveSpansToDataset({ + attribute_filters: [{ key: 'key', op: 'eq', value: true }], + attributes_to_save: ['string'], + dataset_id: 'dataset_id', + max_depth: 0, }); }); }); From 6acae910db289080e8f52864f1bdf6d7951d1c3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Oct 2025 20:08:32 +0000 Subject: [PATCH 20/26] fix(api): another fix to capture correct responses.create() params --- .stats.yml | 4 +- api.md | 3 +- src/index.ts | 4 +- src/resources/index.ts | 2 +- src/resources/responses/index.ts | 2 +- src/resources/responses/responses.ts | 1825 +++++++---------- .../api-resources/responses/responses.test.ts | 28 +- 7 files changed, 783 insertions(+), 1085 deletions(-) diff --git a/.stats.yml b/.stats.yml index a88d26a..5588dfb 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 108 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f26df77f0800baeaea40407776f6c1e618756037969411e29de209ce961655dd.yml -openapi_spec_hash: e7c2329edc0f9f5aa1c78b6afb996e1c +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-b220f9f8667d2af8007134d0403b24452c20c9c512ca87d0b69b20b761272609.yml +openapi_spec_hash: cde1096a830f2081d68f858f020fd53f config_hash: 8800bdff1a087b9d5211dda2a7b9f66f diff --git a/api.md b/api.md index 41cf8af..e4a9720 100644 --- a/api.md +++ b/api.md @@ -72,13 +72,12 @@ Types: - ResponseObject - ResponseObjectStream -- ResponseCreateResponse - ResponseListResponse - ResponseDeleteResponse Methods: -- client.responses.create({ ...params }) -> ResponseCreateResponse +- client.responses.create({ ...params }) -> ResponseObject - client.responses.retrieve(responseId) -> ResponseObject - client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage - client.responses.delete(responseId) -> ResponseDeleteResponse diff --git a/src/index.ts b/src/index.ts index bd16e4b..1e14f85 100644 --- a/src/index.ts +++ b/src/index.ts @@ -133,7 +133,7 @@ import { import { ResponseCreateParams, ResponseCreateParamsNonStreaming, - ResponseCreateResponse, + ResponseCreateParamsStreaming, ResponseDeleteResponse, ResponseListParams, ResponseListResponse, @@ -416,12 +416,12 @@ export declare namespace LlamaStackClient { Responses as Responses, type ResponseObject as ResponseObject, type ResponseObjectStream as ResponseObjectStream, - type ResponseCreateResponse as ResponseCreateResponse, type ResponseListResponse as ResponseListResponse, type ResponseDeleteResponse as ResponseDeleteResponse, ResponseListResponsesOpenAICursorPage as ResponseListResponsesOpenAICursorPage, type ResponseCreateParams as ResponseCreateParams, type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, type ResponseListParams as ResponseListParams, }; diff --git a/src/resources/index.ts b/src/resources/index.ts index df5cd34..5d5b2f7 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -54,11 +54,11 @@ export { Responses, type ResponseObject, type ResponseObjectStream, - type ResponseCreateResponse, type ResponseListResponse, type ResponseDeleteResponse, type ResponseCreateParams, type ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming, type ResponseListParams, } from './responses/responses'; export { Routes, type ListRoutesResponse, type RouteListResponse } from './routes'; diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts index fe54614..1465569 100644 --- a/src/resources/responses/index.ts +++ b/src/resources/responses/index.ts @@ -6,10 +6,10 @@ export { Responses, type ResponseObject, type ResponseObjectStream, - type ResponseCreateResponse, type ResponseListResponse, type ResponseDeleteResponse, type ResponseCreateParams, type ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming, type ResponseListParams, } from './responses'; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 2cf8e01..05fe120 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -14,15 +14,24 @@ export class Responses extends APIResource { inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); /** - * List all OpenAI responses. + * Create a new OpenAI response. 
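+   *
+   * Usage sketch (illustrative, not part of the generated docs; the `input` and
+   * `model` values are placeholders taken from the tests below): omitting
+   * `stream` (or passing `stream: false`) resolves to a ResponseObject, while
+   * `stream: true` selects the streaming overload and yields a Stream of
+   * ResponseObjectStream events.
+   *
+   * @example
+   * const response = await client.responses.create({ input: 'string', model: 'model' });
+   *
+   * @example
+   * const stream = await client.responses.create({ input: 'string', model: 'model', stream: true });
+   * for await (const event of stream) {
+   *   console.log(event);
+   * }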
 */
+  create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<ResponseObject>;
+  create(
+    body: ResponseCreateParamsStreaming,
+    options?: Core.RequestOptions,
+  ): APIPromise<Stream<ResponseObjectStream>>;
+  create(
+    body: ResponseCreateParamsBase,
+    options?: Core.RequestOptions,
+  ): APIPromise<Stream<ResponseObjectStream> | ResponseObject>;
   create(
     body: ResponseCreateParams,
     options?: Core.RequestOptions,
-  ): APIPromise<Stream<ResponseObjectStream>> {
-    return this._client.post('/v1/responses', { body, ...options, stream: true }) as APIPromise<
-      Stream<ResponseObjectStream>
-    >;
+  ): APIPromise<ResponseObject> | APIPromise<Stream<ResponseObjectStream>> {
+    return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as
+      | APIPromise<ResponseObject>
+      | APIPromise<Stream<ResponseObjectStream>>;
   }
 
   /**
@@ -1833,967 +1842,922 @@ export namespace ResponseObjectStream {
 }
 
 /**
- * Paginated list of OpenAI response objects with navigation metadata.
+ * OpenAI response object extended with input context information.
  */
-export interface ResponseCreateResponse {
+export interface ResponseListResponse {
+  /**
+   * Unique identifier for this response
+   */
+  id: string;
+
+  /**
+   * Unix timestamp when the response was created
+   */
+  created_at: number;
+
+  /**
+   * List of input items that led to this response
+   */
+  input: Array<
+    | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+    | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+    | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
+    | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput
+    | ResponseListResponse.OpenAIResponseMcpApprovalRequest
+    | ResponseListResponse.OpenAIResponseMcpApprovalResponse
+    | ResponseListResponse.OpenAIResponseMessage
+  >;
+
+  /**
+   * Model identifier used for generation
+   */
+  model: string;
+
+  /**
+   * Object type identifier, always "response"
+   */
+  object: 'response';
+
+  /**
+   * List of generated output items (messages, tool calls, etc.)
+ */ + output: Array< + | ResponseListResponse.OpenAIResponseMessage + | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools + | ResponseListResponse.OpenAIResponseMcpApprovalRequest + >; + + /** + * Whether tool calls can be executed in parallel + */ + parallel_tool_calls: boolean; + + /** + * Current status of the response generation + */ + status: string; + + /** + * Text formatting configuration for the response + */ + text: ResponseListResponse.Text; + /** - * List of response objects with their input context + * (Optional) Error details if the response generation failed */ - data: Array; + error?: ResponseListResponse.Error; /** - * Identifier of the first item in this page + * (Optional) ID of the previous response in a conversation */ - first_id: string; + previous_response_id?: string; /** - * Whether there are more results available beyond this page + * (Optional) Sampling temperature used for generation */ - has_more: boolean; + temperature?: number; /** - * Identifier of the last item in this page + * (Optional) Nucleus sampling parameter used for generation */ - last_id: string; + top_p?: number; /** - * Object type identifier, always "list" + * (Optional) Truncation strategy applied to the response */ - object: 'list'; + truncation?: string; } -export namespace ResponseCreateResponse { +export namespace ResponseListResponse { /** - * OpenAI response object extended with input context information. + * Web search tool call output message for OpenAI responses. */ - export interface Data { + export interface OpenAIResponseOutputMessageWebSearchToolCall { /** - * Unique identifier for this response + * Unique identifier for this tool call */ id: string; /** - * Unix timestamp when the response was created + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" */ - created_at: number; + type: 'web_search_call'; + } + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { /** - * List of input items that led to this response + * Unique identifier for this tool call */ - input: Array< - | Data.OpenAIResponseOutputMessageWebSearchToolCall - | Data.OpenAIResponseOutputMessageFileSearchToolCall - | Data.OpenAIResponseOutputMessageFunctionToolCall - | Data.OpenAIResponseInputFunctionToolCallOutput - | Data.OpenAIResponseMcpApprovalRequest - | Data.OpenAIResponseMcpApprovalResponse - | Data.OpenAIResponseMessage - >; + id: string; /** - * Model identifier used for generation + * List of search queries executed */ - model: string; + queries: Array; /** - * Object type identifier, always "response" + * Current status of the file search operation */ - object: 'response'; + status: string; /** - * List of generated output items (messages, tool calls, etc.) 
+ * Tool call type identifier, always "file_search_call" */ - output: Array< - | Data.OpenAIResponseMessage - | Data.OpenAIResponseOutputMessageWebSearchToolCall - | Data.OpenAIResponseOutputMessageFileSearchToolCall - | Data.OpenAIResponseOutputMessageFunctionToolCall - | Data.OpenAIResponseOutputMessageMcpCall - | Data.OpenAIResponseOutputMessageMcpListTools - | Data.OpenAIResponseMcpApprovalRequest - >; + type: 'file_search_call'; /** - * Whether tool calls can be executed in parallel + * (Optional) Search results returned by the file search operation */ - parallel_tool_calls: boolean; + results?: Array; + } + export namespace OpenAIResponseOutputMessageFileSearchToolCall { /** - * Current status of the response generation + * Search results returned by the file search operation. */ - status: string; + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * Text formatting configuration for the response + * JSON string containing the function arguments */ - text: Data.Text; + arguments: string; /** - * (Optional) Error details if the response generation failed + * Unique identifier for the function call */ - error?: Data.Error; + call_id: string; /** - * (Optional) ID of the previous response in a conversation + * Name of the function being called */ - previous_response_id?: string; + name: string; /** - * (Optional) Sampling temperature used for generation + * Tool call type identifier, always "function_call" */ - temperature?: number; + type: 'function_call'; /** - * (Optional) Nucleus sampling parameter used for generation + * (Optional) Additional identifier for the tool call */ - top_p?: number; + id?: string; /** - * (Optional) Truncation strategy applied to the response + * (Optional) Current status of the function call execution */ - truncation?: string; + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. + */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; } - export namespace Data { + export namespace OpenAIResponseMessage { /** - * Web search tool call output message for OpenAI responses. + * Text content for input messages in OpenAI response format. */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; - + export interface OpenAIResponseInputMessageContentText { /** - * Current status of the web search operation + * The text content of the input message */ - status: string; + text: string; /** - * Tool call type identifier, always "web_search_call" + * Content type identifier, always "input_text" */ - type: 'web_search_call'; + type: 'input_text'; } /** - * File search tool call output message for OpenAI responses. + * Image content for input messages in OpenAI response format. */ - export interface OpenAIResponseOutputMessageFileSearchToolCall { + export interface OpenAIResponseInputMessageContentImage { /** - * Unique identifier for this tool call + * Level of detail for image processing, can be "low", "high", or "auto" */ - id: string; + detail: 'low' | 'high' | 'auto'; /** - * List of search queries executed + * Content type identifier, always "input_image" */ - queries: Array; + type: 'input_image'; /** - * Current status of the file search operation + * (Optional) URL of the image content */ - status: string; + image_url?: string; + } - /** - * Tool call type identifier, always "file_search_call" - */ - type: 'file_search_call'; + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; - /** - * (Optional) Search results returned by the file search operation - */ - results?: Array; + text: string; + + type: 'output_text'; } - export namespace OpenAIResponseOutputMessageFileSearchToolCall { + export namespace UnionMember2 { /** - * Search results returned by the file search operation. + * File citation annotation for referencing specific files in response content. */ - export interface Result { - /** - * (Optional) Key-value attributes associated with the file - */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; - + export interface OpenAIResponseAnnotationFileCitation { /** - * Unique identifier of the file containing the result + * Unique identifier of the referenced file */ file_id: string; /** - * Name of the file containing the result + * Name of the referenced file */ filename: string; /** - * Relevance score for this search result (between 0 and 1) + * Position index of the citation within the content */ - score: number; + index: number; /** - * Text content of the search result + * Annotation type identifier, always "file_citation" */ - text: string; + type: 'file_citation'; } - } - /** - * Function tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * JSON string containing the function arguments + * URL citation annotation for referencing external web resources. 
*/ - arguments: string; + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; - /** - * Unique identifier for the function call - */ - call_id: string; + /** + * Start position of the citation span in the content + */ + start_index: number; - /** - * Name of the function being called - */ - name: string; + /** + * Title of the referenced web resource + */ + title: string; - /** - * Tool call type identifier, always "function_call" - */ - type: 'function_call'; + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; - /** - * (Optional) Additional identifier for the tool call - */ - id?: string; + /** + * URL of the referenced web resource + */ + url: string; + } - /** - * (Optional) Current status of the function call execution - */ - status?: string; - } + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; - /** - * This represents the output of a function call that gets passed back to the - * model. - */ - export interface OpenAIResponseInputFunctionToolCallOutput { - call_id: string; + end_index: number; - output: string; + file_id: string; - type: 'function_call_output'; + filename: string; - id?: string; + start_index: number; - status?: string; - } + type: 'container_file_citation'; + } - /** - * A request for human approval of a tool invocation. - */ - export interface OpenAIResponseMcpApprovalRequest { - id: string; + export interface OpenAIResponseAnnotationFilePath { + file_id: string; - arguments: string; + index: number; - name: string; + type: 'file_path'; + } + } + } - server_label: string; + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; - type: 'mcp_approval_request'; - } + role: 'system' | 'developer' | 'user' | 'assistant'; - /** - * A response to an MCP approval request. - */ - export interface OpenAIResponseMcpApprovalResponse { - approval_request_id: string; + type: 'message'; - approve: boolean; + id?: string; - type: 'mcp_approval_response'; + status?: string; + } - id?: string; + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; - reason?: string; + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; } /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. + * Image content for input messages in OpenAI response format. 
*/ - export interface OpenAIResponseMessage { - content: - | string - | Array< - | OpenAIResponseMessage.OpenAIResponseInputMessageContentText - | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage - > - | Array; + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; - role: 'system' | 'developer' | 'user' | 'assistant'; + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; - type: 'message'; + /** + * (Optional) URL of the image content + */ + image_url?: string; + } - id?: string; + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; - status?: string; + text: string; + + type: 'output_text'; } - export namespace OpenAIResponseMessage { + export namespace UnionMember2 { /** - * Text content for input messages in OpenAI response format. + * File citation annotation for referencing specific files in response content. */ - export interface OpenAIResponseInputMessageContentText { + export interface OpenAIResponseAnnotationFileCitation { /** - * The text content of the input message + * Unique identifier of the referenced file */ - text: string; + file_id: string; /** - * Content type identifier, always "input_text" + * Name of the referenced file */ - type: 'input_text'; + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; } /** - * Image content for input messages in OpenAI response format. + * URL citation annotation for referencing external web resources. */ - export interface OpenAIResponseInputMessageContentImage { + export interface OpenAIResponseAnnotationCitation { /** - * Level of detail for image processing, can be "low", "high", or "auto" + * End position of the citation span in the content */ - detail: 'low' | 'high' | 'auto'; + end_index: number; /** - * Content type identifier, always "input_image" + * Start position of the citation span in the content */ - type: 'input_image'; + start_index: number; /** - * (Optional) URL of the image content + * Title of the referenced web resource */ - image_url?: string; - } - - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; - - text: string; + title: string; - type: 'output_text'; - } + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; - export namespace UnionMember2 { /** - * File citation annotation for referencing specific files in response content. + * URL of the referenced web resource */ - export interface OpenAIResponseAnnotationFileCitation { - /** - * Unique identifier of the referenced file - */ - file_id: string; - - /** - * Name of the referenced file - */ - filename: string; - - /** - * Position index of the citation within the content - */ - index: number; - - /** - * Annotation type identifier, always "file_citation" - */ - type: 'file_citation'; - } - - /** - * URL citation annotation for referencing external web resources. 
- */ - export interface OpenAIResponseAnnotationCitation { - /** - * End position of the citation span in the content - */ - end_index: number; - - /** - * Start position of the citation span in the content - */ - start_index: number; - - /** - * Title of the referenced web resource - */ - title: string; - - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; - - /** - * URL of the referenced web resource - */ - url: string; - } - - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; - - end_index: number; - - file_id: string; - - filename: string; - - start_index: number; - - type: 'container_file_citation'; - } - - export interface OpenAIResponseAnnotationFilePath { - file_id: string; - - index: number; - - type: 'file_path'; - } - } - } - - /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. - */ - export interface OpenAIResponseMessage { - content: - | string - | Array< - | OpenAIResponseMessage.OpenAIResponseInputMessageContentText - | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage - > - | Array; - - role: 'system' | 'developer' | 'user' | 'assistant'; - - type: 'message'; - - id?: string; - - status?: string; - } - - export namespace OpenAIResponseMessage { - /** - * Text content for input messages in OpenAI response format. - */ - export interface OpenAIResponseInputMessageContentText { - /** - * The text content of the input message - */ - text: string; - - /** - * Content type identifier, always "input_text" - */ - type: 'input_text'; + url: string; } - /** - * Image content for input messages in OpenAI response format. - */ - export interface OpenAIResponseInputMessageContentImage { - /** - * Level of detail for image processing, can be "low", "high", or "auto" - */ - detail: 'low' | 'high' | 'auto'; + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; - /** - * Content type identifier, always "input_image" - */ - type: 'input_image'; + end_index: number; - /** - * (Optional) URL of the image content - */ - image_url?: string; - } + file_id: string; - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; + filename: string; - text: string; + start_index: number; - type: 'output_text'; + type: 'container_file_citation'; } - export namespace UnionMember2 { - /** - * File citation annotation for referencing specific files in response content. - */ - export interface OpenAIResponseAnnotationFileCitation { - /** - * Unique identifier of the referenced file - */ - file_id: string; - - /** - * Name of the referenced file - */ - filename: string; - - /** - * Position index of the citation within the content - */ - index: number; - - /** - * Annotation type identifier, always "file_citation" - */ - type: 'file_citation'; - } - - /** - * URL citation annotation for referencing external web resources. 
- */ - export interface OpenAIResponseAnnotationCitation { - /** - * End position of the citation span in the content - */ - end_index: number; - - /** - * Start position of the citation span in the content - */ - start_index: number; - - /** - * Title of the referenced web resource - */ - title: string; - - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; - - /** - * URL of the referenced web resource - */ - url: string; - } - - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; - - end_index: number; - - file_id: string; - - filename: string; - - start_index: number; - - type: 'container_file_citation'; - } - - export interface OpenAIResponseAnnotationFilePath { - file_id: string; + export interface OpenAIResponseAnnotationFilePath { + file_id: string; - index: number; + index: number; - type: 'file_path'; - } + type: 'file_path'; } } + } + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { /** - * Web search tool call output message for OpenAI responses. + * Unique identifier for this tool call */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; - - /** - * Current status of the web search operation - */ - status: string; - - /** - * Tool call type identifier, always "web_search_call" - */ - type: 'web_search_call'; - } + id: string; /** - * File search tool call output message for OpenAI responses. + * Current status of the web search operation */ - export interface OpenAIResponseOutputMessageFileSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; - - /** - * List of search queries executed - */ - queries: Array; - - /** - * Current status of the file search operation - */ - status: string; - - /** - * Tool call type identifier, always "file_search_call" - */ - type: 'file_search_call'; - - /** - * (Optional) Search results returned by the file search operation - */ - results?: Array; - } + status: string; - export namespace OpenAIResponseOutputMessageFileSearchToolCall { - /** - * Search results returned by the file search operation. - */ - export interface Result { - /** - * (Optional) Key-value attributes associated with the file - */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } - /** - * Unique identifier of the file containing the result - */ - file_id: string; + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; - /** - * Name of the file containing the result - */ - filename: string; + /** + * List of search queries executed + */ + queries: Array; - /** - * Relevance score for this search result (between 0 and 1) - */ - score: number; + /** + * Current status of the file search operation + */ + status: string; - /** - * Text content of the search result - */ - text: string; - } - } + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; /** - * Function tool call output message for OpenAI responses. 
+ * (Optional) Search results returned by the file search operation */ - export interface OpenAIResponseOutputMessageFunctionToolCall { - /** - * JSON string containing the function arguments - */ - arguments: string; + results?: Array; + } + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { /** - * Unique identifier for the function call + * (Optional) Key-value attributes associated with the file */ - call_id: string; + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Name of the function being called + * Unique identifier of the file containing the result */ - name: string; + file_id: string; /** - * Tool call type identifier, always "function_call" + * Name of the file containing the result */ - type: 'function_call'; + filename: string; /** - * (Optional) Additional identifier for the tool call + * Relevance score for this search result (between 0 and 1) */ - id?: string; + score: number; /** - * (Optional) Current status of the function call execution + * Text content of the search result */ - status?: string; + text: string; } + } + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { /** - * Model Context Protocol (MCP) call output message for OpenAI responses. + * JSON string containing the function arguments */ - export interface OpenAIResponseOutputMessageMcpCall { - /** - * Unique identifier for this MCP call - */ - id: string; + arguments: string; - /** - * JSON string containing the MCP call arguments - */ - arguments: string; + /** + * Unique identifier for the function call + */ + call_id: string; - /** - * Name of the MCP method being called - */ - name: string; + /** + * Name of the function being called + */ + name: string; - /** - * Label identifying the MCP server handling the call - */ - server_label: string; + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; - /** - * Tool call type identifier, always "mcp_call" - */ - type: 'mcp_call'; + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; - /** - * (Optional) Error message if the MCP call failed - */ - error?: string; + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } - /** - * (Optional) Output result from the successful MCP call - */ - output?: string; - } + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; /** - * MCP list tools output message containing available tools from an MCP server. 
+ * JSON string containing the MCP call arguments */ - export interface OpenAIResponseOutputMessageMcpListTools { - /** - * Unique identifier for this MCP list tools operation - */ - id: string; + arguments: string; - /** - * Label identifying the MCP server providing the tools - */ - server_label: string; + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { /** - * List of available tools provided by the MCP server + * JSON schema defining the tool's input parameters */ - tools: Array; + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Tool call type identifier, always "mcp_list_tools" + * Name of the tool */ - type: 'mcp_list_tools'; - } + name: string; - export namespace OpenAIResponseOutputMessageMcpListTools { /** - * Tool definition returned by MCP list tools operation. + * (Optional) Description of what the tool does */ - export interface Tool { - /** - * JSON schema defining the tool's input parameters - */ - input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * Name of the tool - */ - name: string; - - /** - * (Optional) Description of what the tool does - */ - description?: string; - } + description?: string; } + } - /** - * A request for human approval of a tool invocation. - */ - export interface OpenAIResponseMcpApprovalRequest { - id: string; + /** + * A request for human approval of a tool invocation. 
+ */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; - arguments: string; + arguments: string; - name: string; + name: string; - server_label: string; + server_label: string; - type: 'mcp_approval_request'; - } + type: 'mcp_approval_request'; + } + /** + * Text formatting configuration for the response + */ + export interface Text { /** - * Text formatting configuration for the response + * (Optional) Text format configuration specifying output format requirements */ - export interface Text { - /** - * (Optional) Text format configuration specifying output format requirements - */ - format?: Text.Format; - } + format?: Text.Format; + } - export namespace Text { + export namespace Text { + /** + * (Optional) Text format configuration specifying output format requirements + */ + export interface Format { /** - * (Optional) Text format configuration specifying output format requirements + * Must be "text", "json_schema", or "json_object" to identify the format type */ - export interface Format { - /** - * Must be "text", "json_schema", or "json_object" to identify the format type - */ - type: 'text' | 'json_schema' | 'json_object'; - - /** - * (Optional) A description of the response format. Only used for json_schema. - */ - description?: string; - - /** - * The name of the response format. Only used for json_schema. - */ - name?: string; + type: 'text' | 'json_schema' | 'json_object'; - /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. Only used for json_schema. - */ - schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * (Optional) A description of the response format. Only used for json_schema. + */ + description?: string; - /** - * (Optional) Whether to strictly enforce the JSON schema. If true, the response - * must match the schema exactly. Only used for json_schema. - */ - strict?: boolean; - } - } + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; - /** - * (Optional) Error details if the response generation failed - */ - export interface Error { /** - * Error code identifying the type of failure + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. */ - code: string; + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Human-readable error message describing the failure + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. */ - message: string; + strict?: boolean; } } -} -/** - * OpenAI response object extended with input context information. - */ -export interface ResponseListResponse { /** - * Unique identifier for this response + * (Optional) Error details if the response generation failed */ - id: string; + export interface Error { + /** + * Error code identifying the type of failure + */ + code: string; - /** - * Unix timestamp when the response was created - */ - created_at: number; + /** + * Human-readable error message describing the failure + */ + message: string; + } +} +/** + * Response object confirming deletion of an OpenAI response. 
+ */ +export interface ResponseDeleteResponse { /** - * List of input items that led to this response + * Unique identifier of the deleted response */ - input: Array< - | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall - | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput - | ResponseListResponse.OpenAIResponseMcpApprovalRequest - | ResponseListResponse.OpenAIResponseMcpApprovalResponse - | ResponseListResponse.OpenAIResponseMessage - >; + id: string; /** - * Model identifier used for generation + * Deletion confirmation flag, always True */ - model: string; + deleted: boolean; /** * Object type identifier, always "response" */ object: 'response'; +} - /** - * List of generated output items (messages, tool calls, etc.) - */ - output: Array< - | ResponseListResponse.OpenAIResponseMessage - | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall - | ResponseListResponse.OpenAIResponseOutputMessageMcpCall - | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools - | ResponseListResponse.OpenAIResponseMcpApprovalRequest - >; +export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming; +export interface ResponseCreateParamsBase { /** - * Whether tool calls can be executed in parallel + * Input message(s) to create the response. */ - parallel_tool_calls: boolean; + input: + | string + | Array< + | ResponseCreateParams.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput + | ResponseCreateParams.OpenAIResponseMcpApprovalRequest + | ResponseCreateParams.OpenAIResponseMcpApprovalResponse + | ResponseCreateParams.OpenAIResponseMessage + >; /** - * Current status of the response generation + * The underlying LLM used for completions. */ - status: string; + model: string; /** - * Text formatting configuration for the response + * (Optional) Additional fields to include in the response. */ - text: ResponseListResponse.Text; + include?: Array; - /** - * (Optional) Error details if the response generation failed - */ - error?: ResponseListResponse.Error; + instructions?: string; + + max_infer_iters?: number; /** - * (Optional) ID of the previous response in a conversation + * (Optional) if specified, the new response will be a continuation of the previous + * response. This can be used to easily fork-off new responses from existing + * responses. */ previous_response_id?: string; - /** - * (Optional) Sampling temperature used for generation - */ + store?: boolean; + + stream?: boolean; + temperature?: number; /** - * (Optional) Nucleus sampling parameter used for generation + * Text response configuration for OpenAI responses. 
*/ - top_p?: number; + text?: ResponseCreateParams.Text; - /** - * (Optional) Truncation strategy applied to the response - */ - truncation?: string; + tools?: Array< + | ResponseCreateParams.OpenAIResponseInputToolWebSearch + | ResponseCreateParams.OpenAIResponseInputToolFileSearch + | ResponseCreateParams.OpenAIResponseInputToolFunction + | ResponseCreateParams.OpenAIResponseInputToolMcp + >; } -export namespace ResponseListResponse { +export namespace ResponseCreateParams { /** * Web search tool call output message for OpenAI responses. */ @@ -2954,159 +2918,7 @@ export namespace ResponseListResponse { id?: string; - reason?: string; - } - - /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. - */ - export interface OpenAIResponseMessage { - content: - | string - | Array< - | OpenAIResponseMessage.OpenAIResponseInputMessageContentText - | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage - > - | Array; - - role: 'system' | 'developer' | 'user' | 'assistant'; - - type: 'message'; - - id?: string; - - status?: string; - } - - export namespace OpenAIResponseMessage { - /** - * Text content for input messages in OpenAI response format. - */ - export interface OpenAIResponseInputMessageContentText { - /** - * The text content of the input message - */ - text: string; - - /** - * Content type identifier, always "input_text" - */ - type: 'input_text'; - } - - /** - * Image content for input messages in OpenAI response format. - */ - export interface OpenAIResponseInputMessageContentImage { - /** - * Level of detail for image processing, can be "low", "high", or "auto" - */ - detail: 'low' | 'high' | 'auto'; - - /** - * Content type identifier, always "input_image" - */ - type: 'input_image'; - - /** - * (Optional) URL of the image content - */ - image_url?: string; - } - - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; - - text: string; - - type: 'output_text'; - } - - export namespace UnionMember2 { - /** - * File citation annotation for referencing specific files in response content. - */ - export interface OpenAIResponseAnnotationFileCitation { - /** - * Unique identifier of the referenced file - */ - file_id: string; - - /** - * Name of the referenced file - */ - filename: string; - - /** - * Position index of the citation within the content - */ - index: number; - - /** - * Annotation type identifier, always "file_citation" - */ - type: 'file_citation'; - } - - /** - * URL citation annotation for referencing external web resources. 
- */ - export interface OpenAIResponseAnnotationCitation { - /** - * End position of the citation span in the content - */ - end_index: number; - - /** - * Start position of the citation span in the content - */ - start_index: number; - - /** - * Title of the referenced web resource - */ - title: string; - - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; - - /** - * URL of the referenced web resource - */ - url: string; - } - - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; - - end_index: number; - - file_id: string; - - filename: string; - - start_index: number; - - type: 'container_file_citation'; - } - - export interface OpenAIResponseAnnotationFilePath { - file_id: string; - - index: number; - - type: 'file_path'; - } - } + reason?: string; } /** @@ -3262,350 +3074,213 @@ export namespace ResponseListResponse { } /** - * Web search tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; - - /** - * Current status of the web search operation - */ - status: string; - - /** - * Tool call type identifier, always "web_search_call" - */ - type: 'web_search_call'; - } - - /** - * File search tool call output message for OpenAI responses. + * Text response configuration for OpenAI responses. */ - export interface OpenAIResponseOutputMessageFileSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; - - /** - * List of search queries executed - */ - queries: Array; - - /** - * Current status of the file search operation - */ - status: string; - - /** - * Tool call type identifier, always "file_search_call" - */ - type: 'file_search_call'; - + export interface Text { /** - * (Optional) Search results returned by the file search operation + * (Optional) Text format configuration specifying output format requirements */ - results?: Array; + format?: Text.Format; } - export namespace OpenAIResponseOutputMessageFileSearchToolCall { + export namespace Text { /** - * Search results returned by the file search operation. + * (Optional) Text format configuration specifying output format requirements */ - export interface Result { + export interface Format { /** - * (Optional) Key-value attributes associated with the file + * Must be "text", "json_schema", or "json_object" to identify the format type */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + type: 'text' | 'json_schema' | 'json_object'; /** - * Unique identifier of the file containing the result + * (Optional) A description of the response format. Only used for json_schema. */ - file_id: string; + description?: string; /** - * Name of the file containing the result + * The name of the response format. Only used for json_schema. */ - filename: string; + name?: string; /** - * Relevance score for this search result (between 0 and 1) + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. */ - score: number; + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Text content of the search result + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. */ - text: string; + strict?: boolean; } } /** - * Function tool call output message for OpenAI responses. 
+ * Web search tool configuration for OpenAI response inputs. */ - export interface OpenAIResponseOutputMessageFunctionToolCall { + export interface OpenAIResponseInputToolWebSearch { /** - * JSON string containing the function arguments + * Web search tool type variant to use */ - arguments: string; + type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11'; /** - * Unique identifier for the function call + * (Optional) Size of search context, must be "low", "medium", or "high" */ - call_id: string; + search_context_size?: string; + } + /** + * File search tool configuration for OpenAI response inputs. + */ + export interface OpenAIResponseInputToolFileSearch { /** - * Name of the function being called + * Tool type identifier, always "file_search" */ - name: string; + type: 'file_search'; /** - * Tool call type identifier, always "function_call" + * List of vector store identifiers to search within */ - type: 'function_call'; + vector_store_ids: Array; /** - * (Optional) Additional identifier for the tool call + * (Optional) Additional filters to apply to the search */ - id?: string; + filters?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) Current status of the function call execution + * (Optional) Maximum number of search results to return (1-50) */ - status?: string; - } + max_num_results?: number; - /** - * Model Context Protocol (MCP) call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageMcpCall { /** - * Unique identifier for this MCP call + * (Optional) Options for ranking and scoring search results */ - id: string; + ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions; + } + export namespace OpenAIResponseInputToolFileSearch { /** - * JSON string containing the MCP call arguments + * (Optional) Options for ranking and scoring search results */ - arguments: string; + export interface RankingOptions { + /** + * (Optional) Name of the ranking algorithm to use + */ + ranker?: string; + + /** + * (Optional) Minimum relevance score threshold for results + */ + score_threshold?: number; + } + } + /** + * Function tool configuration for OpenAI response inputs. + */ + export interface OpenAIResponseInputToolFunction { /** - * Name of the MCP method being called + * Name of the function that can be called */ name: string; /** - * Label identifying the MCP server handling the call + * Tool type identifier, always "function" */ - server_label: string; + type: 'function'; /** - * Tool call type identifier, always "mcp_call" + * (Optional) Description of what the function does */ - type: 'mcp_call'; + description?: string; /** - * (Optional) Error message if the MCP call failed + * (Optional) JSON schema defining the function's parameters */ - error?: string; + parameters?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) Output result from the successful MCP call + * (Optional) Whether to enforce strict parameter validation */ - output?: string; + strict?: boolean; } /** - * MCP list tools output message containing available tools from an MCP server. + * Model Context Protocol (MCP) tool configuration for OpenAI response inputs. 
*/ - export interface OpenAIResponseOutputMessageMcpListTools { + export interface OpenAIResponseInputToolMcp { /** - * Unique identifier for this MCP list tools operation + * Approval requirement for tool calls ("always", "never", or filter) */ - id: string; + require_approval: 'always' | 'never' | OpenAIResponseInputToolMcp.ApprovalFilter; /** - * Label identifying the MCP server providing the tools + * Label to identify this MCP server */ server_label: string; /** - * List of available tools provided by the MCP server + * URL endpoint of the MCP server */ - tools: Array; + server_url: string; /** - * Tool call type identifier, always "mcp_list_tools" + * Tool type identifier, always "mcp" */ - type: 'mcp_list_tools'; - } + type: 'mcp'; - export namespace OpenAIResponseOutputMessageMcpListTools { /** - * Tool definition returned by MCP list tools operation. + * (Optional) Restriction on which tools can be used from this server */ - export interface Tool { - /** - * JSON schema defining the tool's input parameters - */ - input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * Name of the tool - */ - name: string; - - /** - * (Optional) Description of what the tool does - */ - description?: string; - } - } - - /** - * A request for human approval of a tool invocation. - */ - export interface OpenAIResponseMcpApprovalRequest { - id: string; - - arguments: string; - - name: string; - - server_label: string; - - type: 'mcp_approval_request'; - } + allowed_tools?: Array | OpenAIResponseInputToolMcp.AllowedToolsFilter; - /** - * Text formatting configuration for the response - */ - export interface Text { /** - * (Optional) Text format configuration specifying output format requirements + * (Optional) HTTP headers to include when connecting to the server */ - format?: Text.Format; + headers?: { [key: string]: boolean | number | string | Array | unknown | null }; } - export namespace Text { + export namespace OpenAIResponseInputToolMcp { /** - * (Optional) Text format configuration specifying output format requirements + * Filter configuration for MCP tool approval requirements. */ - export interface Format { - /** - * Must be "text", "json_schema", or "json_object" to identify the format type - */ - type: 'text' | 'json_schema' | 'json_object'; - - /** - * (Optional) A description of the response format. Only used for json_schema. - */ - description?: string; - + export interface ApprovalFilter { /** - * The name of the response format. Only used for json_schema. + * (Optional) List of tool names that always require approval */ - name?: string; + always?: Array; /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. Only used for json_schema. + * (Optional) List of tool names that never require approval */ - schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + never?: Array; + } + /** + * Filter configuration for restricting which MCP tools can be used. + */ + export interface AllowedToolsFilter { /** - * (Optional) Whether to strictly enforce the JSON schema. If true, the response - * must match the schema exactly. Only used for json_schema. 
+ * (Optional) List of specific tool names that are allowed */ - strict?: boolean; + tool_names?: Array; } } - /** - * (Optional) Error details if the response generation failed - */ - export interface Error { - /** - * Error code identifying the type of failure - */ - code: string; - - /** - * Human-readable error message describing the failure - */ - message: string; - } + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; } -/** - * Response object confirming deletion of an OpenAI response. - */ -export interface ResponseDeleteResponse { - /** - * Unique identifier of the deleted response - */ - id: string; - - /** - * Deletion confirmation flag, always True - */ - deleted: boolean; - - /** - * Object type identifier, always "response" - */ - object: 'response'; +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + stream?: false; } -export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsNonStreaming; - -export declare namespace ResponseCreateParams { - export interface ResponseCreateParamsNonStreaming { - /** - * The ID of the last response to return. - */ - after?: string; - - /** - * The number of responses to return. - */ - limit?: number; - - /** - * The model to filter responses by. - */ - model?: string; - - /** - * The order to sort responses by when sorted by created_at ('asc' or 'desc'). - */ - order?: 'asc' | 'desc'; - } - - export interface ResponseCreateParamsNonStreaming { - /** - * The ID of the last response to return. - */ - after?: string; - - /** - * The number of responses to return. - */ - limit?: number; - - /** - * The model to filter responses by. - */ - model?: string; - - /** - * The order to sort responses by when sorted by created_at ('asc' or 'desc'). - */ - order?: 'asc' | 'desc'; - } +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + stream: true; } export interface ResponseListParams extends OpenAICursorPageParams { @@ -3627,12 +3302,12 @@ export declare namespace Responses { export { type ResponseObject as ResponseObject, type ResponseObjectStream as ResponseObjectStream, - type ResponseCreateResponse as ResponseCreateResponse, type ResponseListResponse as ResponseListResponse, type ResponseDeleteResponse as ResponseDeleteResponse, ResponseListResponsesOpenAICursorPage as ResponseListResponsesOpenAICursorPage, type ResponseCreateParams as ResponseCreateParams, type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, type ResponseListParams as ResponseListParams, }; diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts index fbe25a5..f1142d8 100644 --- a/tests/api-resources/responses/responses.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -6,8 +6,8 @@ import { Response } from 'node-fetch'; const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); describe('resource responses', () => { - test('create', async () => { - const responsePromise = client.responses.create({}); + test('create: only required params', async () => { + const responsePromise = client.responses.create({ input: 'string', model: 'model' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -17,6 +17,30 @@ describe('resource responses', () => { expect(dataAndResponse.response).toBe(rawResponse); }); + test('create: required and optional params', async () => { + const response = await client.responses.create({ + input: 'string', + model: 'model', + include: ['string'], + instructions: 'instructions', + max_infer_iters: 0, + previous_response_id: 'previous_response_id', + store: true, + stream: false, + temperature: 0, + text: { + format: { + type: 'text', + description: 'description', + name: 'name', + schema: { foo: true }, + strict: true, + }, + }, + tools: [{ type: 'web_search', search_context_size: 'search_context_size' }], + }); + }); + test('retrieve', async () => { const responsePromise = client.responses.retrieve('response_id'); const rawResponse = await responsePromise.asResponse(); From a24679300cff93fea8ad4bc85e549ecc88198d58 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 02:36:23 +0000 Subject: [PATCH 21/26] chore(internal): use npm pack for build uploads --- scripts/utils/upload-artifact.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index 211e0b9..831e716 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -12,9 +12,11 @@ if [[ "$SIGNED_URL" == "null" ]]; then exit 1 fi -UPLOAD_RESPONSE=$(tar "${BASE_PATH:+-C$BASE_PATH}" -cz "${ARTIFACT_PATH:-dist}" | curl -v -X PUT \ +TARBALL=$(cd dist && npm pack --silent) + +UPLOAD_RESPONSE=$(curl -v -X PUT \ -H "Content-Type: application/gzip" \ - --data-binary @- "$SIGNED_URL" 2>&1) + --data-binary "@dist/$TARBALL" "$SIGNED_URL" 2>&1) if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" From dcc7bb8b4d940982c2e9c6d1a541636e99fdc5ff Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 9 Oct 2025 02:41:28 +0000 Subject: [PATCH 22/26] chore: extract some types in mcp docs --- api.md | 6 +----- src/resources/alpha/post-training/job.ts | 9 ++------- src/resources/alpha/post-training/post-training.ts | 8 +------- src/resources/models/index.ts | 2 +- src/resources/models/models.ts | 4 ++-- src/resources/models/openai.ts | 6 ------ 6 files changed, 7 insertions(+), 28 deletions(-) diff --git a/api.md b/api.md index e4a9720..35715d7 100644 --- a/api.md +++ b/api.md @@ -258,10 +258,6 @@ Methods: ## OpenAI -Types: - -- OpenAIListResponse - Methods: - client.models.openai.list() -> ModelListResponse @@ -452,7 +448,7 @@ Types: Methods: -- client.alpha.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data> +- client.alpha.postTraining.job.list() -> JobListResponse - client.alpha.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse - client.alpha.postTraining.job.cancel({ ...params }) -> void - client.alpha.postTraining.job.status({ ...params }) -> JobStatusResponse diff --git a/src/resources/alpha/post-training/job.ts 
b/src/resources/alpha/post-training/job.ts index 3f77ceb..ba3de54 100644 --- a/src/resources/alpha/post-training/job.ts +++ b/src/resources/alpha/post-training/job.ts @@ -2,19 +2,14 @@ import { APIResource } from '../../../resource'; import * as Core from '../../../core'; -import * as PostTrainingAPI from './post-training'; export class Job extends APIResource { /** * Get all training jobs. */ - list( - options?: Core.RequestOptions, - ): Core.APIPromise> { + list(options?: Core.RequestOptions): Core.APIPromise { return ( - this._client.get('/v1alpha/post-training/jobs', options) as Core.APIPromise<{ - data: Array; - }> + this._client.get('/v1alpha/post-training/jobs', options) as Core.APIPromise<{ data: JobListResponse }> )._thenUnwrap((obj) => obj.data); } diff --git a/src/resources/alpha/post-training/post-training.ts b/src/resources/alpha/post-training/post-training.ts index e8bf24a..06edcfc 100644 --- a/src/resources/alpha/post-training/post-training.ts +++ b/src/resources/alpha/post-training/post-training.ts @@ -110,13 +110,7 @@ export namespace AlgorithmConfig { } export interface ListPostTrainingJobsResponse { - data: Array; -} - -export namespace ListPostTrainingJobsResponse { - export interface Data { - job_uuid: string; - } + data: JobAPI.JobListResponse; } export interface PostTrainingJob { diff --git a/src/resources/models/index.ts b/src/resources/models/index.ts index de6ecf3..e05a022 100644 --- a/src/resources/models/index.ts +++ b/src/resources/models/index.ts @@ -7,4 +7,4 @@ export { type ModelListResponse, type ModelRegisterParams, } from './models'; -export { OpenAI, type OpenAIListResponse } from './openai'; +export { OpenAI } from './openai'; diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts index d72281f..1919365 100644 --- a/src/resources/models/models.ts +++ b/src/resources/models/models.ts @@ -3,7 +3,7 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; import * as OpenAIAPI from './openai'; -import { OpenAI, OpenAIListResponse } from './openai'; +import { OpenAI } from './openai'; export class Models extends APIResource { openai: OpenAIAPI.OpenAI = new OpenAIAPI.OpenAI(this._client); @@ -120,5 +120,5 @@ export declare namespace Models { type ModelRegisterParams as ModelRegisterParams, }; - export { OpenAI as OpenAI, type OpenAIListResponse as OpenAIListResponse }; + export { OpenAI as OpenAI }; } diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts index 8055dea..c6b90d1 100644 --- a/src/resources/models/openai.ts +++ b/src/resources/models/openai.ts @@ -14,9 +14,3 @@ export class OpenAI extends APIResource { )._thenUnwrap((obj) => obj.data); } } - -export type OpenAIListResponse = Array; - -export declare namespace OpenAI { - export { type OpenAIListResponse as OpenAIListResponse }; -} From e0728d5dd59be8723d9f967d6164351eb05528d1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 17:04:20 +0000 Subject: [PATCH 23/26] feat(api): several updates including Conversations, Responses changes, etc. 
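
A sketch of the new Conversations surface added by this change (illustrative
only: the baseURL, model of use, and message contents are placeholders, and
error handling is omitted; shapes follow the ConversationCreateParams and
ItemCreateParams types introduced below):

    import LlamaStackClient from 'llama-stack-client';

    const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' });

    async function demo() {
      // Create a conversation seeded with a single user message.
      const conversation = await client.conversations.create({
        items: [{ type: 'message', role: 'user', content: 'Hello!' }],
        metadata: { topic: 'demo' },
      });

      // Append a follow-up item to the same conversation.
      await client.conversations.items.create(conversation.id, {
        items: [{ type: 'message', role: 'assistant', content: 'Hi! How can I help?' }],
      });

      // Metadata can be replaced after creation; delete returns a confirmation object.
      await client.conversations.update(conversation.id, { metadata: { topic: 'support' } });
      const deleted = await client.conversations.delete(conversation.id);
      console.log(deleted.deleted);
    }

    demo();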
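This change also adds usage accounting to chat completions. A hedged sketch of
reading it from a stream, per the new ChatCompletionChunk.Usage type (the
model id is a placeholder, and the `stream_options: { include_usage: true }`
flag is the OpenAI-compatible convention referenced by the new doc comments —
treat it as an assumption rather than a confirmed parameter of this SDK):

    const stream = await client.chat.completions.create({
      model: 'llama3.2:3b', // placeholder model id
      messages: [{ role: 'user', content: 'Say hello.' }],
      stream: true,
      stream_options: { include_usage: true }, // assumed OpenAI-compatible flag
    });

    for await (const chunk of stream) {
      // Usage totals typically arrive only on the final chunk.
      if (chunk.usage) {
        console.log(
          `prompt=${chunk.usage.prompt_tokens} completion=${chunk.usage.completion_tokens}`,
        );
      }
    }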
--- .stats.yml | 8 +- api.md | 28 + src/index.ts | 17 + src/resources/chat/chat.ts | 57 + src/resources/chat/completions.ts | 179 +- src/resources/completions.ts | 4 +- src/resources/conversations.ts | 3 + src/resources/conversations/conversations.ts | 479 +++++ src/resources/conversations/index.ts | 17 + src/resources/conversations/items.ts | 1583 +++++++++++++++++ src/resources/embeddings.ts | 4 +- src/resources/files.ts | 13 +- src/resources/index.ts | 7 + src/resources/inspect.ts | 4 +- src/resources/models/models.ts | 6 +- src/resources/moderations.ts | 3 +- src/resources/providers.ts | 4 +- src/resources/responses/input-items.ts | 2 +- src/resources/responses/responses.ts | 808 ++++++++- src/resources/routes.ts | 3 +- src/resources/safety.ts | 2 +- .../conversations/conversations.test.ts | 70 + .../api-resources/conversations/items.test.ts | 70 + .../api-resources/responses/responses.test.ts | 1 + 24 files changed, 3303 insertions(+), 69 deletions(-) create mode 100644 src/resources/conversations.ts create mode 100644 src/resources/conversations/conversations.ts create mode 100644 src/resources/conversations/index.ts create mode 100644 src/resources/conversations/items.ts create mode 100644 tests/api-resources/conversations/conversations.test.ts create mode 100644 tests/api-resources/conversations/items.test.ts diff --git a/.stats.yml b/.stats.yml index 5588dfb..9a8764c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 108 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-b220f9f8667d2af8007134d0403b24452c20c9c512ca87d0b69b20b761272609.yml -openapi_spec_hash: cde1096a830f2081d68f858f020fd53f -config_hash: 8800bdff1a087b9d5211dda2a7b9f66f +configured_endpoints: 115 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-8a12a05ba6892999ac506f69d5cbbc7218f28ee1a11bf8e0e548c603435bb643.yml +openapi_spec_hash: 871ce212a98bdad4a44ec7fbf58d9fcb +config_hash: 85d9db5422f2cf897267c0e4825ce1bf diff --git a/api.md b/api.md index 35715d7..1fad22c 100644 --- a/api.md +++ b/api.md @@ -92,6 +92,34 @@ Methods: - client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse +# Conversations + +Types: + +- ConversationObject +- ConversationDeleteResponse + +Methods: + +- client.conversations.create({ ...params }) -> ConversationObject +- client.conversations.retrieve(conversationId) -> ConversationObject +- client.conversations.update(conversationId, { ...params }) -> ConversationObject +- client.conversations.delete(conversationId) -> ConversationDeleteResponse + +## Items + +Types: + +- ItemCreateResponse +- ItemListResponse +- ItemGetResponse + +Methods: + +- client.conversations.items.create(conversationId, { ...params }) -> ItemCreateResponse +- client.conversations.items.list(conversationId, { ...params }) -> ItemListResponse +- client.conversations.items.get(conversationId, itemId) -> ItemGetResponse + # Datasets Types: diff --git a/src/index.ts b/src/index.ts index 1e14f85..fbb5abf 100644 --- a/src/index.ts +++ b/src/index.ts @@ -123,6 +123,13 @@ import { } from './resources/vector-io'; import { Alpha } from './resources/alpha/alpha'; import { Chat, ChatCompletionChunk } from './resources/chat/chat'; +import { + ConversationCreateParams, + ConversationDeleteResponse, + ConversationObject, + ConversationUpdateParams, + Conversations, +} from './resources/conversations/conversations'; import { ListModelsResponse, Model, @@ -277,6 
+284,7 @@ export class LlamaStackClient extends Core.APIClient { tools: API.Tools = new API.Tools(this); toolRuntime: API.ToolRuntime = new API.ToolRuntime(this); responses: API.Responses = new API.Responses(this); + conversations: API.Conversations = new API.Conversations(this); datasets: API.Datasets = new API.Datasets(this); inspect: API.Inspect = new API.Inspect(this); embeddings: API.Embeddings = new API.Embeddings(this); @@ -354,6 +362,7 @@ LlamaStackClient.Tools = Tools; LlamaStackClient.ToolRuntime = ToolRuntime; LlamaStackClient.Responses = Responses; LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage; +LlamaStackClient.Conversations = Conversations; LlamaStackClient.Datasets = Datasets; LlamaStackClient.Inspect = Inspect; LlamaStackClient.Embeddings = Embeddings; @@ -425,6 +434,14 @@ export declare namespace LlamaStackClient { type ResponseListParams as ResponseListParams, }; + export { + Conversations as Conversations, + type ConversationObject as ConversationObject, + type ConversationDeleteResponse as ConversationDeleteResponse, + type ConversationCreateParams as ConversationCreateParams, + type ConversationUpdateParams as ConversationUpdateParams, + }; + export { Datasets as Datasets, type ListDatasetsResponse as ListDatasetsResponse, diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index b43e6d3..47bfc7a 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -46,6 +46,11 @@ export interface ChatCompletionChunk { * The object type, which will be "chat.completion.chunk" */ object: 'chat.completion.chunk'; + + /** + * Token usage information (typically included in final chunk with stream_options) + */ + usage?: ChatCompletionChunk.Usage; } export namespace ChatCompletionChunk { @@ -217,6 +222,58 @@ export namespace ChatCompletionChunk { } } } + + /** + * Token usage information (typically included in final chunk with stream_options) + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } Chat.Completions = Completions; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index b76ee5d..7c8f133 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -11,8 +11,8 @@ import { Stream } from '../../streaming'; export class Completions extends APIResource { /** - * Generate an OpenAI-compatible chat completion for the given messages using the - * specified model. + * Create chat completions. Generate an OpenAI-compatible chat completion for the + * given messages using the specified model. 
*/ create( body: CompletionCreateParamsNonStreaming, @@ -36,14 +36,14 @@ export class Completions extends APIResource { } /** - * Describe a chat completion by its ID. + * Get chat completion. Describe a chat completion by its ID. */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/v1/chat/completions/${completionId}`, options); } /** - * List all chat completions. + * List chat completions. */ list( query?: CompletionListParams, @@ -104,6 +104,11 @@ export namespace CompletionCreateResponse { * The object type, which will be "chat.completion" */ object: 'chat.completion'; + + /** + * Token usage information for the completion + */ + usage?: OpenAIChatCompletion.Usage; } export namespace OpenAIChatCompletion { @@ -501,6 +506,58 @@ export namespace CompletionCreateResponse { } } } + + /** + * Token usage information for the completion + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } } @@ -537,6 +594,11 @@ export interface CompletionRetrieveResponse { * The object type, which will be "chat.completion" */ object: 'chat.completion'; + + /** + * Token usage information for the completion + */ + usage?: CompletionRetrieveResponse.Usage; } export namespace CompletionRetrieveResponse { @@ -1224,6 +1286,58 @@ export namespace CompletionRetrieveResponse { type: 'text'; } } + + /** + * Token usage information for the completion + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. 
+ */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } export interface CompletionListResponse { @@ -1259,6 +1373,11 @@ export interface CompletionListResponse { * The object type, which will be "chat.completion" */ object: 'chat.completion'; + + /** + * Token usage information for the completion + */ + usage?: CompletionListResponse.Usage; } export namespace CompletionListResponse { @@ -1946,6 +2065,58 @@ export namespace CompletionListResponse { type: 'text'; } } + + /** + * Token usage information for the completion + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index be435b9..da2d7f3 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -8,8 +8,8 @@ import { Stream } from '../streaming'; export class Completions extends APIResource { /** - * Generate an OpenAI-compatible completion for the given prompt using the - * specified model. + * Create completion. Generate an OpenAI-compatible completion for the given prompt + * using the specified model. */ create( body: CompletionCreateParamsNonStreaming, diff --git a/src/resources/conversations.ts b/src/resources/conversations.ts new file mode 100644 index 0000000..6b50950 --- /dev/null +++ b/src/resources/conversations.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './conversations/index'; diff --git a/src/resources/conversations/conversations.ts b/src/resources/conversations/conversations.ts new file mode 100644 index 0000000..faa7d4c --- /dev/null +++ b/src/resources/conversations/conversations.ts @@ -0,0 +1,479 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as ItemsAPI from './items'; +import { + ItemCreateParams, + ItemCreateResponse, + ItemGetResponse, + ItemListParams, + ItemListResponse, + Items, +} from './items'; + +export class Conversations extends APIResource { + items: ItemsAPI.Items = new ItemsAPI.Items(this._client); + + /** + * Create a conversation. + */ + create(body: ConversationCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/conversations', { body, ...options }); + } + + /** + * Get a conversation with the given ID. 
+ */ + retrieve(conversationId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/conversations/${conversationId}`, options); + } + + /** + * Update a conversation's metadata with the given ID. + */ + update( + conversationId: string, + body: ConversationUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/conversations/${conversationId}`, { body, ...options }); + } + + /** + * Delete a conversation with the given ID. + */ + delete(conversationId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/conversations/${conversationId}`, options); + } +} + +/** + * OpenAI-compatible conversation object. + */ +export interface ConversationObject { + id: string; + + created_at: number; + + object: 'conversation'; + + items?: Array; + + metadata?: { [key: string]: string }; +} + +/** + * Response for deleted conversation. + */ +export interface ConversationDeleteResponse { + id: string; + + deleted: boolean; + + object: string; +} + +export interface ConversationCreateParams { + /** + * Initial items to include in the conversation context. + */ + items?: Array< + | ConversationCreateParams.OpenAIResponseMessage + | ConversationCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ConversationCreateParams.OpenAIResponseOutputMessageFileSearchToolCall + | ConversationCreateParams.OpenAIResponseOutputMessageWebSearchToolCall + | ConversationCreateParams.OpenAIResponseOutputMessageMcpCall + | ConversationCreateParams.OpenAIResponseOutputMessageMcpListTools + >; + + /** + * Set of key-value pairs that can be attached to an object. + */ + metadata?: { [key: string]: string }; +} + +export namespace ConversationCreateParams { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. 
+ */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. 
+ */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +export interface ConversationUpdateParams { + /** + * Set of key-value pairs that can be attached to an object. 
+ */ + metadata: { [key: string]: string }; +} + +Conversations.Items = Items; + +export declare namespace Conversations { + export { + type ConversationObject as ConversationObject, + type ConversationDeleteResponse as ConversationDeleteResponse, + type ConversationCreateParams as ConversationCreateParams, + type ConversationUpdateParams as ConversationUpdateParams, + }; + + export { + Items as Items, + type ItemCreateResponse as ItemCreateResponse, + type ItemListResponse as ItemListResponse, + type ItemGetResponse as ItemGetResponse, + type ItemCreateParams as ItemCreateParams, + type ItemListParams as ItemListParams, + }; +} diff --git a/src/resources/conversations/index.ts b/src/resources/conversations/index.ts new file mode 100644 index 0000000..f60086f --- /dev/null +++ b/src/resources/conversations/index.ts @@ -0,0 +1,17 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + Conversations, + type ConversationObject, + type ConversationDeleteResponse, + type ConversationCreateParams, + type ConversationUpdateParams, +} from './conversations'; +export { + Items, + type ItemCreateResponse, + type ItemListResponse, + type ItemGetResponse, + type ItemCreateParams, + type ItemListParams, +} from './items'; diff --git a/src/resources/conversations/items.ts b/src/resources/conversations/items.ts new file mode 100644 index 0000000..5e15969 --- /dev/null +++ b/src/resources/conversations/items.ts @@ -0,0 +1,1583 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as Core from '../../core'; + +export class Items extends APIResource { + /** + * Create items in the conversation. + */ + create( + conversationId: string, + body: ItemCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/conversations/${conversationId}/items`, { body, ...options }); + } + + /** + * List items in the conversation. + */ + list( + conversationId: string, + query: ItemListParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/conversations/${conversationId}/items`, { query, ...options }); + } + + /** + * Retrieve a conversation item. + */ + get( + conversationId: string, + itemId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/conversations/${conversationId}/items/${itemId}`, options); + } +} + +/** + * List of conversation items with pagination. + */ +export interface ItemCreateResponse { + data: Array< + | ItemCreateResponse.OpenAIResponseMessage + | ItemCreateResponse.OpenAIResponseOutputMessageFunctionToolCall + | ItemCreateResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ItemCreateResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ItemCreateResponse.OpenAIResponseOutputMessageMcpCall + | ItemCreateResponse.OpenAIResponseOutputMessageMcpListTools + >; + + has_more: boolean; + + object: string; + + first_id?: string; + + last_id?: string; +} + +export namespace ItemCreateResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * File search tool call output message for OpenAI responses. 
+ */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +/** + * List of conversation items with pagination. 
+ */ +export interface ItemListResponse { + data: Array< + | ItemListResponse.OpenAIResponseMessage + | ItemListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ItemListResponse.OpenAIResponseOutputMessageMcpCall + | ItemListResponse.OpenAIResponseOutputMessageMcpListTools + >; + + has_more: boolean; + + object: string; + + first_id?: string; + + last_id?: string; +} + +export namespace ItemListResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. 
+ */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +/** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ +export type ItemGetResponse = + | ItemGetResponse.OpenAIResponseMessage + | ItemGetResponse.OpenAIResponseOutputMessageFunctionToolCall + | ItemGetResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ItemGetResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ItemGetResponse.OpenAIResponseOutputMessageMcpCall + | ItemGetResponse.OpenAIResponseOutputMessageMcpListTools; + +export namespace ItemGetResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. 
+ */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. 
+ */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +export interface ItemCreateParams { + /** + * Items to include in the conversation context. + */ + items: Array< + | ItemCreateParams.OpenAIResponseMessage + | ItemCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ItemCreateParams.OpenAIResponseOutputMessageFileSearchToolCall + | ItemCreateParams.OpenAIResponseOutputMessageWebSearchToolCall + | ItemCreateParams.OpenAIResponseOutputMessageMcpCall + | ItemCreateParams.OpenAIResponseOutputMessageMcpListTools + >; +} + +export namespace ItemCreateParams { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + > + | Array; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + export interface UnionMember2 { + annotations: Array< + | UnionMember2.OpenAIResponseAnnotationFileCitation + | UnionMember2.OpenAIResponseAnnotationCitation + | UnionMember2.OpenAIResponseAnnotationContainerFileCitation + | UnionMember2.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace UnionMember2 { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * File search tool call output message for OpenAI responses. 
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
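
With these types in hand, creating a conversation item is mostly a matter of picking a message variant. A minimal sketch, assuming a locally running Llama Stack server; the base URL and conversation ID are placeholders:

    import LlamaStackClient from 'llama-stack-client';

    const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // assumed server URL

    // OpenAIResponseMessage variant: plain string content plus role and type.
    const created = await client.conversations.items.create('conv_123', {
      items: [{ type: 'message', role: 'user', content: 'Summarize the release notes.' }],
    });
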
+
+export interface ItemListParams {
+ /**
+ * An item ID to list items after, used in pagination.
+ */
+ after: string | unknown;
+
+ /**
+ * Specify additional output data to include in the response.
+ */
+ include:
+ | Array<
+ | 'code_interpreter_call.outputs'
+ | 'computer_call_output.output.image_url'
+ | 'file_search_call.results'
+ | 'message.input_image.image_url'
+ | 'message.output_text.logprobs'
+ | 'reasoning.encrypted_content'
+ >
+ | unknown;
+
+ /**
+ * A limit on the number of objects to be returned (1-100, default 20).
+ */
+ limit: number | unknown;
+
+ /**
+ * The order to return items in (asc or desc, default desc).
+ */
+ order: 'asc' | 'desc' | unknown;
+}
+
+export declare namespace Items {
+ export {
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
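
ItemListParams mirrors the OpenAI cursor-pagination conventions (after, include, limit, order). A hedged sketch of paging through items, reusing the client from the earlier sketch; the conversation and item IDs are placeholders:

    const page = await client.conversations.items.list('conv_123', {
      after: 'item_456', // cursor: list items after this ID
      include: ['message.output_text.logprobs'],
      limit: 20, // 1-100, default 20
      order: 'desc',
    });
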
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index ff5d371..f07ff14 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -5,8 +5,8 @@ import * as Core from '../core';
 
 export class Embeddings extends APIResource {
 /**
- * Generate OpenAI-compatible embeddings for the given input using the specified
- * model.
+ * Create embeddings. Generate OpenAI-compatible embeddings for the given input
+ * using the specified model.
 */
 create(
 body: EmbeddingCreateParams,
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 077487a..e59026e 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -7,8 +7,9 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../pagination';
 
 export class Files extends APIResource {
 /**
- * Upload a file that can be used across various endpoints. The file upload should
- * be a multipart form request with:
+ * Upload file. Upload a file that can be used across various endpoints.
+ *
+ * The file upload should be a multipart form request with:
 *
 * - file: The File object (not file name) to be uploaded.
 * - purpose: The intended purpose of the uploaded file.
@@ -19,14 +20,14 @@ export class Files extends APIResource {
 }
 
 /**
- * Returns information about a specific file.
+ * Retrieve file. Returns information about a specific file.
 */
 retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise<File> {
 return this._client.get(`/v1/files/${fileId}`, options);
 }
 
 /**
- * Returns a list of files that belong to the user's organization.
+ * List files. Returns a list of files that belong to the user's organization.
 */
 list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
 list(options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
@@ -41,14 +42,14 @@ export class Files extends APIResource {
 }
 
 /**
- * Delete a file.
+ * Delete file.
 */
 delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise<DeleteFileResponse> {
 return this._client.delete(`/v1/files/${fileId}`, options);
 }
 
 /**
- * Returns the contents of the specified file.
+ * Retrieve file content. Returns the contents of the specified file.
 */
 content(fileId: string, options?: Core.RequestOptions): Core.APIPromise<Response> {
 return this._client.get(`/v1/files/${fileId}/content`, options);
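
The upload contract above is the standard multipart pattern, and Stainless SDKs accept Node streams for the file part. A hedged sketch of uploading a file and reading it back, assuming the upload method is create() and reusing the client from the earlier sketch:

    import fs from 'fs';

    const uploaded = await client.files.create({
      file: fs.createReadStream('notes.txt'), // the File object, not the file name
      purpose: 'assistants',
    });
    const contents = await client.files.content(uploaded.id); // raw file contents
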
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 5d5b2f7..53ab7ad 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -17,6 +17,13 @@ export {
 type CompletionCreateParamsNonStreaming,
 type CompletionCreateParamsStreaming,
 } from './completions';
+export {
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations/conversations';
 export {
 Datasets,
 type ListDatasetsResponse,
diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts
index 4e5d87c..0c10896 100644
--- a/src/resources/inspect.ts
+++ b/src/resources/inspect.ts
@@ -5,14 +5,14 @@ import * as Core from '../core';
 
 export class Inspect extends APIResource {
 /**
- * Get the current health status of the service.
+ * Get health status. Get the current health status of the service.
 */
 health(options?: Core.RequestOptions): Core.APIPromise<HealthInfo> {
 return this._client.get('/v1/health', options);
 }
 
 /**
- * Get the version of the service.
+ * Get version. Get the version of the service.
 */
 version(options?: Core.RequestOptions): Core.APIPromise<VersionInfo> {
 return this._client.get('/v1/version', options);
diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts
index 1919365..041245a 100644
--- a/src/resources/models/models.ts
+++ b/src/resources/models/models.ts
@@ -9,7 +9,7 @@ export class Models extends APIResource {
 openai: OpenAIAPI.OpenAI = new OpenAIAPI.OpenAI(this._client);
 
 /**
- * Get a model by its identifier.
+ * Get model. Get a model by its identifier.
 */
 retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise<Model> {
 return this._client.get(`/v1/models/${modelId}`, options);
 }
@@ -25,14 +25,14 @@
 }
 
 /**
- * Register a model.
+ * Register model. Register a model.
 */
 register(body: ModelRegisterParams, options?: Core.RequestOptions): Core.APIPromise<Model> {
 return this._client.post('/v1/models', { body, ...options });
 }
 
 /**
- * Unregister a model.
+ * Unregister model. Unregister a model.
 */
 unregister(modelId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
 return this._client.delete(`/v1/models/${modelId}`, {
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index b824f10..be12766 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -5,7 +5,8 @@ import * as Core from '../core';
 
 export class Moderations extends APIResource {
 /**
- * Classifies if text and/or image inputs are potentially harmful.
+ * Create moderation. Classifies if text and/or image inputs are potentially
+ * harmful.
 */
 create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise<CreateResponse> {
 return this._client.post('/v1/moderations', { body, ...options });
diff --git a/src/resources/providers.ts b/src/resources/providers.ts
index d27b9ab..2736f37 100644
--- a/src/resources/providers.ts
+++ b/src/resources/providers.ts
@@ -6,14 +6,14 @@ import * as InspectAPI from './inspect';
 
 export class Providers extends APIResource {
 /**
- * Get detailed information about a specific provider.
+ * Get provider. Get detailed information about a specific provider.
 */
 retrieve(providerId: string, options?: Core.RequestOptions): Core.APIPromise<ProviderInfo> {
 return this._client.get(`/v1/providers/${providerId}`, options);
 }
 
 /**
- * List all available providers.
+ * List providers. List all available providers.
 */
 list(options?: Core.RequestOptions): Core.APIPromise<ProviderListResponse> {
 return (
diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts
index 398022b..c8c672b 100644
--- a/src/resources/responses/input-items.ts
+++ b/src/resources/responses/input-items.ts
@@ -6,7 +6,7 @@ import * as Core from '../../core';
 
 export class InputItems extends APIResource {
 /**
- * List input items for a given OpenAI response.
+ * List input items.
 */
 list(
 responseId: string,
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 05fe120..6356449 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -14,7 +14,7 @@ export class Responses extends APIResource {
 inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
 
 /**
- * Create a new OpenAI response.
+ * Create a model response.
 */
 create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<ResponseObject>;
 create(
@@ -35,14 +35,14 @@
 }
 
 /**
- * Retrieve an OpenAI response by its ID.
+ * Get a model response.
 */
 retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise<ResponseObject> {
 return this._client.get(`/v1/responses/${responseId}`, options);
 }
 
 /**
- * List all OpenAI responses.
+ * List all responses.
 */
 list(
 query?: ResponseListParams,
@@ -65,7 +65,7 @@
 }
 
 /**
- * Delete an OpenAI response by its ID.
+ * Delete a response.
 */
 delete(responseId: string, options?: Core.RequestOptions): Core.APIPromise<ResponseDeleteResponse> {
 return this._client.delete(`/v1/responses/${responseId}`, options);
@@ -141,6 +141,16 @@ export interface ResponseObject {
 */
 temperature?: number;
 
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseObject.OpenAIResponseInputToolWebSearch
+ | ResponseObject.OpenAIResponseInputToolFileSearch
+ | ResponseObject.OpenAIResponseInputToolFunction
+ | ResponseObject.OpenAIResponseToolMcp
+ >;
+
 /**
 * (Optional) Nucleus sampling parameter used for generation
 */
@@ -150,6 +160,11 @@ export interface ResponseObject {
 * (Optional) Truncation strategy applied to the response
 */
 truncation?: string;
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ usage?: ResponseObject.Usage;
 }
 
 export namespace ResponseObject {
@@ -582,6 +597,182 @@ export namespace ResponseObject {
 */
 message: string;
 }
+
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
+ search_context_size?: string;
+ }
+
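
Since ResponseObject now records its tools, a caller can see after the fact which tool configuration produced a response. A hedged sketch of creating a response with the web search variant shown above, reusing the client from the earlier sketch; the model ID is a placeholder:

    const response = await client.responses.create({
      model: 'llama-3.2-3b', // placeholder model id
      input: 'What changed in the latest release?',
      tools: [{ type: 'web_search' }],
    });
    console.log(response.tools, response.usage?.total_tokens);
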
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
+ type: 'file_search';
+
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array<string>;
+
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
+
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
+
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
+ ranker?: string;
+
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
+ score_threshold?: number;
+ }
+ }
+
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
+ name: string;
+
+ /**
+ * Tool type identifier, always "function"
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Description of what the function does
+ */
+ description?: string;
+
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
+ strict?: boolean;
+ }
+
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+ */
+ export interface OpenAIResponseToolMcp {
+ /**
+ * Label to identify this MCP server
+ */
+ server_label: string;
+
+ /**
+ * Tool type identifier, always "mcp"
+ */
+ type: 'mcp';
+
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
+ allowed_tools?: Array<string> | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+
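
Of the tool inputs above, file_search carries the most configuration. A sketch of a fully specified value; the vector store ID is hypothetical and the threshold is illustrative:

    const fileSearchTool = {
      type: 'file_search' as const,
      vector_store_ids: ['vs_abc123'], // hypothetical vector store id
      max_num_results: 10, // 1-50
      ranking_options: { score_threshold: 0.5 },
    };
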
+ export namespace OpenAIResponseToolMcp {
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array<string>;
+ }
+ }
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the input
+ */
+ input_tokens: number;
+
+ /**
+ * Number of tokens in the output
+ */
+ output_tokens: number;
+
+ /**
+ * Total tokens used (input + output)
+ */
+ total_tokens: number;
+
+ /**
+ * Detailed breakdown of input token usage
+ */
+ input_tokens_details?: Usage.InputTokensDetails;
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+ }
 }
 
 /**
@@ -589,6 +780,7 @@ export namespace ResponseObject {
 */
 export type ResponseObjectStream =
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseInProgress
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta
@@ -608,6 +800,8 @@ export type ResponseObjectStream =
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseIncomplete
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFailed
 | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted;
 
 export namespace ResponseObjectStream {
 /**
@@ -616,7 +810,7 @@ export namespace ResponseObjectStream {
 */
 export interface OpenAIResponseObjectStreamResponseCreated {
 /**
- * The newly created response object
+ * The response object that was created
 */
 response: ResponsesAPI.ResponseObject;
 
@@ -626,6 +820,26 @@ export namespace ResponseObjectStream {
 type: 'response.created';
 }
 
+ /**
+ * Streaming event indicating the response remains in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseInProgress {
+ /**
+ * Current response state while in progress
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.in_progress"
+ */
+ type: 'response.in_progress';
+ }
+
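
With created and in_progress joining the union, a consumer can follow the whole lifecycle of a streamed response. A hedged sketch of switching on event types (only a few variants handled, client as in the earlier sketch, model ID a placeholder):

    const stream = await client.responses.create({
      model: 'llama-3.2-3b', // placeholder model id
      input: 'Hello',
      stream: true,
    });
    for await (const event of stream) {
      if (event.type === 'response.in_progress') {
        console.log('in progress, seq', event.sequence_number);
      } else if (event.type === 'response.completed') {
        console.log('done:', event.response.id);
      }
    }
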
 /**
 * Streaming event for when a new output item is added to the response.
 */
@@ -1737,17 +1951,28 @@ export namespace ResponseObjectStream {
 * Streaming event for when a new content part is added to a response item.
 */
 export interface OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
 /**
 * Unique identifier of the output item containing this content part
 */
 item_id: string;
 
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
 /**
 * The content part that was added
 */
 part:
 | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal;
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartReasoningText;
 
 /**
 * Unique identifier of the response containing this content
@@ -1766,63 +1991,368 @@ export namespace ResponseObjectStream {
 }
 
 export namespace OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Text content within a streamed response part.
+ */
 export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
 text: string;
 
+ /**
+ * Content part type identifier, always "output_text"
+ */
 type: 'output_text';
- }
-
- export interface OpenAIResponseContentPartRefusal {
- refusal: string;
 
- type: 'refusal';
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
 }
- }
 
- /**
- * Streaming event for when a content part is completed.
- */
- export interface OpenAIResponseObjectStreamResponseContentPartDone {
- /**
- * Unique identifier of the output item containing this content part
- */
- item_id: string;
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
 
- /**
- * The completed content part
- */
- part:
- | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal;
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
 
- /**
- * Unique identifier of the response containing this content
- */
- response_id: string;
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
 
- /**
- * Sequential number for ordering streaming events
- */
- sequence_number: number;
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
 
- /**
- * Event type identifier, always "response.content_part.done"
- */
- type: 'response.content_part.done';
- }
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
+ }
+
+ /**
+ * Streaming event for when a content part is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the output item containing this content part
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
+ /**
+ * The completed content part
+ */
+ part:
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartReasoningText;
+
+ /**
+ * Unique identifier of the response containing this content
+ */
+ response_id: string;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.content_part.done"
+ */
+ type: 'response.content_part.done';
+ }
 
 export namespace OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Text content within a streamed response part.
+ */
 export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
 text: string;
 
+ /**
+ * Content part type identifier, always "output_text"
+ */
 type: 'output_text';
+
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
 }
 
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ refusal: string; + /** + * Content part type identifier, always "refusal" + */ type: 'refusal'; } + + /** + * Reasoning text emitted as part of a streamed response. + */ + export interface OpenAIResponseContentPartReasoningText { + /** + * Reasoning text supplied by the model + */ + text: string; + + /** + * Content part type identifier, always "reasoning_text" + */ + type: 'reasoning_text'; + } + } + + /** + * Streaming event emitted when a response ends in an incomplete state. + */ + export interface OpenAIResponseObjectStreamResponseIncomplete { + /** + * Response object describing the incomplete state + */ + response: ResponsesAPI.ResponseObject; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.incomplete" + */ + type: 'response.incomplete'; + } + + /** + * Streaming event emitted when a response fails. + */ + export interface OpenAIResponseObjectStreamResponseFailed { + /** + * Response object describing the failure + */ + response: ResponsesAPI.ResponseObject; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.failed" + */ + type: 'response.failed'; } /** @@ -1830,7 +2360,7 @@ export namespace ResponseObjectStream { */ export interface OpenAIResponseObjectStreamResponseCompleted { /** - * The completed response object + * Completed response object */ response: ResponsesAPI.ResponseObject; @@ -1921,6 +2451,16 @@ export interface ResponseListResponse { */ temperature?: number; + /** + * (Optional) An array of tools the model may call while generating a response. 
+ */
+ tools?: Array<
+ | ResponseListResponse.OpenAIResponseInputToolWebSearch
+ | ResponseListResponse.OpenAIResponseInputToolFileSearch
+ | ResponseListResponse.OpenAIResponseInputToolFunction
+ | ResponseListResponse.OpenAIResponseToolMcp
+ >;
+
 /**
 * (Optional) Nucleus sampling parameter used for generation
 */
@@ -1930,6 +2470,11 @@ export interface ResponseListResponse {
 * (Optional) Truncation strategy applied to the response
 */
 truncation?: string;
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ usage?: ResponseListResponse.Usage;
 }
 
 export namespace ResponseListResponse {
@@ -2677,6 +3222,182 @@ export namespace ResponseListResponse {
 */
 message: string;
 }
+
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
+ /**
+ * Web search tool type variant to use
+ */
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+ /**
+ * (Optional) Size of search context, must be "low", "medium", or "high"
+ */
+ search_context_size?: string;
+ }
+
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
+ /**
+ * Tool type identifier, always "file_search"
+ */
+ type: 'file_search';
+
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array<string>;
+
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
+
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
+
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
+ /**
+ * (Optional) Name of the ranking algorithm to use
+ */
+ ranker?: string;
+
+ /**
+ * (Optional) Minimum relevance score threshold for results
+ */
+ score_threshold?: number;
+ }
+ }
+
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
+ name: string;
+
+ /**
+ * Tool type identifier, always "function"
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Description of what the function does
+ */
+ description?: string;
+
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
+ strict?: boolean;
+ }
+
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+ */
+ export interface OpenAIResponseToolMcp {
+ /**
+ * Label to identify this MCP server
+ */
+ server_label: string;
+
+ /**
+ * Tool type identifier, always "mcp"
+ */
+ type: 'mcp';
+
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
+ allowed_tools?: Array<string> | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+
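
ResponseListResponse entries now carry the same tools and usage data as ResponseObject (the usage type follows below), so token accounting also works when paging history. A hedged sketch, client as in the earlier sketches:

    const page = await client.responses.list({ limit: 10 });
    for (const r of page.data) {
      console.log(r.id, r.status, r.usage?.total_tokens ?? 'n/a');
    }
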
+ export namespace OpenAIResponseToolMcp {
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array<string>;
+ }
+ }
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the input
+ */
+ input_tokens: number;
+
+ /**
+ * Number of tokens in the output
+ */
+ output_tokens: number;
+
+ /**
+ * Total tokens used (input + output)
+ */
+ total_tokens: number;
+
+ /**
+ * Detailed breakdown of input token usage
+ */
+ input_tokens_details?: Usage.InputTokensDetails;
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+ }
 }
 
 /**
@@ -2722,6 +3443,13 @@ export interface ResponseCreateParamsBase {
 */
 model: string;
 
+ /**
+ * (Optional) The ID of a conversation to add the response to. Must begin with
+ * 'conv\_'. Input and output messages will be automatically added to the
+ * conversation.
+ */
+ conversation?: string;
+
 /**
 * (Optional) Additional fields to include in the response.
 */
diff --git a/src/resources/routes.ts b/src/resources/routes.ts
index 98d5dfe..85e8496 100644
--- a/src/resources/routes.ts
+++ b/src/resources/routes.ts
@@ -6,7 +6,8 @@ import * as InspectAPI from './inspect';
 
 export class Routes extends APIResource {
 /**
- * List all available API routes with their methods and implementing providers.
+ * List routes. List all available API routes with their methods and implementing
+ * providers.
 */
 list(options?: Core.RequestOptions): Core.APIPromise<RouteListResponse> {
 return (
diff --git a/src/resources/safety.ts b/src/resources/safety.ts
index d41b2c7..febaf83 100644
--- a/src/resources/safety.ts
+++ b/src/resources/safety.ts
@@ -6,7 +6,7 @@ import * as Shared from './shared';
 
 export class Safety extends APIResource {
 /**
- * Run a shield.
+ * Run shield. Run a shield.
 */
 runShield(body: SafetyRunShieldParams, options?: Core.RequestOptions): Core.APIPromise<RunShieldResponse> {
 return this._client.post('/v1/safety/run-shield', { body, ...options });
diff --git a/tests/api-resources/conversations/conversations.test.ts b/tests/api-resources/conversations/conversations.test.ts
new file mode 100644
index 0000000..e13a9e4
--- /dev/null
+++ b/tests/api-resources/conversations/conversations.test.ts
@@ -0,0 +1,70 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import LlamaStackClient from 'llama-stack-client';
+import { Response } from 'node-fetch';
+
+const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ??
'http://127.0.0.1:4010' }); + +describe('resource conversations', () => { + test('create', async () => { + const responsePromise = client.conversations.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve', async () => { + const responsePromise = client.conversations.retrieve('conversation_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.conversations.retrieve('conversation_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.conversations.update('conversation_id', { metadata: { foo: 'string' } }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.conversations.update('conversation_id', { metadata: { foo: 'string' } }); + }); + + test('delete', async () => { + const responsePromise = client.conversations.delete('conversation_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('delete: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.conversations.delete('conversation_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/conversations/items.test.ts b/tests/api-resources/conversations/items.test.ts new file mode 100644 index 0000000..91c2488 --- /dev/null +++ b/tests/api-resources/conversations/items.test.ts @@ -0,0 +1,70 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import LlamaStackClient from 'llama-stack-client'; +import { Response } from 'node-fetch'; + +const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010' }); + +describe('resource items', () => { + test('create: only required params', async () => { + const responsePromise = client.conversations.items.create('conversation_id', { + items: [{ content: 'string', role: 'system', type: 'message' }], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.conversations.items.create('conversation_id', { + items: [{ content: 'string', role: 'system', type: 'message', id: 'id', status: 'status' }], + }); + }); + + test('list: only required params', async () => { + const responsePromise = client.conversations.items.list('conversation_id', { + after: 'string', + include: ['code_interpreter_call.outputs'], + limit: 0, + order: 'asc', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: required and optional params', async () => { + const response = await client.conversations.items.list('conversation_id', { + after: 'string', + include: ['code_interpreter_call.outputs'], + limit: 0, + order: 'asc', + }); + }); + + test('get', async () => { + const responsePromise = client.conversations.items.get('conversation_id', 'item_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('get: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.conversations.items.get('conversation_id', 'item_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(LlamaStackClient.NotFoundError); + }); +}); diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts index f1142d8..0a4bab6 100644 --- a/tests/api-resources/responses/responses.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -21,6 +21,7 @@ describe('resource responses', () => { const response = await client.responses.create({ input: 'string', model: 'model', + conversation: 'conversation', include: ['string'], instructions: 'instructions', max_infer_iters: 0, From b521df178374fd7548d2425c4d0610680469ca75 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 17:08:06 +0000 Subject: [PATCH 24/26] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 9a8764c..721d577 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 115 
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-8a12a05ba6892999ac506f69d5cbbc7218f28ee1a11bf8e0e548c603435bb643.yml openapi_spec_hash: 871ce212a98bdad4a44ec7fbf58d9fcb -config_hash: 85d9db5422f2cf897267c0e4825ce1bf +config_hash: 4c1ba9dc45c31189cd1b039d003a3544 From 19535c27147bf6f6861b807d9eeee471b5625148 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 01:51:36 +0000 Subject: [PATCH 25/26] feat(api): updates to vector_store, etc. --- .stats.yml | 8 +- api.md | 16 - src/index.ts | 19 - src/resources/chat/chat.ts | 5 + src/resources/completions.ts | 4 - src/resources/index.ts | 8 - src/resources/responses/responses.ts | 524 +++++++++++++++++++ src/resources/safety.ts | 300 ++++++++++- src/resources/vector-dbs.ts | 185 ------- src/resources/vector-stores/file-batches.ts | 9 +- src/resources/vector-stores/vector-stores.ts | 30 +- tests/api-resources/completions.test.ts | 2 - tests/api-resources/safety.test.ts | 2 +- tests/api-resources/vector-dbs.test.ts | 87 --- 14 files changed, 845 insertions(+), 354 deletions(-) delete mode 100644 src/resources/vector-dbs.ts delete mode 100644 tests/api-resources/vector-dbs.test.ts diff --git a/.stats.yml b/.stats.yml index 721d577..bf6aaff 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 115 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-8a12a05ba6892999ac506f69d5cbbc7218f28ee1a11bf8e0e548c603435bb643.yml -openapi_spec_hash: 871ce212a98bdad4a44ec7fbf58d9fcb -config_hash: 4c1ba9dc45c31189cd1b039d003a3544 +configured_endpoints: 111 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-15a929a0b71de779accc56bd09d1e5f580e216affdb408cf9890bc7a37847e9e.yml +openapi_spec_hash: 5db9f7c7e80427cfa0298cbb01689559 +config_hash: 06758df5c4f261f9c97eafcef7e0028f diff --git a/api.md b/api.md index 1fad22c..fc7c09e 100644 --- a/api.md +++ b/api.md @@ -204,22 +204,6 @@ Methods: - client.vectorIo.insert({ ...params }) -> void - client.vectorIo.query({ ...params }) -> QueryChunksResponse -# VectorDBs - -Types: - -- ListVectorDBsResponse -- VectorDBRetrieveResponse -- VectorDBListResponse -- VectorDBRegisterResponse - -Methods: - -- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse -- client.vectorDBs.list() -> VectorDBListResponse -- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse -- client.vectorDBs.unregister(vectorDBId) -> void - # VectorStores Types: diff --git a/src/index.ts b/src/index.ts index fbb5abf..bdc75ae 100644 --- a/src/index.ts +++ b/src/index.ts @@ -107,14 +107,6 @@ import { Toolgroups, } from './resources/toolgroups'; import { ToolListParams, ToolListResponse, Tools } from './resources/tools'; -import { - ListVectorDBsResponse, - VectorDBListResponse, - VectorDBRegisterParams, - VectorDBRegisterResponse, - VectorDBRetrieveResponse, - VectorDBs, -} from './resources/vector-dbs'; import { QueryChunksResponse, VectorIo, @@ -291,7 +283,6 @@ export class LlamaStackClient extends Core.APIClient { chat: API.Chat = new API.Chat(this); completions: API.Completions = new API.Completions(this); vectorIo: API.VectorIo = new API.VectorIo(this); - vectorDBs: API.VectorDBs = new API.VectorDBs(this); vectorStores: API.VectorStores = new API.VectorStores(this); models: API.Models = new API.Models(this); providers: API.Providers = new 
API.Providers(this);
@@ -369,7 +360,6 @@ LlamaStackClient.Embeddings = Embeddings;
 LlamaStackClient.Chat = Chat;
 LlamaStackClient.Completions = Completions;
 LlamaStackClient.VectorIo = VectorIo;
-LlamaStackClient.VectorDBs = VectorDBs;
 LlamaStackClient.VectorStores = VectorStores;
 LlamaStackClient.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage;
 LlamaStackClient.Models = Models;
@@ -485,15 +475,6 @@ export declare namespace LlamaStackClient {
 type VectorIoQueryParams as VectorIoQueryParams,
 };
 
- export {
- VectorDBs as VectorDBs,
- type ListVectorDBsResponse as ListVectorDBsResponse,
- type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
- type VectorDBListResponse as VectorDBListResponse,
- type VectorDBRegisterResponse as VectorDBRegisterResponse,
- type VectorDBRegisterParams as VectorDBRegisterParams,
- };
-
 export {
 VectorStores as VectorStores,
 type ListVectorStoresResponse as ListVectorStoresResponse,
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 47bfc7a..cc0e0eb 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -89,6 +89,11 @@ export namespace ChatCompletionChunk {
 */
 content?: string;
 
+ /**
+ * (Optional) The reasoning content from the model (non-standard, for o1/o3 models)
+ */
+ reasoning_content?: string;
+
 /**
 * (Optional) The refusal of the delta
 */
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index da2d7f3..fe49a25 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -172,8 +172,6 @@ export interface CompletionCreateParamsBase {
 */
 frequency_penalty?: number;
 
- guided_choice?: Array<string>;
-
 /**
 * (Optional) The logit bias to use.
 */
@@ -199,8 +197,6 @@
 */
 presence_penalty?: number;
 
- prompt_logprobs?: number;
-
 /**
 * (Optional) The seed to use.
*/ diff --git a/src/resources/index.ts b/src/resources/index.ts index 53ab7ad..5003b67 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -131,14 +131,6 @@ export { type ToolgroupRegisterParams, } from './toolgroups'; export { Tools, type ToolListResponse, type ToolListParams } from './tools'; -export { - VectorDBs, - type ListVectorDBsResponse, - type VectorDBRetrieveResponse, - type VectorDBListResponse, - type VectorDBRegisterResponse, - type VectorDBRegisterParams, -} from './vector-dbs'; export { VectorIo, type QueryChunksResponse, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 6356449..f785bf5 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -800,6 +800,18 @@ export type ResponseObjectStream = | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDelta + | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded + | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta + | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDelta + | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDone + | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallInProgress + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallSearching + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallCompleted | ResponseObjectStream.OpenAIResponseObjectStreamResponseIncomplete | ResponseObjectStream.OpenAIResponseObjectStreamResponseFailed | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted; @@ -2315,6 +2327,518 @@ export namespace ResponseObjectStream { } } + /** + * Streaming event for incremental reasoning text updates. + */ + export interface OpenAIResponseObjectStreamResponseReasoningTextDelta { + /** + * Index position of the reasoning content part + */ + content_index: number; + + /** + * Incremental reasoning text being added + */ + delta: string; + + /** + * Unique identifier of the output item being updated + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.reasoning_text.delta" + */ + type: 'response.reasoning_text.delta'; + } + + /** + * Streaming event for when reasoning text is completed. 
+ */ + export interface OpenAIResponseObjectStreamResponseReasoningTextDone { + /** + * Index position of the reasoning content part + */ + content_index: number; + + /** + * Unique identifier of the completed output item + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Final complete reasoning text + */ + text: string; + + /** + * Event type identifier, always "response.reasoning_text.done" + */ + type: 'response.reasoning_text.done'; + } + + /** + * Streaming event for when a new reasoning summary part is added. + */ + export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded { + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the output item + */ + output_index: number; + + /** + * The summary part that was added + */ + part: OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded.Part; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Index of the summary part within the reasoning summary + */ + summary_index: number; + + /** + * Event type identifier, always "response.reasoning_summary_part.added" + */ + type: 'response.reasoning_summary_part.added'; + } + + export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded { + /** + * The summary part that was added + */ + export interface Part { + /** + * Summary text + */ + text: string; + + /** + * Content part type identifier, always "summary_text" + */ + type: 'summary_text'; + } + } + + /** + * Streaming event for when a reasoning summary part is completed. + */ + export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartDone { + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the output item + */ + output_index: number; + + /** + * The completed summary part + */ + part: OpenAIResponseObjectStreamResponseReasoningSummaryPartDone.Part; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Index of the summary part within the reasoning summary + */ + summary_index: number; + + /** + * Event type identifier, always "response.reasoning_summary_part.done" + */ + type: 'response.reasoning_summary_part.done'; + } + + export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartDone { + /** + * The completed summary part + */ + export interface Part { + /** + * Summary text + */ + text: string; + + /** + * Content part type identifier, always "summary_text" + */ + type: 'summary_text'; + } + } + + /** + * Streaming event for incremental reasoning summary text updates. + */ + export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta { + /** + * Incremental summary text being added + */ + delta: string; + + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the output item + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Index of the summary part within the reasoning summary + */ + summary_index: number; + + /** + * Event type identifier, always "response.reasoning_summary_text.delta" + */ + type: 'response.reasoning_summary_text.delta'; + } + + /** + * Streaming event for when reasoning summary text is completed. 
+ */ + export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDone { + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the output item + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Index of the summary part within the reasoning summary + */ + summary_index: number; + + /** + * Final complete summary text + */ + text: string; + + /** + * Event type identifier, always "response.reasoning_summary_text.done" + */ + type: 'response.reasoning_summary_text.done'; + } + + /** + * Streaming event for incremental refusal text updates. + */ + export interface OpenAIResponseObjectStreamResponseRefusalDelta { + /** + * Index position of the content part + */ + content_index: number; + + /** + * Incremental refusal text being added + */ + delta: string; + + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.refusal.delta" + */ + type: 'response.refusal.delta'; + } + + /** + * Streaming event for when refusal text is completed. + */ + export interface OpenAIResponseObjectStreamResponseRefusalDone { + /** + * Index position of the content part + */ + content_index: number; + + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Final complete refusal text + */ + refusal: string; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.refusal.done" + */ + type: 'response.refusal.done'; + } + + /** + * Streaming event for when an annotation is added to output text. + */ + export interface OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded { + /** + * The annotation object being added + */ + annotation: + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFileCitation + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationCitation + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFilePath; + + /** + * Index of the annotation within the content part + */ + annotation_index: number; + + /** + * Index position of the content part within the output item + */ + content_index: number; + + /** + * Unique identifier of the item to which the annotation is being added + */ + item_id: string; + + /** + * Index position of the output item in the response's output array + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.output_text.annotation.added" + */ + type: 'response.output_text.annotation.added'; + } + + export namespace OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded { + /** + * File citation annotation for referencing specific files in response content. 
+ */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Streaming event for file search calls in progress. + */ + export interface OpenAIResponseObjectStreamResponseFileSearchCallInProgress { + /** + * Unique identifier of the file search call + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.file_search_call.in_progress" + */ + type: 'response.file_search_call.in_progress'; + } + + /** + * Streaming event for file search currently searching. + */ + export interface OpenAIResponseObjectStreamResponseFileSearchCallSearching { + /** + * Unique identifier of the file search call + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.file_search_call.searching" + */ + type: 'response.file_search_call.searching'; + } + + /** + * Streaming event for completed file search calls. + */ + export interface OpenAIResponseObjectStreamResponseFileSearchCallCompleted { + /** + * Unique identifier of the completed file search call + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.file_search_call.completed" + */ + type: 'response.file_search_call.completed'; + } + /** * Streaming event emitted when a response ends in an incomplete state. */ diff --git a/src/resources/safety.ts b/src/resources/safety.ts index febaf83..902aa14 100644 --- a/src/resources/safety.ts +++ b/src/resources/safety.ts @@ -27,7 +27,13 @@ export interface SafetyRunShieldParams { /** * The messages to run the shield on. 
   */
-  messages: Array<Shared.Message>;
+  messages: Array<
+    | SafetyRunShieldParams.OpenAIUserMessageParam
+    | SafetyRunShieldParams.OpenAISystemMessageParam
+    | SafetyRunShieldParams.OpenAIAssistantMessageParam
+    | SafetyRunShieldParams.OpenAIToolMessageParam
+    | SafetyRunShieldParams.OpenAIDeveloperMessageParam
+  >;
 
   /**
    * The parameters of the shield.
@@ -40,6 +46,298 @@ export interface SafetyRunShieldParams {
   shield_id: string;
 }
 
+export namespace SafetyRunShieldParams {
+  /**
+   * A message from the user in an OpenAI-compatible chat completion request.
+   */
+  export interface OpenAIUserMessageParam {
+    /**
+     * The content of the message, which can include text and other media
+     */
+    content:
+      | string
+      | Array<
+          | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam
+          | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam
+          | OpenAIUserMessageParam.OpenAIFile
+        >;
+
+    /**
+     * Must be "user" to identify this as a user message
+     */
+    role: 'user';
+
+    /**
+     * (Optional) The name of the user message participant.
+     */
+    name?: string;
+  }
+
+  export namespace OpenAIUserMessageParam {
+    /**
+     * Text content part for OpenAI-compatible chat completion messages.
+     */
+    export interface OpenAIChatCompletionContentPartTextParam {
+      /**
+       * The text content of the message
+       */
+      text: string;
+
+      /**
+       * Must be "text" to identify this as text content
+       */
+      type: 'text';
+    }
+
+    /**
+     * Image content part for OpenAI-compatible chat completion messages.
+     */
+    export interface OpenAIChatCompletionContentPartImageParam {
+      /**
+       * Image URL specification and processing details
+       */
+      image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
+
+      /**
+       * Must be "image_url" to identify this as image content
+       */
+      type: 'image_url';
+    }
+
+    export namespace OpenAIChatCompletionContentPartImageParam {
+      /**
+       * Image URL specification and processing details
+       */
+      export interface ImageURL {
+        /**
+         * URL of the image to include in the message
+         */
+        url: string;
+
+        /**
+         * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
+         */
+        detail?: string;
+      }
+    }
+
+    export interface OpenAIFile {
+      file: OpenAIFile.File;
+
+      type: 'file';
+    }
+
+    export namespace OpenAIFile {
+      export interface File {
+        file_data?: string;
+
+        file_id?: string;
+
+        filename?: string;
+      }
+    }
+  }
+
+  /**
+   * A system message providing instructions or context to the model.
+   */
+  export interface OpenAISystemMessageParam {
+    /**
+     * The content of the "system prompt". If multiple system messages are provided,
+     * they are concatenated. The underlying Llama Stack code may also add other system
+     * messages (for example, for formatting tool definitions).
+     */
+    content: string | Array<OpenAISystemMessageParam.UnionMember1>;
+
+    /**
+     * Must be "system" to identify this as a system message
+     */
+    role: 'system';
+
+    /**
+     * (Optional) The name of the system message participant.
+     */
+    name?: string;
+  }
+
+  export namespace OpenAISystemMessageParam {
+    /**
+     * Text content part for OpenAI-compatible chat completion messages.
+     */
+    export interface UnionMember1 {
+      /**
+       * The text content of the message
+       */
+      text: string;
+
+      /**
+       * Must be "text" to identify this as text content
+       */
+      type: 'text';
+    }
+  }
+
+  /**
+   * A message containing the model's (assistant) response in an OpenAI-compatible
+   * chat completion request.
+   */
+  export interface OpenAIAssistantMessageParam {
+    /**
+     * Must be "assistant" to identify this as the model's response
+     */
+    role: 'assistant';
+
+    /**
+     * The content of the model's response
+     */
+    content?: string | Array<OpenAIAssistantMessageParam.UnionMember1>;
+
+    /**
+     * (Optional) The name of the assistant message participant.
+     */
+    name?: string;
+
+    /**
+     * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object.
+     */
+    tool_calls?: Array<OpenAIAssistantMessageParam.ToolCall>;
+  }
+
+  export namespace OpenAIAssistantMessageParam {
+    /**
+     * Text content part for OpenAI-compatible chat completion messages.
+     */
+    export interface UnionMember1 {
+      /**
+       * The text content of the message
+       */
+      text: string;
+
+      /**
+       * Must be "text" to identify this as text content
+       */
+      type: 'text';
+    }
+
+    /**
+     * Tool call specification for OpenAI-compatible chat completion responses.
+     */
+    export interface ToolCall {
+      /**
+       * Must be "function" to identify this as a function call
+       */
+      type: 'function';
+
+      /**
+       * (Optional) Unique identifier for the tool call
+       */
+      id?: string;
+
+      /**
+       * (Optional) Function call details
+       */
+      function?: ToolCall.Function;
+
+      /**
+       * (Optional) Index of the tool call in the list
+       */
+      index?: number;
+    }
+
+    export namespace ToolCall {
+      /**
+       * (Optional) Function call details
+       */
+      export interface Function {
+        /**
+         * (Optional) Arguments to pass to the function as a JSON string
+         */
+        arguments?: string;
+
+        /**
+         * (Optional) Name of the function to call
+         */
+        name?: string;
+      }
+    }
+  }
+
+  /**
+   * A message representing the result of a tool invocation in an OpenAI-compatible
+   * chat completion request.
+   */
+  export interface OpenAIToolMessageParam {
+    /**
+     * The response content from the tool
+     */
+    content: string | Array<OpenAIToolMessageParam.UnionMember1>;
+
+    /**
+     * Must be "tool" to identify this as a tool response
+     */
+    role: 'tool';
+
+    /**
+     * Unique identifier for the tool call this response is for
+     */
+    tool_call_id: string;
+  }
+
+  export namespace OpenAIToolMessageParam {
+    /**
+     * Text content part for OpenAI-compatible chat completion messages.
+     */
+    export interface UnionMember1 {
+      /**
+       * The text content of the message
+       */
+      text: string;
+
+      /**
+       * Must be "text" to identify this as text content
+       */
+      type: 'text';
+    }
+  }
+
+  /**
+   * A message from the developer in an OpenAI-compatible chat completion request.
+   */
+  export interface OpenAIDeveloperMessageParam {
+    /**
+     * The content of the developer message
+     */
+    content: string | Array<OpenAIDeveloperMessageParam.UnionMember1>;
+
+    /**
+     * Must be "developer" to identify this as a developer message
+     */
+    role: 'developer';
+
+    /**
+     * (Optional) The name of the developer message participant.
+     */
+    name?: string;
+  }
+
+  export namespace OpenAIDeveloperMessageParam {
+    /**
+     * Text content part for OpenAI-compatible chat completion messages.
+     */
+    export interface UnionMember1 {
+      /**
+       * The text content of the message
+       */
+      text: string;
+
+      /**
+       * Must be "text" to identify this as text content
+       */
+      type: 'text';
+    }
+  }
+}
+
 export declare namespace Safety {
   export { type RunShieldResponse as RunShieldResponse, type SafetyRunShieldParams as SafetyRunShieldParams };
 }
diff --git a/src/resources/vector-dbs.ts b/src/resources/vector-dbs.ts
deleted file mode 100644
index 3004227..0000000
--- a/src/resources/vector-dbs.ts
+++ /dev/null
@@ -1,185 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import * as Core from '../core';
-
-export class VectorDBs extends APIResource {
-  /**
-   * Get a vector database by its identifier.
-   */
-  retrieve(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise<VectorDBRetrieveResponse> {
-    return this._client.get(`/v1/vector-dbs/${vectorDBId}`, options);
-  }
-
-  /**
-   * List all vector databases.
-   */
-  list(options?: Core.RequestOptions): Core.APIPromise<VectorDBListResponse> {
-    return (
-      this._client.get('/v1/vector-dbs', options) as Core.APIPromise<{ data: VectorDBListResponse }>
-    )._thenUnwrap((obj) => obj.data);
-  }
-
-  /**
-   * Register a vector database.
-   */
-  register(
-    body: VectorDBRegisterParams,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<VectorDBRegisterResponse> {
-    return this._client.post('/v1/vector-dbs', { body, ...options });
-  }
-
-  /**
-   * Unregister a vector database.
-   */
-  unregister(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
-    return this._client.delete(`/v1/vector-dbs/${vectorDBId}`, {
-      ...options,
-      headers: { Accept: '*/*', ...options?.headers },
-    });
-  }
-}
-
-/**
- * Response from listing vector databases.
- */
-export interface ListVectorDBsResponse {
-  /**
-   * List of vector databases
-   */
-  data: VectorDBListResponse;
-}
-
-/**
- * Vector database resource for storing and querying vector embeddings.
- */
-export interface VectorDBRetrieveResponse {
-  /**
-   * Dimension of the embedding vectors
-   */
-  embedding_dimension: number;
-
-  /**
-   * Name of the embedding model to use for vector generation
-   */
-  embedding_model: string;
-
-  identifier: string;
-
-  provider_id: string;
-
-  /**
-   * Type of resource, always 'vector_db' for vector databases
-   */
-  type: 'vector_db';
-
-  provider_resource_id?: string;
-
-  vector_db_name?: string;
-}
-
-/**
- * List of vector databases
- */
-export type VectorDBListResponse = Array<VectorDBListResponse.VectorDBListResponseItem>;
-
-export namespace VectorDBListResponse {
-  /**
-   * Vector database resource for storing and querying vector embeddings.
-   */
-  export interface VectorDBListResponseItem {
-    /**
-     * Dimension of the embedding vectors
-     */
-    embedding_dimension: number;
-
-    /**
-     * Name of the embedding model to use for vector generation
-     */
-    embedding_model: string;
-
-    identifier: string;
-
-    provider_id: string;
-
-    /**
-     * Type of resource, always 'vector_db' for vector databases
-     */
-    type: 'vector_db';
-
-    provider_resource_id?: string;
-
-    vector_db_name?: string;
-  }
-}
-
-/**
- * Vector database resource for storing and querying vector embeddings.
- */
-export interface VectorDBRegisterResponse {
-  /**
-   * Dimension of the embedding vectors
-   */
-  embedding_dimension: number;
-
-  /**
-   * Name of the embedding model to use for vector generation
-   */
-  embedding_model: string;
-
-  identifier: string;
-
-  provider_id: string;
-
-  /**
-   * Type of resource, always 'vector_db' for vector databases
-   */
-  type: 'vector_db';
-
-  provider_resource_id?: string;
-
-  vector_db_name?: string;
-}
-
-export interface VectorDBRegisterParams {
-  /**
-   * The embedding model to use.
-   */
-  embedding_model: string;
-
-  /**
-   * The identifier of the vector database to register.
-   */
-  vector_db_id: string;
-
-  /**
-   * The dimension of the embedding model.
-   */
-  embedding_dimension?: number;
-
-  /**
-   * The identifier of the provider.
-   */
-  provider_id?: string;
-
-  /**
-   * The identifier of the vector database in the provider.
-   */
-  provider_vector_db_id?: string;
-
-  /**
-   * The name of the vector database.
-   */
-  vector_db_name?: string;
-}
-
-export declare namespace VectorDBs {
-  export {
-    type ListVectorDBsResponse as ListVectorDBsResponse,
-    type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
-    type VectorDBListResponse as VectorDBListResponse,
-    type VectorDBRegisterResponse as VectorDBRegisterResponse,
-    type VectorDBRegisterParams as VectorDBRegisterParams,
-  };
-}
diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts
index 54bce95..75085eb 100644
--- a/src/resources/vector-stores/file-batches.ts
+++ b/src/resources/vector-stores/file-batches.ts
@@ -9,7 +9,8 @@ import { type OpenAICursorPageParams } from '../../pagination';
 
 export class FileBatches extends APIResource {
   /**
-   * Create a vector store file batch.
+   * Create a vector store file batch. Generate an OpenAI-compatible vector store
+   * file batch for the given vector store.
    */
   create(
     vectorStoreId: string,
@@ -171,17 +172,17 @@ export namespace VectorStoreFileBatches {
 
 export interface FileBatchCreateParams {
   /**
-   * A list of File IDs that the vector store should use.
+   * A list of File IDs that the vector store should use
    */
   file_ids: Array<string>;
 
   /**
-   * (Optional) Key-value attributes to store with the files.
+   * (Optional) Key-value attributes to store with the files
    */
   attributes?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
 
   /**
-   * (Optional) The chunking strategy used to chunk the file(s). Defaults to auto.
+   * (Optional) The chunking strategy used to chunk the file(s). Defaults to auto
    */
   chunking_strategy?:
     | FileBatchCreateParams.VectorStoreChunkingStrategyAuto
diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts
index 7163cf9..85db692 100644
--- a/src/resources/vector-stores/vector-stores.ts
+++ b/src/resources/vector-stores/vector-stores.ts
@@ -29,7 +29,8 @@ export class VectorStores extends APIResource {
   fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client);
 
   /**
-   * Creates a vector store.
+   * Creates a vector store. Generate an OpenAI-compatible vector store with the
+   * given parameters.
    */
   create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise<VectorStore> {
     return this._client.post('/v1/vector_stores', { body, ...options });
@@ -316,46 +317,29 @@ export namespace VectorStoreSearchResponse {
 
 export interface VectorStoreCreateParams {
   /**
-   * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-   * strategy.
+   * (Optional) Strategy for splitting files into chunks
    */
   chunking_strategy?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
 
   /**
-   * The dimension of the embedding vectors (default: 384).
-   */
-  embedding_dimension?: number;
-
-  /**
-   * The embedding model to use for this vector store.
-   */
-  embedding_model?: string;
-
-  /**
-   * The expiration policy for a vector store.
+   * (Optional) Expiration policy for the vector store
    */
   expires_after?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
 
   /**
-   * A list of File IDs that the vector store should use. Useful for tools like
-   * `file_search` that can access files.
+   * List of file IDs to include in the vector store
    */
   file_ids?: Array<string>;
 
   /**
-   * Set of 16 key-value pairs that can be attached to an object.
+   * Set of key-value pairs that can be attached to the vector store
    */
   metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
 
   /**
-   * A name for the vector store.
+ * (Optional) A name for the vector store */ name?: string; - - /** - * The ID of the provider to use for this vector store. - */ - provider_id?: string; } export interface VectorStoreUpdateParams { diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 736d76a..9a0d2eb 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -24,13 +24,11 @@ describe('resource completions', () => { best_of: 0, echo: true, frequency_penalty: 0, - guided_choice: ['string'], logit_bias: { foo: 0 }, logprobs: true, max_tokens: 0, n: 0, presence_penalty: 0, - prompt_logprobs: 0, seed: 0, stop: 'string', stream: false, diff --git a/tests/api-resources/safety.test.ts b/tests/api-resources/safety.test.ts index 4ca2ca6..6b43983 100644 --- a/tests/api-resources/safety.test.ts +++ b/tests/api-resources/safety.test.ts @@ -23,7 +23,7 @@ describe('resource safety', () => { test('runShield: required and optional params', async () => { const response = await client.safety.runShield({ - messages: [{ content: 'string', role: 'user', context: 'string' }], + messages: [{ content: 'string', role: 'user', name: 'name' }], params: { foo: true }, shield_id: 'shield_id', }); diff --git a/tests/api-resources/vector-dbs.test.ts b/tests/api-resources/vector-dbs.test.ts deleted file mode 100644 index 4af5adf..0000000 --- a/tests/api-resources/vector-dbs.test.ts +++ /dev/null @@ -1,87 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; -import { Response } from 'node-fetch'; - -const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' }); - -describe('resource vectorDBs', () => { - test('retrieve', async () => { - const responsePromise = client.vectorDBs.retrieve('vector_db_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('retrieve: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.vectorDBs.retrieve('vector_db_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); - - test('list', async () => { - const responsePromise = client.vectorDBs.list(); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.vectorDBs.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( - LlamaStackClient.NotFoundError, - ); - }); - - test('register: only required params', async () => { - const responsePromise = client.vectorDBs.register({ - 
embedding_model: 'embedding_model', - vector_db_id: 'vector_db_id', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('register: required and optional params', async () => { - const response = await client.vectorDBs.register({ - embedding_model: 'embedding_model', - vector_db_id: 'vector_db_id', - embedding_dimension: 0, - provider_id: 'provider_id', - provider_vector_db_id: 'provider_vector_db_id', - vector_db_name: 'vector_db_name', - }); - }); - - test('unregister', async () => { - const responsePromise = client.vectorDBs.unregister('vector_db_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('unregister: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.vectorDBs.unregister('vector_db_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(LlamaStackClient.NotFoundError); - }); -}); From b982ff9e933b889f2baed560d22bac15753766c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 01:52:04 +0000 Subject: [PATCH 26/26] release: 0.3.0-alpha.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 52 +++++++++++++++++++++++++++++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ed9acd2..1ae2526 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.2.23-alpha.1" + ".": "0.3.0-alpha.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e40a318..2d99f43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,57 @@ # Changelog +## 0.3.0-alpha.1 (2025-10-13) + +Full Changelog: [v0.2.23-alpha.1...v0.3.0-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.23-alpha.1...v0.3.0-alpha.1) + +### ⚠ BREAKING CHANGES + +* **api:** use input_schema instead of parameters for tools +* **api:** fixes to remove deprecated inference resources + +### Features + +* **api:** expires_after changes for /files ([a0b0fb7](https://github.com/llamastack/llama-stack-client-typescript/commit/a0b0fb7aa74668f3f6996c178f9654723b8b0f22)) +* **api:** fix file batches SDK to list_files ([25a0f10](https://github.com/llamastack/llama-stack-client-typescript/commit/25a0f10cffa7de7f1457d65c97259911bc70ab0a)) +* **api:** fixes to remove deprecated inference resources ([367d775](https://github.com/llamastack/llama-stack-client-typescript/commit/367d775c3d5a2fd85bf138d2b175e91b7c185913)) +* **api:** fixes to URLs ([e4f7840](https://github.com/llamastack/llama-stack-client-typescript/commit/e4f78407f74f3ba7597de355c314e1932dd94761)) +* **api:** move post_training and eval under alpha namespace 
([aec1d5f](https://github.com/llamastack/llama-stack-client-typescript/commit/aec1d5ff198473ba736bf543ad00c6626cab9b81)) +* **api:** moving { rerank, agents } to `client.alpha.` ([793e069](https://github.com/llamastack/llama-stack-client-typescript/commit/793e0694d75c2af4535bf991d5858cd1f21300b4)) +* **api:** removing openai/v1 ([b5432de](https://github.com/llamastack/llama-stack-client-typescript/commit/b5432de2ad56ff0d2fd5a5b8e1755b5237616b60)) +* **api:** SDKs for vector store file batches ([b0676c8](https://github.com/llamastack/llama-stack-client-typescript/commit/b0676c837bbd835276fea3fe12f435afdbb75ef7)) +* **api:** SDKs for vector store file batches apis ([88731bf](https://github.com/llamastack/llama-stack-client-typescript/commit/88731bfecd6f548ae79cbe2a1125620e488c42a3)) +* **api:** several updates including Conversations, Responses changes, etc. ([e0728d5](https://github.com/llamastack/llama-stack-client-typescript/commit/e0728d5dd59be8723d9f967d6164351eb05528d1)) +* **api:** tool api (input_schema, etc.) changes ([06f2bca](https://github.com/llamastack/llama-stack-client-typescript/commit/06f2bcaf0df2e5d462cbe2d9ef3704ab0cfe9248)) +* **api:** updates to vector_store, etc. ([19535c2](https://github.com/llamastack/llama-stack-client-typescript/commit/19535c27147bf6f6861b807d9eeee471b5625148)) +* **api:** updating post /v1/files to have correct multipart/form-data ([f1cf9d6](https://github.com/llamastack/llama-stack-client-typescript/commit/f1cf9d68b6b2569dfb5ea3e2d2c33eff1a832e47)) +* **api:** use input_schema instead of parameters for tools ([8910a12](https://github.com/llamastack/llama-stack-client-typescript/commit/8910a121146aeddcb8f400101e6a2232245097e0)) + + +### Bug Fixes + +* **api:** another fix to capture correct responses.create() params ([6acae91](https://github.com/llamastack/llama-stack-client-typescript/commit/6acae910db289080e8f52864f1bdf6d7951d1c3b)) +* **api:** fix the ToolDefParam updates ([5cee3d6](https://github.com/llamastack/llama-stack-client-typescript/commit/5cee3d69650a4c827e12fc046c1d2ec3b2fa9126)) +* fix stream event model reference ([a71b421](https://github.com/llamastack/llama-stack-client-typescript/commit/a71b421152a609e49e76d01c6e4dd46eb3dbfae0)) + + +### Chores + +* extract some types in mcp docs ([dcc7bb8](https://github.com/llamastack/llama-stack-client-typescript/commit/dcc7bb8b4d940982c2e9c6d1a541636e99fdc5ff)) +* **internal:** codegen related update ([252e0a2](https://github.com/llamastack/llama-stack-client-typescript/commit/252e0a2a38bd8aedab91b401c440a9b10c056cec)) +* **internal:** codegen related update ([34da720](https://github.com/llamastack/llama-stack-client-typescript/commit/34da720c34c35dafb38775243d28dfbdce2497db)) +* **internal:** fix incremental formatting in some cases ([c5c8292](https://github.com/llamastack/llama-stack-client-typescript/commit/c5c8292b631c678efff5498bbab9f5a43bee50b6)) +* **internal:** use npm pack for build uploads ([a246793](https://github.com/llamastack/llama-stack-client-typescript/commit/a24679300cff93fea8ad4bc85e549ecc88198d58)) + + +### Documentation + +* update examples ([17b9eb3](https://github.com/llamastack/llama-stack-client-typescript/commit/17b9eb3c40957b63d2a71f7fc21944abcc720d80)) + + +### Build System + +* Bump version to 0.2.23 ([16e05ed](https://github.com/llamastack/llama-stack-client-typescript/commit/16e05ed9798233375e19098992632d223c3f5d8d)) + ## 0.2.23-alpha.1 (2025-09-26) Full Changelog: 
[v0.2.19-alpha.1...v0.2.23-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.19-alpha.1...v0.2.23-alpha.1) diff --git a/package.json b/package.json index 6daf907..aa18833 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "llama-stack-client", - "version": "0.2.23", + "version": "0.3.0-alpha.1", "description": "The official TypeScript library for the Llama Stack Client API", "author": "Llama Stack Client ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 834272b..3c51123 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '0.2.23'; // x-release-please-version +export const VERSION = '0.3.0-alpha.1'; // x-release-please-version
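
Notes for consumers of this series: the patches above delete the vectorDBs resource in
favor of the OpenAI-compatible vectorStores surface, and safety.runShield now takes
OpenAI-style chat messages. The following is a minimal sketch of post-upgrade calling
code, restricted to methods and parameters that appear in these diffs; the base URL,
store name, file ID, and shield ID are illustrative placeholders, not values taken from
the series.

import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({
  // Placeholder endpoint; the tests in this series point at a local mock server.
  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

async function main() {
  // Replaces client.vectorDBs.register(...): VectorStoreCreateParams no longer carries
  // embedding_model, embedding_dimension, or provider_id, so only the generic
  // OpenAI-compatible fields remain.
  const store = await client.vectorStores.create({
    name: 'my-docs', // (Optional) A name for the vector store
    file_ids: ['file-abc123'], // List of file IDs to include in the vector store
    metadata: { project: 'demo' }, // Key-value pairs attached to the vector store
  });
  console.log(store);

  // runShield messages are now OpenAI message params ({ role, content, name? })
  // rather than the old message shapes that carried a `context` field.
  const result = await client.safety.runShield({
    shield_id: 'my-shield', // placeholder; use a shield configured on your stack
    messages: [{ role: 'user', content: 'Is this text safe?' }],
    params: {},
  });
  console.log(result);
}

main();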