diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 763462f..43fd5a7 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -9,9 +9,7 @@
   "postCreateCommand": "yarn install",
   "customizations": {
     "vscode": {
-      "extensions": [
-        "esbenp.prettier-vscode"
-      ]
+      "extensions": ["esbenp.prettier-vscode"]
     }
   }
 }
diff --git a/.gitignore b/.gitignore
index d98d51a..2412bb7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@
 dist
 dist-deno
 /*.tgz
 .idea/
+.eslintcache
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ed9acd2..193b35f 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.2.23-alpha.1"
+  ".": "0.3.1-alpha.1"
 }
diff --git a/.stats.yml b/.stats.yml
index fa9edfc..7196fab 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml
-openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3
-config_hash: 0358112cc0f3d880b4d55debdbe1cfa3
+configured_endpoints: 71
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-96255baaaf07826c5292cbb73073ab40aa7073c53996c3be49441a8ecf95c8ee.yml
+openapi_spec_hash: fae0303cbf75bd79be4ae084db015401
+config_hash: a3829dbdaa491194d01f399784d532cd
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e40a318..76eea73 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,69 @@
 # Changelog
 
+## 0.3.1-alpha.1 (2025-10-29)
+
+Full Changelog: [v0.2.23-alpha.1...v0.3.1-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.23-alpha.1...v0.3.1-alpha.1)
+
+### ⚠ BREAKING CHANGES
+
+* **api:** /v1/inspect only lists v1 apis by default
+* **api:** use input_schema instead of parameters for tools
+* **api:** fixes to remove deprecated inference resources
+
+### Features
+
+* **api:** expires_after changes for /files ([a0b0fb7](https://github.com/llamastack/llama-stack-client-typescript/commit/a0b0fb7aa74668f3f6996c178f9654723b8b0f22))
+* **api:** fix file batches SDK to list_files ([25a0f10](https://github.com/llamastack/llama-stack-client-typescript/commit/25a0f10cffa7de7f1457d65c97259911bc70ab0a))
+* **api:** fixes to remove deprecated inference resources ([367d775](https://github.com/llamastack/llama-stack-client-typescript/commit/367d775c3d5a2fd85bf138d2b175e91b7c185913))
+* **api:** fixes to URLs ([e4f7840](https://github.com/llamastack/llama-stack-client-typescript/commit/e4f78407f74f3ba7597de355c314e1932dd94761))
+* **api:** manual updates ([7d2e375](https://github.com/llamastack/llama-stack-client-typescript/commit/7d2e375bde7bd04ae58cc49fcd5ab7b134b25640))
+* **api:** manual updates ([0302d54](https://github.com/llamastack/llama-stack-client-typescript/commit/0302d54398d87127ab0e9221a8a92760123d235b))
+* **api:** manual updates ([98a596f](https://github.com/llamastack/llama-stack-client-typescript/commit/98a596f677fe2790e4b4765362aa19b6cff8b97e))
+* **api:** manual updates ([c6fb0b6](https://github.com/llamastack/llama-stack-client-typescript/commit/c6fb0b67d8f2e641c13836a17400e51df0b029f1))
+* **api:** move datasets to beta, vector_db -> vector_store ([f32c0be](https://github.com/llamastack/llama-stack-client-typescript/commit/f32c0becb1ec0d66129b7fcaa06de3323ee703da))
+* **api:** move post_training and eval under alpha namespace ([aec1d5f](https://github.com/llamastack/llama-stack-client-typescript/commit/aec1d5ff198473ba736bf543ad00c6626cab9b81))
+* **api:** moving { rerank, agents } to `client.alpha.` ([793e069](https://github.com/llamastack/llama-stack-client-typescript/commit/793e0694d75c2af4535bf991d5858cd1f21300b4))
+* **api:** removing openai/v1 ([b5432de](https://github.com/llamastack/llama-stack-client-typescript/commit/b5432de2ad56ff0d2fd5a5b8e1755b5237616b60))
+* **api:** SDKs for vector store file batches ([b0676c8](https://github.com/llamastack/llama-stack-client-typescript/commit/b0676c837bbd835276fea3fe12f435afdbb75ef7))
+* **api:** SDKs for vector store file batches apis ([88731bf](https://github.com/llamastack/llama-stack-client-typescript/commit/88731bfecd6f548ae79cbe2a1125620e488c42a3))
+* **api:** several updates including Conversations, Responses changes, etc. ([e0728d5](https://github.com/llamastack/llama-stack-client-typescript/commit/e0728d5dd59be8723d9f967d6164351eb05528d1))
+* **api:** sync ([7d85013](https://github.com/llamastack/llama-stack-client-typescript/commit/7d850139d1327a215312a82c98b3428ebc7e5f68))
+* **api:** tool api (input_schema, etc.) changes ([06f2bca](https://github.com/llamastack/llama-stack-client-typescript/commit/06f2bcaf0df2e5d462cbe2d9ef3704ab0cfe9248))
+* **api:** updates to vector_store, etc. ([19535c2](https://github.com/llamastack/llama-stack-client-typescript/commit/19535c27147bf6f6861b807d9eeee471b5625148))
+* **api:** updating post /v1/files to have correct multipart/form-data ([f1cf9d6](https://github.com/llamastack/llama-stack-client-typescript/commit/f1cf9d68b6b2569dfb5ea3e2d2c33eff1a832e47))
+* **api:** use input_schema instead of parameters for tools ([8910a12](https://github.com/llamastack/llama-stack-client-typescript/commit/8910a121146aeddcb8f400101e6a2232245097e0))
+* **api:** vector_db_id -> vector_store_id ([079d89d](https://github.com/llamastack/llama-stack-client-typescript/commit/079d89d6522cb4f2eed5e5a09962d94ad800e883))
+
+
+### Bug Fixes
+
+* **api:** another fix to capture correct responses.create() params ([6acae91](https://github.com/llamastack/llama-stack-client-typescript/commit/6acae910db289080e8f52864f1bdf6d7951d1c3b))
+* **api:** fix the ToolDefParam updates ([5cee3d6](https://github.com/llamastack/llama-stack-client-typescript/commit/5cee3d69650a4c827e12fc046c1d2ec3b2fa9126))
+* **client:** incorrect offset pagination check ([257285f](https://github.com/llamastack/llama-stack-client-typescript/commit/257285f33bb989c9040580dd24251d05f9657bb0))
+* fix stream event model reference ([a71b421](https://github.com/llamastack/llama-stack-client-typescript/commit/a71b421152a609e49e76d01c6e4dd46eb3dbfae0))
+
+
+### Chores
+
+* **api:** /v1/inspect only lists v1 apis by default ([e30f51c](https://github.com/llamastack/llama-stack-client-typescript/commit/e30f51c704c39129092255c040bbf5ad90ed0b07))
+* extract some types in mcp docs ([dcc7bb8](https://github.com/llamastack/llama-stack-client-typescript/commit/dcc7bb8b4d940982c2e9c6d1a541636e99fdc5ff))
+* fix readme example ([402f930](https://github.com/llamastack/llama-stack-client-typescript/commit/402f9301d033bb230c9714104fbfa554f3f7cd8f))
+* fix readme examples ([4d5517c](https://github.com/llamastack/llama-stack-client-typescript/commit/4d5517c2b9af2eb6994f5e4b2c033c95d268fb5c))
+* **internal:** codegen related update ([252e0a2](https://github.com/llamastack/llama-stack-client-typescript/commit/252e0a2a38bd8aedab91b401c440a9b10c056cec))
+* **internal:** codegen related update ([34da720](https://github.com/llamastack/llama-stack-client-typescript/commit/34da720c34c35dafb38775243d28dfbdce2497db))
+* **internal:** fix incremental formatting in some cases ([c5c8292](https://github.com/llamastack/llama-stack-client-typescript/commit/c5c8292b631c678efff5498bbab9f5a43bee50b6))
+* **internal:** use npm pack for build uploads ([a246793](https://github.com/llamastack/llama-stack-client-typescript/commit/a24679300cff93fea8ad4bc85e549ecc88198d58))
+
+
+### Documentation
+
+* update examples ([17b9eb3](https://github.com/llamastack/llama-stack-client-typescript/commit/17b9eb3c40957b63d2a71f7fc21944abcc720d80))
+
+
+### Build System
+
+* Bump version to 0.2.23 ([16e05ed](https://github.com/llamastack/llama-stack-client-typescript/commit/16e05ed9798233375e19098992632d223c3f5d8d))
+
 ## 0.2.23-alpha.1 (2025-09-26)
 
 Full Changelog: [v0.2.19-alpha.1...v0.2.23-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.19-alpha.1...v0.2.23-alpha.1)
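The README changes below track the largest breaking change in this release: the legacy `client.inference.chatCompletion` surface is replaced by the OpenAI-compatible `client.chat.completions.create`, with `model_id` renamed to `model`. A minimal before/after migration sketch, using a placeholder model name:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Before (0.2.x):
// const res = await client.inference.chatCompletion({
//   messages: [{ content: 'Hello', role: 'user' }],
//   model_id: 'my-model', // placeholder
// });

// After (0.3.x):
const completion = await client.chat.completions.create({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'my-model', // placeholder
});
```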
diff --git a/README.md b/README.md
index a27b8c1..c0f0665 100644
--- a/README.md
+++ b/README.md
@@ -41,13 +41,13 @@ import LlamaStackClient from 'llama-stack-client';
 
 const client = new LlamaStackClient();
 
-const stream = await client.inference.chatCompletion({
+const stream = await client.chat.completions.create({
   messages: [{ content: 'string', role: 'user' }],
-  model_id: 'model_id',
+  model: 'model',
   stream: true,
 });
-for await (const chatCompletionResponseStreamChunk of stream) {
-  console.log(chatCompletionResponseStreamChunk.completion_message);
+for await (const chatCompletionChunk of stream) {
+  console.log(chatCompletionChunk);
 }
 ```
@@ -64,11 +64,11 @@ import LlamaStackClient from 'llama-stack-client';
 
 const client = new LlamaStackClient();
 
-const params: LlamaStackClient.InferenceChatCompletionParams = {
+const params: LlamaStackClient.Chat.CompletionCreateParams = {
   messages: [{ content: 'string', role: 'user' }],
-  model_id: 'model_id',
+  model: 'model',
 };
-const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion(
+const completion: LlamaStackClient.Chat.CompletionCreateResponse = await client.chat.completions.create(
   params,
 );
 ```
@@ -113,8 +113,8 @@ a subclass of `APIError` will be thrown:
 
 ```ts
-const chatCompletionResponse = await client.inference
-  .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const completion = await client.chat.completions
+  .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
   .catch(async (err) => {
     if (err instanceof LlamaStackClient.APIError) {
       console.log(err.status); // 400
@@ -155,7 +155,7 @@ const client = new LlamaStackClient({
 });
 
 // Or, configure per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
   maxRetries: 5,
 });
 ```
@@ -172,7 +172,7 @@ const client = new LlamaStackClient({
 });
 
 // Override per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -193,17 +193,17 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 ```ts
 const client = new LlamaStackClient();
 
-const response = await client.inference
-  .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const response = await client.chat.completions
+  .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
   .asResponse();
 console.log(response.headers.get('X-My-Header'));
 console.log(response.statusText); // access the underlying Response object
 
-const { data: chatCompletionResponse, response: raw } = await client.inference
-  .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const { data: completion, response: raw } = await client.chat.completions
+  .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
   .withResponse();
 console.log(raw.headers.get('X-My-Header'));
-console.log(chatCompletionResponse.completion_message);
+console.log(completion);
 ```
 
 ### Making custom/undocumented requests
@@ -307,8 +307,8 @@ const client = new LlamaStackClient({
 });
 
 // Override per-request:
-await client.inference.chatCompletion(
-  { messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' },
+await client.chat.completions.create(
+  { messages: [{ content: 'string', role: 'user' }], model: 'model' },
   {
     httpAgent: new http.Agent({ keepAlive: false }),
   },
diff --git a/api.md b/api.md
index 01d88a5..f79d69d 100644
--- a/api.md
+++ b/api.md
@@ -2,27 +2,18 @@
 
 Types:
 
-- AgentConfig
-- BatchCompletion
-- ChatCompletionResponse
 - CompletionMessage
-- ContentDelta
 - Document
 - InterleavedContent
 - InterleavedContentItem
 - Message
-- Metric
 - ParamType
 - QueryConfig
-- QueryGeneratorConfig
 - QueryResult
-- ResponseFormat
 - SafetyViolation
-- SamplingParams
 - ScoringResult
 - SystemMessage
 - ToolCall
-- ToolParamDefinition
 - ToolResponseMessage
 - UserMessage
@@ -45,14 +36,12 @@ Types:
 
-- ListToolsResponse
-- Tool
 - ToolListResponse
 
 Methods:
 
 - client.tools.list({ ...params }) -> ToolListResponse
-- client.tools.get(toolName) -> Tool
+- client.tools.get(toolName) -> ToolDef
 
 # ToolRuntime
@@ -85,10 +74,10 @@ Types:
 
 Methods:
 
-- client.responses.create({ ...params }) -> ResponseObject
-- client.responses.retrieve(responseId) -> ResponseObject
-- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
-- client.responses.delete(responseId) -> ResponseDeleteResponse
+- client.responses.create({ ...params }) -> ResponseObject
+- client.responses.retrieve(responseId) -> ResponseObject
+- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
+- client.responses.delete(responseId) -> ResponseDeleteResponse
 
 ## InputItems
 
@@ -98,110 +87,35 @@ Types:
 
 Methods:
 
-- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
+- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
 
-# Agents
+# Conversations
 
 Types:
 
-- InferenceStep
-- MemoryRetrievalStep
-- ShieldCallStep
-- ToolExecutionStep
-- ToolResponse
-- AgentCreateResponse
-- AgentRetrieveResponse
-- AgentListResponse
+- ConversationObject
+- ConversationDeleteResponse
 
 Methods:
 
-- client.agents.create({ ...params }) -> AgentCreateResponse
-- client.agents.retrieve(agentId) -> AgentRetrieveResponse
-- client.agents.list({ ...params }) -> AgentListResponse
-- client.agents.delete(agentId) -> void
+- client.conversations.create({ ...params }) -> ConversationObject
+- client.conversations.retrieve(conversationId) -> ConversationObject
+- client.conversations.update(conversationId, { ...params }) -> ConversationObject
+- client.conversations.delete(conversationId) -> ConversationDeleteResponse
 
-## Session
+## Items
 
 Types:
 
-- Session
-- SessionCreateResponse
-- SessionListResponse
+- ItemCreateResponse
+- ItemListResponse
+- ItemGetResponse
 
 Methods:
 
-- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
-- client.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
-- client.agents.session.list(agentId, { ...params }) -> SessionListResponse
-- client.agents.session.delete(agentId, sessionId) -> void
-
-## Steps
-
-Types:
-
-- StepRetrieveResponse
-
-Methods:
-
-- client.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse
-
-## Turn
-
-Types:
-
-- AgentTurnResponseStreamChunk
-- Turn
-- TurnResponseEvent
-- TurnResponseEventPayload
-
-Methods:
-
-- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
-- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
-- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
-
-# Datasets
-
-Types:
-
-- ListDatasetsResponse
-- DatasetRetrieveResponse
-- DatasetListResponse
-- DatasetIterrowsResponse
-- DatasetRegisterResponse
-
-Methods:
-
-- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
-- client.datasets.list() -> DatasetListResponse
-- client.datasets.appendrows(datasetId, { ...params }) -> void
-- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
-- client.datasets.register({ ...params }) -> DatasetRegisterResponse
-- client.datasets.unregister(datasetId) -> void
-
-# Eval
-
-Types:
-
-- BenchmarkConfig
-- EvalCandidate
-- EvaluateResponse
-- Job
-
-Methods:
-
-- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.runEval(benchmarkId, { ...params }) -> Job
-- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
-
-## Jobs
-
-Methods:
-
-- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
-- client.eval.jobs.cancel(benchmarkId, jobId) -> void
-- client.eval.jobs.status(benchmarkId, jobId) -> Job
 
 # Inspect
 
@@ -217,26 +131,6 @@ Methods:
 
 - client.inspect.health() -> HealthInfo
 - client.inspect.version() -> VersionInfo
 
-# Inference
-
-Types:
-
-- ChatCompletionResponseStreamChunk
-- CompletionResponse
-- EmbeddingsResponse
-- TokenLogProbs
-- InferenceBatchChatCompletionResponse
-- InferenceRerankResponse
-
-Methods:
-
-- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse
-- client.inference.batchCompletion({ ...params }) -> BatchCompletion
-- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse
-- client.inference.completion({ ...params }) -> CompletionResponse
-- client.inference.embeddings({ ...params }) -> EmbeddingsResponse
-- client.inference.rerank({ ...params }) -> InferenceRerankResponse
-
 # Embeddings
 
 Types:
 
@@ -245,7 +139,7 @@ Types:
 
 Methods:
 
-- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
+- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
 
 # Chat
 
@@ -263,9 +157,9 @@ Types:
 
 Methods:
 
-- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
-- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
-- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
+- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
+- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
+- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
 
 # Completions
 
@@ -275,7 +169,7 @@ Types:
 
 Methods:
 
-- client.completions.create({ ...params }) -> CompletionCreateResponse
+- client.completions.create({ ...params }) -> CompletionCreateResponse
 
 # VectorIo
 
@@ -288,22 +182,6 @@ Methods:
 
 - client.vectorIo.insert({ ...params }) -> void
 - client.vectorIo.query({ ...params }) -> QueryChunksResponse
 
-# VectorDBs
-
-Types:
-
-- ListVectorDBsResponse
-- VectorDBRetrieveResponse
-- VectorDBListResponse
-- VectorDBRegisterResponse
-
-Methods:
-
-- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse
-- client.vectorDBs.list() -> VectorDBListResponse
-- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse
-- client.vectorDBs.unregister(vectorDBId) -> void
-
 # VectorStores
 
 Types:
 
@@ -315,12 +193,12 @@ Types:
 
 Methods:
 
-- client.vectorStores.create({ ...params }) -> VectorStore
-- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
-- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
-- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
-- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
-- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
+- client.vectorStores.create({ ...params }) -> VectorStore
+- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
+- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
+- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
 
 ## Files
 
@@ -332,12 +210,26 @@ Types:
 
 Methods:
 
-- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
-- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
-- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
-- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
+- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
+- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+
+## FileBatches
+
+Types:
+
+- ListVectorStoreFilesInBatchResponse
+- VectorStoreFileBatches
+
+Methods:
+
+- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage
 
 # Models
 
@@ -356,41 +248,9 @@ Methods:
 
 ## OpenAI
 
-Types:
-
-- OpenAIListResponse
-
 Methods:
 
-- client.models.openai.list() -> OpenAIListResponse
-
-# PostTraining
-
-Types:
-
-- AlgorithmConfig
-- ListPostTrainingJobsResponse
-- PostTrainingJob
-
-Methods:
-
-- client.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob
-- client.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob
-
-## Job
-
-Types:
-
-- JobListResponse
-- JobArtifactsResponse
-- JobStatusResponse
-
-Methods:
-
-- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data>
-- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
-- client.postTraining.job.cancel({ ...params }) -> void
-- client.postTraining.job.status({ ...params }) -> JobStatusResponse
+- client.models.openai.list() -> ModelListResponse
 
 # Providers
 
@@ -413,7 +273,7 @@ Types:
 
 Methods:
 
-- client.routes.list() -> RouteListResponse
+- client.routes.list({ ...params }) -> RouteListResponse
 
 # Moderations
 
@@ -423,7 +283,7 @@ Types:
 
 Methods:
 
-- client.moderations.create({ ...params }) -> CreateResponse
+- client.moderations.create({ ...params }) -> CreateResponse
 
 # Safety
 
@@ -460,32 +320,6 @@ Methods:
 
 - client.syntheticDataGeneration.generate({ ...params }) -> SyntheticDataGenerationResponse
 
-# Telemetry
-
-Types:
-
-- Event
-- QueryCondition
-- QuerySpansResponse
-- SpanWithStatus
-- Trace
-- TelemetryGetSpanResponse
-- TelemetryGetSpanTreeResponse
-- TelemetryQueryMetricsResponse
-- TelemetryQuerySpansResponse
-- TelemetryQueryTracesResponse
-
-Methods:
-
-- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse
-- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
-- client.telemetry.getTrace(traceId) -> Trace
-- client.telemetry.logEvent({ ...params }) -> void
-- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse
-- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
-- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
-- client.telemetry.saveSpansToDataset({ ...params }) -> void
-
 # Scoring
 
@@ -513,20 +347,6 @@ Methods:
 
 - client.scoringFunctions.list() -> ScoringFunctionListResponse
 - client.scoringFunctions.register({ ...params }) -> void
 
-# Benchmarks
-
-Types:
-
-- Benchmark
-- ListBenchmarksResponse
-- BenchmarkListResponse
-
-Methods:
-
-- client.benchmarks.retrieve(benchmarkId) -> Benchmark
-- client.benchmarks.list() -> BenchmarkListResponse
-- client.benchmarks.register({ ...params }) -> void
-
 # Files
 
 Types:
 
@@ -538,8 +358,34 @@ Types:
 
 Methods:
 
-- client.files.create({ ...params }) -> File
-- client.files.retrieve(fileId) -> File
-- client.files.list({ ...params }) -> FilesOpenAICursorPage
-- client.files.delete(fileId) -> DeleteFileResponse
-- client.files.content(fileId) -> unknown
+- client.files.create({ ...params }) -> File
+- client.files.retrieve(fileId) -> File
+- client.files.list({ ...params }) -> FilesOpenAICursorPage
+- client.files.delete(fileId) -> DeleteFileResponse
+- client.files.content(fileId) -> unknown
+
+# Alpha
+
+## Inference
+
+## PostTraining
+
+### Job
+
+## Benchmarks
+
+## Eval
+
+### Jobs
+
+## Agents
+
+### Session
+
+### Steps
+
+### Turn
+
+# Beta
+
+## Datasets
diff --git a/package.json b/package.json
index 6daf907..32dcc66 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "llama-stack-client",
-  "version": "0.2.23",
+  "version": "0.3.1-alpha.1",
   "description": "The official TypeScript library for the Llama Stack Client API",
   "author": "Llama Stack Client ",
   "types": "dist/index.d.ts",
diff --git a/release-please-config.json b/release-please-config.json
index 624ed99..1ebd0bd 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -60,8 +60,5 @@
     }
   ],
   "release-type": "node",
-  "extra-files": [
-    "src/version.ts",
-    "README.md"
-  ]
+  "extra-files": ["src/version.ts", "README.md"]
 }
diff --git a/scripts/fast-format b/scripts/fast-format
index 03fb1a3..8a8e9d5 100755
--- a/scripts/fast-format
+++ b/scripts/fast-format
@@ -35,6 +35,6 @@ echo "==> Running prettier --write"
 PRETTIER_FILES="$(grep '\.\(js\|json\)$' "$FILE_LIST" || true)"
 if ! [ -z "$PRETTIER_FILES" ]; then
   echo "$PRETTIER_FILES" | xargs ./node_modules/.bin/prettier \
-    --write --cache --cache-strategy metadata \
+    --write --cache --cache-strategy metadata --no-error-on-unmatched-pattern \
     '!**/dist' '!**/*.ts' '!**/*.mts' '!**/*.cts' '!**/*.js' '!**/*.mjs' '!**/*.cjs'
 fi
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 211e0b9..831e716 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -12,9 +12,11 @@ if [[ "$SIGNED_URL" == "null" ]]; then
   exit 1
 fi
 
-UPLOAD_RESPONSE=$(tar "${BASE_PATH:+-C$BASE_PATH}" -cz "${ARTIFACT_PATH:-dist}" | curl -v -X PUT \
+TARBALL=$(cd dist && npm pack --silent)
+
+UPLOAD_RESPONSE=$(curl -v -X PUT \
   -H "Content-Type: application/gzip" \
-  --data-binary @- "$SIGNED_URL" 2>&1)
+  --data-binary "@dist/$TARBALL" "$SIGNED_URL" 2>&1)
 
 if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
   echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
diff --git a/src/index.ts b/src/index.ts
index 68d219d..682a884 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -13,13 +13,6 @@ import {
 } from './pagination';
 import * as Uploads from './uploads';
 import * as API from './resources/index';
-import {
-  Benchmark,
-  BenchmarkListResponse,
-  BenchmarkRegisterParams,
-  Benchmarks,
-  ListBenchmarksResponse,
-} from './resources/benchmarks';
 import {
   CompletionCreateParams,
   CompletionCreateParamsNonStreaming,
@@ -27,17 +20,6 @@ import {
   CompletionCreateResponse,
   Completions,
 } from './resources/completions';
-import {
-  DatasetAppendrowsParams,
-  DatasetIterrowsParams,
-  DatasetIterrowsResponse,
-  DatasetListResponse,
-  DatasetRegisterParams,
-  DatasetRegisterResponse,
-  DatasetRetrieveResponse,
-  Datasets,
-  ListDatasetsResponse,
-} from './resources/datasets';
 import { CreateEmbeddingsResponse, EmbeddingCreateParams, Embeddings } from './resources/embeddings';
 import {
   DeleteFileResponse,
@@ -49,29 +31,10 @@ import {
   FilesOpenAICursorPage,
   ListFilesResponse,
 } from './resources/files';
-import {
-  ChatCompletionResponseStreamChunk,
-  CompletionResponse,
-  EmbeddingsResponse,
-  Inference,
-  InferenceBatchChatCompletionParams,
-  InferenceBatchChatCompletionResponse,
-  InferenceBatchCompletionParams,
-  InferenceChatCompletionParams,
-  InferenceChatCompletionParamsNonStreaming,
-  InferenceChatCompletionParamsStreaming,
-  InferenceCompletionParams,
-  InferenceCompletionParamsNonStreaming,
-  InferenceCompletionParamsStreaming,
-  InferenceEmbeddingsParams,
-  InferenceRerankParams,
-  InferenceRerankResponse,
-  TokenLogProbs,
-} from './resources/inference';
 import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect';
 import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations';
 import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers';
-import { ListRoutesResponse, RouteListResponse, Routes } from './resources/routes';
+import { ListRoutesResponse, RouteListParams, RouteListResponse, Routes } from './resources/routes';
 import { RunShieldResponse, Safety, SafetyRunShieldParams } from './resources/safety';
 import {
   Scoring,
@@ -100,25 +63,6 @@ import {
   SyntheticDataGenerationGenerateParams,
   SyntheticDataGenerationResponse,
 } from './resources/synthetic-data-generation';
-import {
-  Event,
-  QueryCondition,
-  QuerySpansResponse,
-  SpanWithStatus,
-  Telemetry,
-  TelemetryGetSpanResponse,
-  TelemetryGetSpanTreeParams,
-  TelemetryGetSpanTreeResponse,
-  TelemetryLogEventParams,
-  TelemetryQueryMetricsParams,
-  TelemetryQueryMetricsResponse,
-  TelemetryQuerySpansParams,
-  TelemetryQuerySpansResponse,
-  TelemetryQueryTracesParams,
-  TelemetryQueryTracesResponse,
-  TelemetrySaveSpansToDatasetParams,
-  Trace,
-} from './resources/telemetry';
 import {
   ListToolGroupsResponse,
   ToolGroup,
@@ -126,46 +70,23 @@ import {
   ToolgroupRegisterParams,
   Toolgroups,
 } from './resources/toolgroups';
-import { ListToolsResponse, Tool, ToolListParams, ToolListResponse, Tools } from './resources/tools';
-import {
-  ListVectorDBsResponse,
-  VectorDBListResponse,
-  VectorDBRegisterParams,
-  VectorDBRegisterResponse,
-  VectorDBRetrieveResponse,
-  VectorDBs,
-} from './resources/vector-dbs';
+import { ToolListParams, ToolListResponse, Tools } from './resources/tools';
 import {
   QueryChunksResponse,
   VectorIo,
   VectorIoInsertParams,
   VectorIoQueryParams,
 } from './resources/vector-io';
-import {
-  AgentCreateParams,
-  AgentCreateResponse,
-  AgentListParams,
-  AgentListResponse,
-  AgentRetrieveResponse,
-  Agents,
-  InferenceStep,
-  MemoryRetrievalStep,
-  ShieldCallStep,
-  ToolExecutionStep,
-  ToolResponse,
-} from './resources/agents/agents';
+import { Alpha } from './resources/alpha/alpha';
+import { Beta } from './resources/beta/beta';
 import { Chat, ChatCompletionChunk } from './resources/chat/chat';
 import {
-  BenchmarkConfig,
-  Eval,
-  EvalCandidate,
-  EvalEvaluateRowsAlphaParams,
-  EvalEvaluateRowsParams,
-  EvalRunEvalAlphaParams,
-  EvalRunEvalParams,
-  EvaluateResponse,
-  Job,
-} from './resources/eval/eval';
+  ConversationCreateParams,
+  ConversationDeleteResponse,
+  ConversationObject,
+  ConversationUpdateParams,
+  Conversations,
+} from './resources/conversations/conversations';
 import {
   ListModelsResponse,
   Model,
@@ -173,14 +94,6 @@ import {
   ModelRegisterParams,
   Models,
 } from './resources/models/models';
-import {
-  AlgorithmConfig,
-  ListPostTrainingJobsResponse,
-  PostTraining,
-  PostTrainingJob,
-  PostTrainingPreferenceOptimizeParams,
-  PostTrainingSupervisedFineTuneParams,
-} from './resources/post-training/post-training';
 import {
   ResponseCreateParams,
   ResponseCreateParamsNonStreaming,
@@ -328,30 +241,25 @@ export class LlamaStackClient extends Core.APIClient {
   tools: API.Tools = new API.Tools(this);
   toolRuntime: API.ToolRuntime = new API.ToolRuntime(this);
   responses: API.Responses = new API.Responses(this);
-  agents: API.Agents = new API.Agents(this);
-  datasets: API.Datasets = new API.Datasets(this);
-  eval: API.Eval = new API.Eval(this);
+  conversations: API.Conversations = new API.Conversations(this);
   inspect: API.Inspect = new API.Inspect(this);
-  inference: API.Inference = new API.Inference(this);
   embeddings: API.Embeddings = new API.Embeddings(this);
   chat: API.Chat = new API.Chat(this);
   completions: API.Completions = new API.Completions(this);
   vectorIo: API.VectorIo = new API.VectorIo(this);
-  vectorDBs: API.VectorDBs = new API.VectorDBs(this);
   vectorStores: API.VectorStores = new API.VectorStores(this);
   models: API.Models = new API.Models(this);
-  postTraining: API.PostTraining = new API.PostTraining(this);
   providers: API.Providers = new API.Providers(this);
   routes: API.Routes = new API.Routes(this);
   moderations: API.Moderations = new API.Moderations(this);
   safety: API.Safety = new API.Safety(this);
   shields: API.Shields = new API.Shields(this);
   syntheticDataGeneration: API.SyntheticDataGeneration = new API.SyntheticDataGeneration(this);
-  telemetry: API.Telemetry = new API.Telemetry(this);
   scoring: API.Scoring = new API.Scoring(this);
   scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this);
-  benchmarks: API.Benchmarks = new API.Benchmarks(this);
   files: API.Files = new API.Files(this);
+  alpha: API.Alpha = new API.Alpha(this);
+  beta: API.Beta = new API.Beta(this);
 
   /**
    * Check whether the base URL is set to its default.
@@ -408,32 +316,27 @@ LlamaStackClient.Tools = Tools;
 LlamaStackClient.ToolRuntime = ToolRuntime;
 LlamaStackClient.Responses = Responses;
 LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage;
-LlamaStackClient.Agents = Agents;
-LlamaStackClient.Datasets = Datasets;
-LlamaStackClient.Eval = Eval;
+LlamaStackClient.Conversations = Conversations;
 LlamaStackClient.Inspect = Inspect;
-LlamaStackClient.Inference = Inference;
 LlamaStackClient.Embeddings = Embeddings;
 LlamaStackClient.Chat = Chat;
 LlamaStackClient.Completions = Completions;
 LlamaStackClient.VectorIo = VectorIo;
-LlamaStackClient.VectorDBs = VectorDBs;
 LlamaStackClient.VectorStores = VectorStores;
 LlamaStackClient.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage;
 LlamaStackClient.Models = Models;
-LlamaStackClient.PostTraining = PostTraining;
 LlamaStackClient.Providers = Providers;
 LlamaStackClient.Routes = Routes;
 LlamaStackClient.Moderations = Moderations;
 LlamaStackClient.Safety = Safety;
 LlamaStackClient.Shields = Shields;
 LlamaStackClient.SyntheticDataGeneration = SyntheticDataGeneration;
-LlamaStackClient.Telemetry = Telemetry;
 LlamaStackClient.Scoring = Scoring;
 LlamaStackClient.ScoringFunctions = ScoringFunctions;
-LlamaStackClient.Benchmarks = Benchmarks;
 LlamaStackClient.Files = Files;
 LlamaStackClient.FilesOpenAICursorPage = FilesOpenAICursorPage;
+LlamaStackClient.Alpha = Alpha;
+LlamaStackClient.Beta = Beta;
 
 export declare namespace LlamaStackClient {
   export type RequestOptions = Core.RequestOptions;
@@ -458,13 +361,7 @@ export declare namespace LlamaStackClient {
     type ToolgroupRegisterParams as ToolgroupRegisterParams,
   };
 
-  export {
-    Tools as Tools,
-    type ListToolsResponse as ListToolsResponse,
-    type Tool as Tool,
-    type ToolListResponse as ToolListResponse,
-    type ToolListParams as ToolListParams,
-  };
+  export { Tools as Tools, type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams };
 
   export {
     ToolRuntime as ToolRuntime,
@@ -489,41 +386,11 @@ export declare namespace LlamaStackClient {
   };
 
   export {
-    Agents as Agents,
-    type InferenceStep as InferenceStep,
-    type MemoryRetrievalStep as MemoryRetrievalStep,
-    type ShieldCallStep as ShieldCallStep,
-    type ToolExecutionStep as ToolExecutionStep,
-    type ToolResponse as ToolResponse,
-    type AgentCreateResponse as AgentCreateResponse,
-    type AgentRetrieveResponse as AgentRetrieveResponse,
-    type AgentListResponse as AgentListResponse,
-    type AgentCreateParams as AgentCreateParams,
-    type AgentListParams as AgentListParams,
-  };
-
-  export {
-    Datasets as Datasets,
-    type ListDatasetsResponse as ListDatasetsResponse,
-    type DatasetRetrieveResponse as DatasetRetrieveResponse,
-    type DatasetListResponse as DatasetListResponse,
-    type DatasetIterrowsResponse as DatasetIterrowsResponse,
-    type DatasetRegisterResponse as DatasetRegisterResponse,
-    type DatasetAppendrowsParams as DatasetAppendrowsParams,
-    type DatasetIterrowsParams as DatasetIterrowsParams,
-    type DatasetRegisterParams as DatasetRegisterParams,
-  };
-
-  export {
-    Eval as Eval,
-    type BenchmarkConfig as BenchmarkConfig,
-    type EvalCandidate as EvalCandidate,
-    type EvaluateResponse as EvaluateResponse,
-    type Job as Job,
-    type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
-    type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
-    type EvalRunEvalParams as EvalRunEvalParams,
-    type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
+    Conversations as Conversations,
+    type ConversationObject as ConversationObject,
+    type ConversationDeleteResponse as ConversationDeleteResponse,
+    type ConversationCreateParams as ConversationCreateParams,
+    type ConversationUpdateParams as ConversationUpdateParams,
   };
 
   export {
@@ -534,26 +401,6 @@ export declare namespace LlamaStackClient {
     type VersionInfo as VersionInfo,
   };
 
-  export {
-    Inference as Inference,
-    type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
-    type CompletionResponse as CompletionResponse,
-    type EmbeddingsResponse as EmbeddingsResponse,
-    type TokenLogProbs as TokenLogProbs,
-    type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
-    type InferenceRerankResponse as InferenceRerankResponse,
-    type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
-    type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
-    type InferenceChatCompletionParams as InferenceChatCompletionParams,
-    type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
-    type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
-    type InferenceCompletionParams as InferenceCompletionParams,
-    type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
-    type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
-    type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
-    type InferenceRerankParams as InferenceRerankParams,
-  };
-
   export {
     Embeddings as Embeddings,
     type CreateEmbeddingsResponse as CreateEmbeddingsResponse,
@@ -577,15 +424,6 @@ export declare namespace LlamaStackClient {
     type VectorIoQueryParams as VectorIoQueryParams,
   };
 
-  export {
-    VectorDBs as VectorDBs,
-    type ListVectorDBsResponse as ListVectorDBsResponse,
-    type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
-    type VectorDBListResponse as VectorDBListResponse,
-    type VectorDBRegisterResponse as VectorDBRegisterResponse,
-    type VectorDBRegisterParams as VectorDBRegisterParams,
-  };
-
   export {
     VectorStores as VectorStores,
     type ListVectorStoresResponse as ListVectorStoresResponse,
@@ -607,15 +445,6 @@ export declare namespace LlamaStackClient {
     type ModelRegisterParams as ModelRegisterParams,
   };
 
-  export {
-    PostTraining as PostTraining,
-    type AlgorithmConfig as AlgorithmConfig,
-    type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
-    type PostTrainingJob as PostTrainingJob,
-    type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
-    type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
-  };
-
   export {
     Providers as Providers,
     type ListProvidersResponse as ListProvidersResponse,
@@ -626,6 +455,7 @@ export declare namespace LlamaStackClient {
     Routes as Routes,
     type ListRoutesResponse as ListRoutesResponse,
     type RouteListResponse as RouteListResponse,
+    type RouteListParams as RouteListParams,
   };
 
   export {
@@ -654,26 +484,6 @@ export declare namespace LlamaStackClient {
     type SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams,
   };
 
-  export {
-    Telemetry as Telemetry,
-    type Event as Event,
-    type QueryCondition as QueryCondition,
-    type QuerySpansResponse as QuerySpansResponse,
-    type SpanWithStatus as SpanWithStatus,
-    type Trace as Trace,
-    type TelemetryGetSpanResponse as TelemetryGetSpanResponse,
-    type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse,
-    type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse,
-    type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse,
-    type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse,
-    type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams,
-    type TelemetryLogEventParams as TelemetryLogEventParams,
-    type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams,
-    type TelemetryQuerySpansParams as TelemetryQuerySpansParams,
-    type TelemetryQueryTracesParams as TelemetryQueryTracesParams,
-    type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams,
-  };
-
   export {
     Scoring as Scoring,
     type ScoringScoreResponse as ScoringScoreResponse,
@@ -691,14 +501,6 @@ export declare namespace LlamaStackClient {
     type ScoringFunctionRegisterParams as ScoringFunctionRegisterParams,
   };
 
-  export {
-    Benchmarks as Benchmarks,
-    type Benchmark as Benchmark,
-    type ListBenchmarksResponse as ListBenchmarksResponse,
-    type BenchmarkListResponse as BenchmarkListResponse,
-    type BenchmarkRegisterParams as BenchmarkRegisterParams,
-  };
-
   export {
     Files as Files,
     type DeleteFileResponse as DeleteFileResponse,
@@ -710,27 +512,22 @@ export declare namespace LlamaStackClient {
     type FileListParams as FileListParams,
   };
 
-  export type AgentConfig = API.AgentConfig;
-  export type BatchCompletion = API.BatchCompletion;
-  export type ChatCompletionResponse = API.ChatCompletionResponse;
+  export { Alpha as Alpha };
+
+  export { Beta as Beta };
+
   export type CompletionMessage = API.CompletionMessage;
-  export type ContentDelta = API.ContentDelta;
   export type Document = API.Document;
   export type InterleavedContent = API.InterleavedContent;
   export type InterleavedContentItem = API.InterleavedContentItem;
   export type Message = API.Message;
-  export type Metric = API.Metric;
   export type ParamType = API.ParamType;
   export type QueryConfig = API.QueryConfig;
-  export type QueryGeneratorConfig = API.QueryGeneratorConfig;
   export type QueryResult = API.QueryResult;
-  export type ResponseFormat = API.ResponseFormat;
   export type SafetyViolation = API.SafetyViolation;
-  export type SamplingParams = API.SamplingParams;
   export type ScoringResult = API.ScoringResult;
   export type SystemMessage = API.SystemMessage;
   export type ToolCall = API.ToolCall;
-  export type ToolParamDefinition = API.ToolParamDefinition;
   export type ToolResponseMessage = API.ToolResponseMessage;
   export type UserMessage = API.UserMessage;
 }
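The `src/index.ts` changes above also show where the remaining pre-GA surfaces went: instead of root-level `agents`, `eval`, `postTraining`, `benchmarks`, and `datasets` accessors, the client now exposes `alpha` and `beta` namespaces. A rough sketch of the new shape (the exact members of each namespace follow the `# Alpha` and `# Beta` headings in `api.md` above, not this example):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Stable v1 resources stay on the client root:
const models = await client.models.list();

// Pre-GA resources moved under namespaced accessors:
const alpha = client.alpha; // agents, eval, post-training, benchmarks, inference
const beta = client.beta;   // datasets
```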
diff --git a/src/pagination.ts b/src/pagination.ts
index 79ab3eb..61184a7 100644
--- a/src/pagination.ts
+++ b/src/pagination.ts
@@ -48,11 +48,7 @@ export class DatasetsIterrows extends AbstractPage implements Datase
   }
 
   nextPageInfo(): PageInfo | null {
-    const offset = this.next_index;
-    if (!offset) {
-      return null;
-    }
-
+    const offset = this.next_index ?? 0;
     const length = this.getPaginatedItems().length;
     const currentCount = offset + length;
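The pagination hunk above is the changelog's "incorrect offset pagination check" fix: `next_index` can legitimately be `0`, but `0` is falsy, so the old `if (!offset) return null` ended pagination early. A standalone illustration of the truthiness trap (not SDK code):

```ts
const nextIndex = 0; // a valid offset pointing at the first row

// Old check: conflates 0 with "missing" and stops paginating.
const oldOffset = !nextIndex ? null : nextIndex; // null

// New check: only null/undefined fall back; 0 survives.
const newOffset = nextIndex ?? 0; // 0
```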
diff --git a/src/resources/agents/agents.ts b/src/resources/agents/agents.ts
deleted file mode 100644
index 35a4d62..0000000
--- a/src/resources/agents/agents.ts
+++ /dev/null
@@ -1,366 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
-import * as Shared from '../shared';
-import * as SessionAPI from './session';
-import {
-  Session,
-  SessionCreateParams,
-  SessionCreateResponse,
-  SessionListParams,
-  SessionListResponse,
-  SessionResource,
-  SessionRetrieveParams,
-} from './session';
-import * as StepsAPI from './steps';
-import { StepRetrieveResponse, Steps } from './steps';
-import * as TurnAPI from './turn';
-import {
-  AgentTurnResponseStreamChunk,
-  Turn,
-  TurnCreateParams,
-  TurnCreateParamsNonStreaming,
-  TurnCreateParamsStreaming,
-  TurnResource,
-  TurnResponseEvent,
-  TurnResponseEventPayload,
-  TurnResumeParams,
-  TurnResumeParamsNonStreaming,
-  TurnResumeParamsStreaming,
-} from './turn';
-
-export class Agents extends APIResource {
-  session: SessionAPI.SessionResource = new SessionAPI.SessionResource(this._client);
-  steps: StepsAPI.Steps = new StepsAPI.Steps(this._client);
-  turn: TurnAPI.TurnResource = new TurnAPI.TurnResource(this._client);
-
-  /**
-   * Create an agent with the given configuration.
-   */
-  create(body: AgentCreateParams, options?: Core.RequestOptions): Core.APIPromise<AgentCreateResponse> {
-    return this._client.post('/v1/agents', { body, ...options });
-  }
-
-  /**
-   * Describe an agent by its ID.
-   */
-  retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise<AgentRetrieveResponse> {
-    return this._client.get(`/v1/agents/${agentId}`, options);
-  }
-
-  /**
-   * List all agents.
-   */
-  list(query?: AgentListParams, options?: Core.RequestOptions): Core.APIPromise<AgentListResponse>;
-  list(options?: Core.RequestOptions): Core.APIPromise<AgentListResponse>;
-  list(
-    query: AgentListParams | Core.RequestOptions = {},
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<AgentListResponse> {
-    if (isRequestOptions(query)) {
-      return this.list({}, query);
-    }
-    return this._client.get('/v1/agents', { query, ...options });
-  }
-
-  /**
-   * Delete an agent by its ID and its associated sessions and turns.
-   */
-  delete(agentId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
-    return this._client.delete(`/v1/agents/${agentId}`, {
-      ...options,
-      headers: { Accept: '*/*', ...options?.headers },
-    });
-  }
-}
-
-/**
- * An inference step in an agent turn.
- */
-export interface InferenceStep {
-  /**
-   * The response from the LLM.
-   */
-  model_response: Shared.CompletionMessage;
-
-  /**
-   * The ID of the step.
-   */
-  step_id: string;
-
-  /**
-   * Type of the step in an agent turn.
-   */
-  step_type: 'inference';
-
-  /**
-   * The ID of the turn.
-   */
-  turn_id: string;
-
-  /**
-   * The time the step completed.
-   */
-  completed_at?: string;
-
-  /**
-   * The time the step started.
-   */
-  started_at?: string;
-}
-
-/**
- * A memory retrieval step in an agent turn.
- */
-export interface MemoryRetrievalStep {
-  /**
-   * The context retrieved from the vector databases.
-   */
-  inserted_context: Shared.InterleavedContent;
-
-  /**
-   * The ID of the step.
-   */
-  step_id: string;
-
-  /**
-   * Type of the step in an agent turn.
-   */
-  step_type: 'memory_retrieval';
-
-  /**
-   * The ID of the turn.
-   */
-  turn_id: string;
-
-  /**
-   * The IDs of the vector databases to retrieve context from.
-   */
-  vector_db_ids: string;
-
-  /**
-   * The time the step completed.
-   */
-  completed_at?: string;
-
-  /**
-   * The time the step started.
-   */
-  started_at?: string;
-}
-
-/**
- * A shield call step in an agent turn.
- */
-export interface ShieldCallStep {
-  /**
-   * The ID of the step.
-   */
-  step_id: string;
-
-  /**
-   * Type of the step in an agent turn.
-   */
-  step_type: 'shield_call';
-
-  /**
-   * The ID of the turn.
-   */
-  turn_id: string;
-
-  /**
-   * The time the step completed.
-   */
-  completed_at?: string;
-
-  /**
-   * The time the step started.
-   */
-  started_at?: string;
-
-  /**
-   * The violation from the shield call.
-   */
-  violation?: Shared.SafetyViolation;
-}
-
-/**
- * A tool execution step in an agent turn.
- */
-export interface ToolExecutionStep {
-  /**
-   * The ID of the step.
-   */
-  step_id: string;
-
-  /**
-   * Type of the step in an agent turn.
-   */
-  step_type: 'tool_execution';
-
-  /**
-   * The tool calls to execute.
-   */
-  tool_calls: Array<Shared.ToolCall>;
-
-  /**
-   * The tool responses from the tool calls.
-   */
-  tool_responses: Array<ToolResponse>;
-
-  /**
-   * The ID of the turn.
-   */
-  turn_id: string;
-
-  /**
-   * The time the step completed.
-   */
-  completed_at?: string;
-
-  /**
-   * The time the step started.
-   */
-  started_at?: string;
-}
-
-/**
- * Response from a tool invocation.
- */
-export interface ToolResponse {
-  /**
-   * Unique identifier for the tool call this response is for
-   */
-  call_id: string;
-
-  /**
-   * The response content from the tool
-   */
-  content: Shared.InterleavedContent;
-
-  /**
-   * Name of the tool that was invoked
-   */
-  tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
-  /**
-   * (Optional) Additional metadata about the tool response
-   */
-  metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-}
-
-/**
- * Response returned when creating a new agent.
- */
-export interface AgentCreateResponse {
-  /**
-   * Unique identifier for the created agent
-   */
-  agent_id: string;
-}
-
-/**
- * An agent instance with configuration and metadata.
- */
-export interface AgentRetrieveResponse {
-  /**
-   * Configuration settings for the agent
-   */
-  agent_config: Shared.AgentConfig;
-
-  /**
-   * Unique identifier for the agent
-   */
-  agent_id: string;
-
-  /**
-   * Timestamp when the agent was created
-   */
-  created_at: string;
-}
-
-/**
- * A generic paginated response that follows a simple format.
- */
-export interface AgentListResponse {
-  /**
-   * The list of items for the current page
-   */
-  data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
-  /**
-   * Whether there are more items available after this set
-   */
-  has_more: boolean;
-
-  /**
-   * The URL for accessing this list
-   */
-  url?: string;
-}
-
-export interface AgentCreateParams {
-  /**
-   * The configuration for the agent.
-   */
-  agent_config: Shared.AgentConfig;
-}
-
-export interface AgentListParams {
-  /**
-   * The number of agents to return.
-   */
-  limit?: number;
-
-  /**
-   * The index to start the pagination from.
-   */
-  start_index?: number;
-}
-
-Agents.SessionResource = SessionResource;
-Agents.Steps = Steps;
-Agents.TurnResource = TurnResource;
-
-export declare namespace Agents {
-  export {
-    type InferenceStep as InferenceStep,
-    type MemoryRetrievalStep as MemoryRetrievalStep,
-    type ShieldCallStep as ShieldCallStep,
-    type ToolExecutionStep as ToolExecutionStep,
-    type ToolResponse as ToolResponse,
-    type AgentCreateResponse as AgentCreateResponse,
-    type AgentRetrieveResponse as AgentRetrieveResponse,
-    type AgentListResponse as AgentListResponse,
-    type AgentCreateParams as AgentCreateParams,
-    type AgentListParams as AgentListParams,
-  };
-
-  export {
-    SessionResource as SessionResource,
-    type Session as Session,
-    type SessionCreateResponse as SessionCreateResponse,
-    type SessionListResponse as SessionListResponse,
-    type SessionCreateParams as SessionCreateParams,
-    type SessionRetrieveParams as SessionRetrieveParams,
-    type SessionListParams as SessionListParams,
-  };
-
-  export { Steps as Steps, type StepRetrieveResponse as StepRetrieveResponse };
-
-  export {
-    TurnResource as TurnResource,
-    type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
-    type Turn as Turn,
-    type TurnResponseEvent as TurnResponseEvent,
-    type TurnResponseEventPayload as TurnResponseEventPayload,
-    type TurnCreateParams as TurnCreateParams,
-    type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
-    type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
-    type TurnResumeParams as TurnResumeParams,
-    type TurnResumeParamsNonStreaming as TurnResumeParamsNonStreaming,
-    type TurnResumeParamsStreaming as TurnResumeParamsStreaming,
-  };
-}
diff --git a/src/resources/agents/index.ts b/src/resources/agents/index.ts
deleted file mode 100644
index 88a44bf..0000000
--- a/src/resources/agents/index.ts
+++ /dev/null
@@ -1,38 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
-  Agents,
-  type InferenceStep,
-  type MemoryRetrievalStep,
-  type ShieldCallStep,
-  type ToolExecutionStep,
-  type ToolResponse,
-  type AgentCreateResponse,
-  type AgentRetrieveResponse,
-  type AgentListResponse,
-  type AgentCreateParams,
-  type AgentListParams,
-} from './agents';
-export {
-  SessionResource,
-  type Session,
-  type SessionCreateResponse,
-  type SessionListResponse,
-  type SessionCreateParams,
-  type SessionRetrieveParams,
-  type SessionListParams,
-} from './session';
-export { Steps, type StepRetrieveResponse } from './steps';
-export {
-  TurnResource,
-  type AgentTurnResponseStreamChunk,
-  type Turn,
-  type TurnResponseEvent,
-  type TurnResponseEventPayload,
-  type TurnCreateParams,
-  type TurnCreateParamsNonStreaming,
-  type TurnCreateParamsStreaming,
-  type TurnResumeParams,
-  type TurnResumeParamsNonStreaming,
-  type TurnResumeParamsStreaming,
-} from './turn';
diff --git a/src/resources/agents/session.ts b/src/resources/agents/session.ts
deleted file mode 100644
index 35c8511..0000000
--- a/src/resources/agents/session.ts
+++ /dev/null
@@ -1,163 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
-import * as TurnAPI from './turn';
-
-export class SessionResource extends APIResource {
-  /**
-   * Create a new session for an agent.
-   */
-  create(
-    agentId: string,
-    body: SessionCreateParams,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<SessionCreateResponse> {
-    return this._client.post(`/v1/agents/${agentId}/session`, { body, ...options });
-  }
-
-  /**
-   * Retrieve an agent session by its ID.
-   */
-  retrieve(
-    agentId: string,
-    sessionId: string,
-    query?: SessionRetrieveParams,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<Session>;
-  retrieve(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise<Session>;
-  retrieve(
-    agentId: string,
-    sessionId: string,
-    query: SessionRetrieveParams | Core.RequestOptions = {},
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<Session> {
-    if (isRequestOptions(query)) {
-      return this.retrieve(agentId, sessionId, {}, query);
-    }
-    return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options });
-  }
-
-  /**
-   * List all session(s) of a given agent.
-   */
-  list(
-    agentId: string,
-    query?: SessionListParams,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<SessionListResponse>;
-  list(agentId: string, options?: Core.RequestOptions): Core.APIPromise<SessionListResponse>;
-  list(
-    agentId: string,
-    query: SessionListParams | Core.RequestOptions = {},
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<SessionListResponse> {
-    if (isRequestOptions(query)) {
-      return this.list(agentId, {}, query);
-    }
-    return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options });
-  }
-
-  /**
-   * Delete an agent session by its ID and its associated turns.
-   */
-  delete(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
-    return this._client.delete(`/v1/agents/${agentId}/session/${sessionId}`, {
-      ...options,
-      headers: { Accept: '*/*', ...options?.headers },
-    });
-  }
-}
-
-/**
- * A single session of an interaction with an Agentic System.
- */
-export interface Session {
-  /**
-   * Unique identifier for the conversation session
-   */
-  session_id: string;
-
-  /**
-   * Human-readable name for the session
-   */
-  session_name: string;
-
-  /**
-   * Timestamp when the session was created
-   */
-  started_at: string;
-
-  /**
-   * List of all turns that have occurred in this session
-   */
-  turns: Array<TurnAPI.Turn>;
-}
-
-/**
- * Response returned when creating a new agent session.
- */
-export interface SessionCreateResponse {
-  /**
-   * Unique identifier for the created session
-   */
-  session_id: string;
-}
-
-/**
- * A generic paginated response that follows a simple format.
- */
-export interface SessionListResponse {
-  /**
-   * The list of items for the current page
-   */
-  data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
-  /**
-   * Whether there are more items available after this set
-   */
-  has_more: boolean;
-
-  /**
-   * The URL for accessing this list
-   */
-  url?: string;
-}
-
-export interface SessionCreateParams {
-  /**
-   * The name of the session to create.
-   */
-  session_name: string;
-}
-
-export interface SessionRetrieveParams {
-  /**
-   * (Optional) List of turn IDs to filter the session by.
-   */
-  turn_ids?: Array<string>;
-}
-
-export interface SessionListParams {
-  /**
-   * The number of sessions to return.
-   */
-  limit?: number;
-
-  /**
-   * The index to start the pagination from.
-   */
-  start_index?: number;
-}
-
-export declare namespace SessionResource {
-  export {
-    type Session as Session,
-    type SessionCreateResponse as SessionCreateResponse,
-    type SessionListResponse as SessionListResponse,
-    type SessionCreateParams as SessionCreateParams,
-    type SessionRetrieveParams as SessionRetrieveParams,
-    type SessionListParams as SessionListParams,
-  };
-}
diff --git a/src/resources/agents/steps.ts b/src/resources/agents/steps.ts
deleted file mode 100644
index 8d2d821..0000000
--- a/src/resources/agents/steps.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as AgentsAPI from './agents';
-
-export class Steps extends APIResource {
-  /**
-   * Retrieve an agent step by its ID.
-   */
-  retrieve(
-    agentId: string,
-    sessionId: string,
-    turnId: string,
-    stepId: string,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<StepRetrieveResponse> {
-    return this._client.get(
-      `/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`,
-      options,
-    );
-  }
-}
-
-/**
- * Response containing details of a specific agent step.
- */
-export interface StepRetrieveResponse {
-  /**
-   * The complete step data and execution details
-   */
-  step:
-    | AgentsAPI.InferenceStep
-    | AgentsAPI.ToolExecutionStep
-    | AgentsAPI.ShieldCallStep
-    | AgentsAPI.MemoryRetrievalStep;
-}
-
-export declare namespace Steps {
-  export { type StepRetrieveResponse as StepRetrieveResponse };
-}
diff --git a/src/resources/agents/turn.ts b/src/resources/agents/turn.ts
deleted file mode 100644
index 0273625..0000000
--- a/src/resources/agents/turn.ts
+++ /dev/null
@@ -1,632 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import { APIPromise } from '../../core';
-import * as Core from '../../core';
-import * as TurnAPI from './turn';
-import * as Shared from '../shared';
-import * as AgentsAPI from './agents';
-import { Stream } from '../../streaming';
-
-export class TurnResource extends APIResource {
-  /**
-   * Create a new turn for an agent.
-   */
-  create(
-    agentId: string,
-    sessionId: string,
-    body: TurnCreateParamsNonStreaming,
-    options?: Core.RequestOptions,
-  ): APIPromise<Turn>;
-  create(
-    agentId: string,
-    sessionId: string,
-    body: TurnCreateParamsStreaming,
-    options?: Core.RequestOptions,
-  ): APIPromise<Stream<AgentTurnResponseStreamChunk>>;
-  create(
-    agentId: string,
-    sessionId: string,
-    body: TurnCreateParamsBase,
-    options?: Core.RequestOptions,
-  ): APIPromise<Stream<AgentTurnResponseStreamChunk> | Turn>;
-  create(
-    agentId: string,
-    sessionId: string,
-    body: TurnCreateParams,
-    options?: Core.RequestOptions,
-  ): APIPromise<Turn> | APIPromise<Stream<AgentTurnResponseStreamChunk>> {
-    return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, {
-      body,
-      ...options,
-      stream: body.stream ?? false,
-    }) as APIPromise<Turn> | APIPromise<Stream<AgentTurnResponseStreamChunk>>;
-  }
-
-  /**
-   * Retrieve an agent turn by its ID.
-   */
-  retrieve(
-    agentId: string,
-    sessionId: string,
-    turnId: string,
-    options?: Core.RequestOptions,
-  ): Core.APIPromise<Turn> {
-    return this._client.get(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options);
-  }
-
-  /**
-   * Resume an agent turn with executed tool call responses. When a Turn has the
-   * status `awaiting_input` due to pending input from client side tool calls, this
-   * endpoint can be used to submit the outputs from the tool calls once they are
-   * ready.
-   */
-  resume(
-    agentId: string,
-    sessionId: string,
-    turnId: string,
-    body: TurnResumeParamsNonStreaming,
-    options?: Core.RequestOptions,
-  ): APIPromise<Turn>;
-  resume(
-    agentId: string,
-    sessionId: string,
-    turnId: string,
-    body: TurnResumeParamsStreaming,
-    options?: Core.RequestOptions,
-  ): APIPromise<Stream<AgentTurnResponseStreamChunk>>;
-  resume(
-    agentId: string,
-    sessionId: string,
-    turnId: string,
-    body: TurnResumeParamsBase,
-    options?: Core.RequestOptions,
-  ): APIPromise<Stream<AgentTurnResponseStreamChunk> | Turn>;
-  resume(
-    agentId: string,
-    sessionId: string,
-    turnId: string,
-    body: TurnResumeParams,
-    options?: Core.RequestOptions,
-  ): APIPromise<Turn> | APIPromise<Stream<AgentTurnResponseStreamChunk>> {
-    return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, {
-      body,
-      ...options,
-      stream: body.stream ?? false,
-    }) as APIPromise<Turn> | APIPromise<Stream<AgentTurnResponseStreamChunk>>;
-  }
-}
-
-/**
- * Streamed agent turn completion response.
- */
-export interface AgentTurnResponseStreamChunk {
-  /**
-   * Individual event in the agent turn response stream
-   */
-  event: TurnResponseEvent;
-}
-
-/**
- * A single turn in an interaction with an Agentic System.
- */
-export interface Turn {
-  /**
-   * List of messages that initiated this turn
-   */
-  input_messages: Array;
-
-  /**
-   * The model's generated response containing content and metadata
-   */
-  output_message: Shared.CompletionMessage;
-
-  /**
-   * Unique identifier for the conversation session
-   */
-  session_id: string;
-
-  /**
-   * Timestamp when the turn began
-   */
-  started_at: string;
-
-  /**
-   * Ordered list of processing steps executed during this turn
-   */
-  steps: Array<
-    | AgentsAPI.InferenceStep
-    | AgentsAPI.ToolExecutionStep
-    | AgentsAPI.ShieldCallStep
-    | AgentsAPI.MemoryRetrievalStep
-  >;
-
-  /**
-   * Unique identifier for the turn within a session
-   */
-  turn_id: string;
-
-  /**
-   * (Optional) Timestamp when the turn finished, if completed
-   */
-  completed_at?: string;
-
-  /**
-   * (Optional) Files or media attached to the agent's response
-   */
-  output_attachments?: Array<Turn.OutputAttachment>;
-}
-
-export namespace Turn {
-  /**
-   * An attachment to an agent turn.
-   */
-  export interface OutputAttachment {
-    /**
-     * The content of the attachment.
-     */
-    content:
-      | string
-      | OutputAttachment.ImageContentItem
-      | OutputAttachment.TextContentItem
-      | Array
-      | OutputAttachment.URL;
-
-    /**
-     * The MIME type of the attachment.
-     */
-    mime_type: string;
-  }
-
-  export namespace OutputAttachment {
-    /**
-     * A image content item
-     */
-    export interface ImageContentItem {
-      /**
-       * Image as a base64 encoded string or an URL
-       */
-      image: ImageContentItem.Image;
-
-      /**
-       * Discriminator type of the content item. Always "image"
-       */
-      type: 'image';
-    }
-
-    export namespace ImageContentItem {
-      /**
-       * Image as a base64 encoded string or an URL
-       */
-      export interface Image {
-        /**
-         * base64 encoded image data as string
-         */
-        data?: string;
-
-        /**
-         * A URL of the image or data URL in the format of data:image/{type};base64,{data}.
-         * Note that URL could have length limits.
-         */
-        url?: Image.URL;
-      }
-
-      export namespace Image {
-        /**
-         * A URL of the image or data URL in the format of data:image/{type};base64,{data}.
-         * Note that URL could have length limits.
-         */
-        export interface URL {
-          /**
-           * The URL string pointing to the resource
-           */
-          uri: string;
-        }
-      }
-    }
-
-    /**
-     * A text content item
-     */
-    export interface TextContentItem {
-      /**
-       * Text content
-       */
-      text: string;
-
-      /**
-       * Discriminator type of the content item. Always "text"
-       */
-      type: 'text';
-    }
-
-    /**
-     * A URL reference to external content.
-     */
-    export interface URL {
-      /**
-       * The URL string pointing to the resource
-       */
-      uri: string;
-    }
-  }
-}
-
-/**
- * An event in an agent turn response stream.
- */
-export interface TurnResponseEvent {
-  /**
-   * Event-specific payload containing event data
-   */
-  payload: TurnResponseEventPayload;
-}
-
-/**
- * Payload for step start events in agent turn responses.
- */
-export type TurnResponseEventPayload =
-  | TurnResponseEventPayload.AgentTurnResponseStepStartPayload
-  | TurnResponseEventPayload.AgentTurnResponseStepProgressPayload
-  | TurnResponseEventPayload.AgentTurnResponseStepCompletePayload
-  | TurnResponseEventPayload.AgentTurnResponseTurnStartPayload
-  | TurnResponseEventPayload.AgentTurnResponseTurnCompletePayload
-  | TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload;
-
-export namespace TurnResponseEventPayload {
-  /**
-   * Payload for step start events in agent turn responses.
-   */
-  export interface AgentTurnResponseStepStartPayload {
-    /**
-     * Type of event being reported
-     */
-    event_type: 'step_start';
-
-    /**
-     * Unique identifier for the step within a turn
-     */
-    step_id: string;
-
-    /**
-     * Type of step being executed
-     */
-    step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
-
-    /**
-     * (Optional) Additional metadata for the step
-     */
-    metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-  }
-
-  /**
-   * Payload for step progress events in agent turn responses.
-   */
-  export interface AgentTurnResponseStepProgressPayload {
-    /**
-     * Incremental content changes during step execution
-     */
-    delta: Shared.ContentDelta;
-
-    /**
-     * Type of event being reported
-     */
-    event_type: 'step_progress';
-
-    /**
-     * Unique identifier for the step within a turn
-     */
-    step_id: string;
-
-    /**
-     * Type of step being executed
-     */
-    step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
-  }
-
-  /**
-   * Payload for step completion events in agent turn responses.
-   */
-  export interface AgentTurnResponseStepCompletePayload {
-    /**
-     * Type of event being reported
-     */
-    event_type: 'step_complete';
-
-    /**
-     * Complete details of the executed step
-     */
-    step_details:
-      | AgentsAPI.InferenceStep
-      | AgentsAPI.ToolExecutionStep
-      | AgentsAPI.ShieldCallStep
-      | AgentsAPI.MemoryRetrievalStep;
-
-    /**
-     * Unique identifier for the step within a turn
-     */
-    step_id: string;
-
-    /**
-     * Type of step being executed
-     */
-    step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
-  }
-
-  /**
-   * Payload for turn start events in agent turn responses.
-   */
-  export interface AgentTurnResponseTurnStartPayload {
-    /**
-     * Type of event being reported
-     */
-    event_type: 'turn_start';
-
-    /**
-     * Unique identifier for the turn within a session
-     */
-    turn_id: string;
-  }
-
-  /**
-   * Payload for turn completion events in agent turn responses.
-   */
-  export interface AgentTurnResponseTurnCompletePayload {
-    /**
-     * Type of event being reported
-     */
-    event_type: 'turn_complete';
-
-    /**
-     * Complete turn data including all steps and results
-     */
-    turn: TurnAPI.Turn;
-  }
-
-  /**
-   * Payload for turn awaiting input events in agent turn responses.
- */ - export interface AgentTurnResponseTurnAwaitingInputPayload { - /** - * Type of event being reported - */ - event_type: 'turn_awaiting_input'; - - /** - * Turn data when waiting for external tool responses - */ - turn: TurnAPI.Turn; - } -} - -export type TurnCreateParams = TurnCreateParamsNonStreaming | TurnCreateParamsStreaming; - -export interface TurnCreateParamsBase { - /** - * List of messages to start the turn with. - */ - messages: Array; - - /** - * (Optional) List of documents to create the turn with. - */ - documents?: Array; - - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: boolean; - - /** - * (Optional) The tool configuration to create the turn with, will be used to - * override the agent's tool_config. - */ - tool_config?: TurnCreateParams.ToolConfig; - - /** - * (Optional) List of toolgroups to create the turn with, will be used in addition - * to the agent's config toolgroups for the request. - */ - toolgroups?: Array; -} - -export namespace TurnCreateParams { - /** - * A document to be used by an agent. - */ - export interface Document { - /** - * The content of the document. - */ - content: - | string - | Document.ImageContentItem - | Document.TextContentItem - | Array - | Document.URL; - - /** - * The MIME type of the document. - */ - mime_type: string; - } - - export namespace Document { - /** - * A image content item - */ - export interface ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - image: ImageContentItem.Image; - - /** - * Discriminator type of the content item. Always "image" - */ - type: 'image'; - } - - export namespace ImageContentItem { - /** - * Image as a base64 encoded string or an URL - */ - export interface Image { - /** - * base64 encoded image data as string - */ - data?: string; - - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. - */ - url?: Image.URL; - } - - export namespace Image { - /** - * A URL of the image or data URL in the format of data:image/{type};base64,{data}. - * Note that URL could have length limits. - */ - export interface URL { - /** - * The URL string pointing to the resource - */ - uri: string; - } - } - } - - /** - * A text content item - */ - export interface TextContentItem { - /** - * Text content - */ - text: string; - - /** - * Discriminator type of the content item. Always "text" - */ - type: 'text'; - } - - /** - * A URL reference to external content. - */ - export interface URL { - /** - * The URL string pointing to the resource - */ - uri: string; - } - } - - /** - * (Optional) The tool configuration to create the turn with, will be used to - * override the agent's tool_config. - */ - export interface ToolConfig { - /** - * (Optional) Config for how to override the default system prompt. - - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. - */ - system_message_behavior?: 'append' | 'replace'; - - /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. 
- */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); - - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - } - - export interface AgentToolGroupWithArgs { - args: { [key: string]: boolean | number | string | Array | unknown | null }; - - name: string; - } - - export type TurnCreateParamsNonStreaming = TurnAPI.TurnCreateParamsNonStreaming; - export type TurnCreateParamsStreaming = TurnAPI.TurnCreateParamsStreaming; -} - -export interface TurnCreateParamsNonStreaming extends TurnCreateParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: false; -} - -export interface TurnCreateParamsStreaming extends TurnCreateParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream: true; -} - -export type TurnResumeParams = TurnResumeParamsNonStreaming | TurnResumeParamsStreaming; - -export interface TurnResumeParamsBase { - /** - * The tool call responses to resume the turn with. - */ - tool_responses: Array; - - /** - * Whether to stream the response. - */ - stream?: boolean; -} - -export namespace TurnResumeParams { - export type TurnResumeParamsNonStreaming = TurnAPI.TurnResumeParamsNonStreaming; - export type TurnResumeParamsStreaming = TurnAPI.TurnResumeParamsStreaming; -} - -export interface TurnResumeParamsNonStreaming extends TurnResumeParamsBase { - /** - * Whether to stream the response. - */ - stream?: false; -} - -export interface TurnResumeParamsStreaming extends TurnResumeParamsBase { - /** - * Whether to stream the response. - */ - stream: true; -} - -export declare namespace TurnResource { - export { - type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk, - type Turn as Turn, - type TurnResponseEvent as TurnResponseEvent, - type TurnResponseEventPayload as TurnResponseEventPayload, - type TurnCreateParams as TurnCreateParams, - type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming, - type TurnCreateParamsStreaming as TurnCreateParamsStreaming, - type TurnResumeParams as TurnResumeParams, - type TurnResumeParamsNonStreaming as TurnResumeParamsNonStreaming, - type TurnResumeParamsStreaming as TurnResumeParamsStreaming, - }; -} diff --git a/src/resources/alpha.ts b/src/resources/alpha.ts new file mode 100644 index 0000000..446b643 --- /dev/null +++ b/src/resources/alpha.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './alpha/index'; diff --git a/src/resources/agents.ts b/src/resources/alpha/agents.ts similarity index 100% rename from src/resources/agents.ts rename to src/resources/alpha/agents.ts diff --git a/src/resources/alpha/agents/agents.ts b/src/resources/alpha/agents/agents.ts new file mode 100644 index 0000000..4732cd5 --- /dev/null +++ b/src/resources/alpha/agents/agents.ts @@ -0,0 +1,27 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
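The `TurnResource` removed above is the clearest illustration of the SDK's stream/non-stream overload pattern: one method whose return type narrows to either a final `Turn` or a `Stream` of events based on the literal type of `body.stream`, with the runtime dispatch falling back via `body.stream ?? false`. A minimal self-contained sketch of that pattern — the `request` helper and the two interfaces are simplified stand-ins, not SDK internals:

```ts
// Stand-ins for the SDK's Turn and stream-chunk shapes.
interface Turn { turn_id: string }
interface StreamChunk { event: unknown }

// Hypothetical transport helper; the real SDK routes through Core.post and
// wraps SSE bodies in a Stream when `stream` is true.
async function request(path: string, opts: { stream: boolean }): Promise<unknown> {
  return opts.stream ? (async function* (): AsyncGenerator<StreamChunk> {})() : { turn_id: 'turn_123' };
}

// Overloads: the literal type of `stream` selects the return type.
function createTurn(body: { stream?: false }): Promise<Turn>;
function createTurn(body: { stream: true }): Promise<AsyncIterable<StreamChunk>>;
function createTurn(body: { stream?: boolean }): Promise<Turn | AsyncIterable<StreamChunk>>;
function createTurn(body: { stream?: boolean }) {
  return request('/v1/agents/{agent_id}/session/{session_id}/turn', {
    stream: body.stream ?? false,
  }) as Promise<Turn | AsyncIterable<StreamChunk>>;
}
```

The removed `resume` endpoint used the identical dispatch for submitting client-side tool outputs when a turn was `awaiting_input`.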
+ +import { APIResource } from '../../../resource'; +import * as SessionAPI from './session'; +import { Session } from './session'; +import * as StepsAPI from './steps'; +import { Steps } from './steps'; +import * as TurnAPI from './turn'; +import { Turn } from './turn'; + +export class Agents extends APIResource { + session: SessionAPI.Session = new SessionAPI.Session(this._client); + steps: StepsAPI.Steps = new StepsAPI.Steps(this._client); + turn: TurnAPI.Turn = new TurnAPI.Turn(this._client); +} + +Agents.Session = Session; +Agents.Steps = Steps; +Agents.Turn = Turn; + +export declare namespace Agents { + export { Session as Session }; + + export { Steps as Steps }; + + export { Turn as Turn }; +} diff --git a/src/resources/alpha/agents/index.ts b/src/resources/alpha/agents/index.ts new file mode 100644 index 0000000..d9edac6 --- /dev/null +++ b/src/resources/alpha/agents/index.ts @@ -0,0 +1,6 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Agents } from './agents'; +export { Session } from './session'; +export { Steps } from './steps'; +export { Turn } from './turn'; diff --git a/src/resources/alpha/agents/session.ts b/src/resources/alpha/agents/session.ts new file mode 100644 index 0000000..9e1bdde --- /dev/null +++ b/src/resources/alpha/agents/session.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; + +export class Session extends APIResource {} diff --git a/src/resources/alpha/agents/steps.ts b/src/resources/alpha/agents/steps.ts new file mode 100644 index 0000000..295f1f7 --- /dev/null +++ b/src/resources/alpha/agents/steps.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; + +export class Steps extends APIResource {} diff --git a/src/resources/alpha/agents/turn.ts b/src/resources/alpha/agents/turn.ts new file mode 100644 index 0000000..3528209 --- /dev/null +++ b/src/resources/alpha/agents/turn.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; + +export class Turn extends APIResource {} diff --git a/src/resources/alpha/alpha.ts b/src/resources/alpha/alpha.ts new file mode 100644 index 0000000..6bf1d0c --- /dev/null +++ b/src/resources/alpha/alpha.ts @@ -0,0 +1,39 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
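The new `Agents` container above (and the `Alpha`/`Beta` containers below) all use the same wiring: sub-resources are constructed eagerly with the shared client, and a static assignment plus declaration merging makes each sub-resource reachable under the parent's name. A stripped-down sketch of the pattern with stand-in `APIClient`/`APIResource` types:

```ts
// Simplified stand-ins for the SDK's client/resource base types.
class APIClient {}

class APIResource {
  constructor(protected _client: APIClient) {}
}

class Session extends APIResource {}

class Agents extends APIResource {
  // Every sub-resource shares the parent's client instance.
  session: Session = new Session(this._client);

  // Runtime alias mirroring `Agents.Session = Session;` in the generated code;
  // the SDK pairs this with `export declare namespace Agents` to merge types.
  static Session = Session;
}

const agents = new Agents(new APIClient());
console.log(agents.session instanceof Agents.Session); // true
```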
+ +import { APIResource } from '../../resource'; +import * as BenchmarksAPI from './benchmarks'; +import { Benchmarks } from './benchmarks'; +import * as InferenceAPI from './inference'; +import { Inference } from './inference'; +import * as AgentsAPI from './agents/agents'; +import { Agents } from './agents/agents'; +import * as EvalAPI from './eval/eval'; +import { Eval } from './eval/eval'; +import * as PostTrainingAPI from './post-training/post-training'; +import { PostTraining } from './post-training/post-training'; + +export class Alpha extends APIResource { + inference: InferenceAPI.Inference = new InferenceAPI.Inference(this._client); + postTraining: PostTrainingAPI.PostTraining = new PostTrainingAPI.PostTraining(this._client); + benchmarks: BenchmarksAPI.Benchmarks = new BenchmarksAPI.Benchmarks(this._client); + eval: EvalAPI.Eval = new EvalAPI.Eval(this._client); + agents: AgentsAPI.Agents = new AgentsAPI.Agents(this._client); +} + +Alpha.Inference = Inference; +Alpha.PostTraining = PostTraining; +Alpha.Benchmarks = Benchmarks; +Alpha.Eval = Eval; +Alpha.Agents = Agents; + +export declare namespace Alpha { + export { Inference as Inference }; + + export { PostTraining as PostTraining }; + + export { Benchmarks as Benchmarks }; + + export { Eval as Eval }; + + export { Agents as Agents }; +} diff --git a/src/resources/alpha/benchmarks.ts b/src/resources/alpha/benchmarks.ts new file mode 100644 index 0000000..b194605 --- /dev/null +++ b/src/resources/alpha/benchmarks.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; + +export class Benchmarks extends APIResource {} diff --git a/src/resources/eval.ts b/src/resources/alpha/eval.ts similarity index 100% rename from src/resources/eval.ts rename to src/resources/alpha/eval.ts diff --git a/src/resources/alpha/eval/eval.ts b/src/resources/alpha/eval/eval.ts new file mode 100644 index 0000000..d60adf6 --- /dev/null +++ b/src/resources/alpha/eval/eval.ts @@ -0,0 +1,15 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as JobsAPI from './jobs'; +import { Jobs } from './jobs'; + +export class Eval extends APIResource { + jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); +} + +Eval.Jobs = Jobs; + +export declare namespace Eval { + export { Jobs as Jobs }; +} diff --git a/src/resources/alpha/eval/index.ts b/src/resources/alpha/eval/index.ts new file mode 100644 index 0000000..0297aa2 --- /dev/null +++ b/src/resources/alpha/eval/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Eval } from './eval'; +export { Jobs } from './jobs'; diff --git a/src/resources/alpha/eval/jobs.ts b/src/resources/alpha/eval/jobs.ts new file mode 100644 index 0000000..c1b3622 --- /dev/null +++ b/src/resources/alpha/eval/jobs.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; + +export class Jobs extends APIResource {} diff --git a/src/resources/alpha/index.ts b/src/resources/alpha/index.ts new file mode 100644 index 0000000..005e72a --- /dev/null +++ b/src/resources/alpha/index.ts @@ -0,0 +1,8 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
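Net effect of the moves above (`agents.ts`, `eval.ts`, and `post-training.ts` all relocate under `src/resources/alpha/`): pre-release surfaces now hang off `client.alpha` instead of the client root, matching the changelog entries. A hedged sketch of the new access paths, assuming the package's default `LlamaStackClient` export; old locations are noted in comments:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Property access only; several of these classes are stubs in this commit.
client.alpha.agents;       // was client.agents
client.alpha.eval.jobs;    // was client.eval.jobs
client.alpha.postTraining; // was client.postTraining
client.alpha.benchmarks;   // was client.benchmarks
client.alpha.inference;    // pre-release inference calls such as rerank, per the changelog
```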
+ +export { Agents } from './agents/index'; +export { Alpha } from './alpha'; +export { Benchmarks } from './benchmarks'; +export { Eval } from './eval/index'; +export { Inference } from './inference'; +export { PostTraining } from './post-training/index'; diff --git a/src/resources/alpha/inference.ts b/src/resources/alpha/inference.ts new file mode 100644 index 0000000..ecdd4e7 --- /dev/null +++ b/src/resources/alpha/inference.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; + +export class Inference extends APIResource {} diff --git a/src/resources/post-training.ts b/src/resources/alpha/post-training.ts similarity index 100% rename from src/resources/post-training.ts rename to src/resources/alpha/post-training.ts diff --git a/src/resources/alpha/post-training/index.ts b/src/resources/alpha/post-training/index.ts new file mode 100644 index 0000000..55c125a --- /dev/null +++ b/src/resources/alpha/post-training/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Job } from './job'; +export { PostTraining } from './post-training'; diff --git a/src/resources/alpha/post-training/job.ts b/src/resources/alpha/post-training/job.ts new file mode 100644 index 0000000..0a019ac --- /dev/null +++ b/src/resources/alpha/post-training/job.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; + +export class Job extends APIResource {} diff --git a/src/resources/alpha/post-training/post-training.ts b/src/resources/alpha/post-training/post-training.ts new file mode 100644 index 0000000..813ae6a --- /dev/null +++ b/src/resources/alpha/post-training/post-training.ts @@ -0,0 +1,15 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as JobAPI from './job'; +import { Job } from './job'; + +export class PostTraining extends APIResource { + job: JobAPI.Job = new JobAPI.Job(this._client); +} + +PostTraining.Job = Job; + +export declare namespace PostTraining { + export { Job as Job }; +} diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts deleted file mode 100644 index b6b8363..0000000 --- a/src/resources/benchmarks.ts +++ /dev/null @@ -1,111 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../resource'; -import * as Core from '../core'; - -export class Benchmarks extends APIResource { - /** - * Get a benchmark by its ID. - */ - retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/eval/benchmarks/${benchmarkId}`, options); - } - - /** - * List all benchmarks. - */ - list(options?: Core.RequestOptions): Core.APIPromise { - return ( - this._client.get('/v1/eval/benchmarks', options) as Core.APIPromise<{ data: BenchmarkListResponse }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Register a benchmark. - */ - register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/eval/benchmarks', { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } -} - -/** - * A benchmark resource for evaluating model performance. 
- */ -export interface Benchmark { - /** - * Identifier of the dataset to use for the benchmark evaluation - */ - dataset_id: string; - - identifier: string; - - /** - * Metadata for this evaluation task - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - /** - * List of scoring function identifiers to apply during evaluation - */ - scoring_functions: Array; - - /** - * The resource type, always benchmark - */ - type: 'benchmark'; - - provider_resource_id?: string; -} - -export interface ListBenchmarksResponse { - data: BenchmarkListResponse; -} - -export type BenchmarkListResponse = Array; - -export interface BenchmarkRegisterParams { - /** - * The ID of the benchmark to register. - */ - benchmark_id: string; - - /** - * The ID of the dataset to use for the benchmark. - */ - dataset_id: string; - - /** - * The scoring functions to use for the benchmark. - */ - scoring_functions: Array; - - /** - * The metadata to use for the benchmark. - */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The ID of the provider benchmark to use for the benchmark. - */ - provider_benchmark_id?: string; - - /** - * The ID of the provider to use for the benchmark. - */ - provider_id?: string; -} - -export declare namespace Benchmarks { - export { - type Benchmark as Benchmark, - type ListBenchmarksResponse as ListBenchmarksResponse, - type BenchmarkListResponse as BenchmarkListResponse, - type BenchmarkRegisterParams as BenchmarkRegisterParams, - }; -} diff --git a/src/resources/beta.ts b/src/resources/beta.ts new file mode 100644 index 0000000..1542e94 --- /dev/null +++ b/src/resources/beta.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './beta/index'; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts new file mode 100644 index 0000000..c579b07 --- /dev/null +++ b/src/resources/beta/beta.ts @@ -0,0 +1,15 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as DatasetsAPI from './datasets'; +import { Datasets } from './datasets'; + +export class Beta extends APIResource { + datasets: DatasetsAPI.Datasets = new DatasetsAPI.Datasets(this._client); +} + +Beta.Datasets = Datasets; + +export declare namespace Beta { + export { Datasets as Datasets }; +} diff --git a/src/resources/beta/datasets.ts b/src/resources/beta/datasets.ts new file mode 100644 index 0000000..06f918c --- /dev/null +++ b/src/resources/beta/datasets.ts @@ -0,0 +1,5 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; + +export class Datasets extends APIResource {} diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts new file mode 100644 index 0000000..e2acaeb --- /dev/null +++ b/src/resources/beta/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
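Besides relocating benchmarks under `client.alpha.benchmarks` (and datasets under `client.beta.datasets`), the deleted `Benchmarks.list` above documents the SDK's envelope-unwrapping idiom: the wire response is `{ data: [...] }`, and `._thenUnwrap((obj) => obj.data)` maps the resolved value so callers receive the bare array. A simplified sketch of the same unwrap, with the transport injected and the types pared down:

```ts
interface Benchmark {
  identifier: string;
  dataset_id: string;
  scoring_functions: Array<string>;
}

// Wire format: the list endpoint wraps its payload in a data envelope.
interface ListBenchmarksResponse {
  data: Array<Benchmark>;
}

async function listBenchmarks(
  get: (path: string) => Promise<ListBenchmarksResponse>,
): Promise<Array<Benchmark>> {
  const envelope = await get('/v1/eval/benchmarks');
  return envelope.data; // the moral equivalent of ._thenUnwrap((obj) => obj.data)
}
```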
+ +export { Beta } from './beta'; +export { Datasets } from './datasets'; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index b43e6d3..cc0e0eb 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -46,6 +46,11 @@ export interface ChatCompletionChunk { * The object type, which will be "chat.completion.chunk" */ object: 'chat.completion.chunk'; + + /** + * Token usage information (typically included in final chunk with stream_options) + */ + usage?: ChatCompletionChunk.Usage; } export namespace ChatCompletionChunk { @@ -84,6 +89,11 @@ export namespace ChatCompletionChunk { */ content?: string; + /** + * (Optional) The reasoning content from the model (non-standard, for o1/o3 models) + */ + reasoning_content?: string; + /** * (Optional) The refusal of the delta */ @@ -217,6 +227,58 @@ export namespace ChatCompletionChunk { } } } + + /** + * Token usage information (typically included in final chunk with stream_options) + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } Chat.Completions = Completions; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index c7ed5e8..7c8f133 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -11,8 +11,8 @@ import { Stream } from '../../streaming'; export class Completions extends APIResource { /** - * Generate an OpenAI-compatible chat completion for the given messages using the - * specified model. + * Create chat completions. Generate an OpenAI-compatible chat completion for the + * given messages using the specified model. */ create( body: CompletionCreateParamsNonStreaming, @@ -30,22 +30,20 @@ export class Completions extends APIResource { body: CompletionCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/v1/openai/v1/chat/completions', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + return this._client.post('/v1/chat/completions', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; } /** - * Describe a chat completion by its ID. + * Get chat completion. Describe a chat completion by its ID. */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/chat/completions/${completionId}`, options); + return this._client.get(`/v1/chat/completions/${completionId}`, options); } /** - * List all chat completions. + * List chat completions. 
*/ list( query?: CompletionListParams, @@ -61,11 +59,10 @@ export class Completions extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList( - '/v1/openai/v1/chat/completions', - CompletionListResponsesOpenAICursorPage, - { query, ...options }, - ); + return this._client.getAPIList('/v1/chat/completions', CompletionListResponsesOpenAICursorPage, { + query, + ...options, + }); } } @@ -107,6 +104,11 @@ export namespace CompletionCreateResponse { * The object type, which will be "chat.completion" */ object: 'chat.completion'; + + /** + * Token usage information for the completion + */ + usage?: OpenAIChatCompletion.Usage; } export namespace OpenAIChatCompletion { @@ -504,6 +506,58 @@ export namespace CompletionCreateResponse { } } } + + /** + * Token usage information for the completion + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } } @@ -540,6 +594,11 @@ export interface CompletionRetrieveResponse { * The object type, which will be "chat.completion" */ object: 'chat.completion'; + + /** + * Token usage information for the completion + */ + usage?: CompletionRetrieveResponse.Usage; } export namespace CompletionRetrieveResponse { @@ -1227,6 +1286,58 @@ export namespace CompletionRetrieveResponse { type: 'text'; } } + + /** + * Token usage information for the completion + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. 
+ */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } export interface CompletionListResponse { @@ -1262,6 +1373,11 @@ export interface CompletionListResponse { * The object type, which will be "chat.completion" */ object: 'chat.completion'; + + /** + * Token usage information for the completion + */ + usage?: CompletionListResponse.Usage; } export namespace CompletionListResponse { @@ -1949,6 +2065,58 @@ export namespace CompletionListResponse { type: 'text'; } } + + /** + * Token usage information for the completion + */ + export interface Usage { + /** + * Number of tokens in the completion + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt + */ + prompt_tokens: number; + + /** + * Total tokens used (prompt + completion) + */ + total_tokens: number; + + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + completion_tokens_details?: Usage.CompletionTokensDetails; + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + prompt_tokens_details?: Usage.PromptTokensDetails; + } + + export namespace Usage { + /** + * Token details for output tokens in OpenAI chat completion usage. + */ + export interface CompletionTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } + + /** + * Token details for prompt tokens in OpenAI chat completion usage. + */ + export interface PromptTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } + } } export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 0ade7ab..fe49a25 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -8,8 +8,8 @@ import { Stream } from '../streaming'; export class Completions extends APIResource { /** - * Generate an OpenAI-compatible completion for the given prompt using the - * specified model. + * Create completion. Generate an OpenAI-compatible completion for the given prompt + * using the specified model. */ create( body: CompletionCreateParamsNonStreaming, @@ -27,11 +27,9 @@ export class Completions extends APIResource { body: CompletionCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/v1/openai/v1/completions', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + return this._client.post('/v1/completions', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; } } @@ -174,8 +172,6 @@ export interface CompletionCreateParamsBase { */ frequency_penalty?: number; - guided_choice?: Array; - /** * (Optional) The logit bias to use. */ @@ -201,8 +197,6 @@ export interface CompletionCreateParamsBase { */ presence_penalty?: number; - prompt_logprobs?: number; - /** * (Optional) The seed to use. */ diff --git a/src/resources/conversations.ts b/src/resources/conversations.ts new file mode 100644 index 0000000..6b50950 --- /dev/null +++ b/src/resources/conversations.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
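Two changes land together in the chat hunks above: every completions route drops the `/v1/openai/v1` prefix in favor of plain `/v1/...`, and token `usage` (along with optional `reasoning_content` deltas) is now modeled on responses and chunks. A usage sketch under stated assumptions — the package's default `LlamaStackClient` export, an OpenAI-style `stream_options: { include_usage: true }` opt-in, and a hypothetical model id:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

const stream = await client.chat.completions.create({
  model: 'meta-llama/Llama-3.2-3B-Instruct', // hypothetical model id
  messages: [{ role: 'user', content: 'Say hello.' }],
  stream: true,
  stream_options: { include_usage: true }, // assumption: OpenAI-style opt-in flag
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  if (chunk.usage) {
    // Per the new ChatCompletionChunk.Usage type, this typically
    // arrives on the final chunk.
    console.log(`\ntotal tokens: ${chunk.usage.total_tokens}`);
  }
}
```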
+ +export * from './conversations/index'; diff --git a/src/resources/conversations/conversations.ts b/src/resources/conversations/conversations.ts new file mode 100644 index 0000000..0465dec --- /dev/null +++ b/src/resources/conversations/conversations.ts @@ -0,0 +1,585 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as ItemsAPI from './items'; +import { + ItemCreateParams, + ItemCreateResponse, + ItemGetResponse, + ItemListParams, + ItemListResponse, + ItemListResponsesOpenAICursorPage, + Items, +} from './items'; + +export class Conversations extends APIResource { + items: ItemsAPI.Items = new ItemsAPI.Items(this._client); + + /** + * Create a conversation. Create a conversation. + */ + create(body: ConversationCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/v1/conversations', { body, ...options }); + } + + /** + * Retrieve a conversation. Get a conversation with the given ID. + */ + retrieve(conversationId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/v1/conversations/${conversationId}`, options); + } + + /** + * Update a conversation. Update a conversation's metadata with the given ID. + */ + update( + conversationId: string, + body: ConversationUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/conversations/${conversationId}`, { body, ...options }); + } + + /** + * Delete a conversation. Delete a conversation with the given ID. + */ + delete(conversationId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/v1/conversations/${conversationId}`, options); + } +} + +/** + * OpenAI-compatible conversation object. + */ +export interface ConversationObject { + id: string; + + created_at: number; + + object: 'conversation'; + + items?: Array; + + metadata?: { [key: string]: string }; +} + +/** + * Response for deleted conversation. + */ +export interface ConversationDeleteResponse { + id: string; + + deleted: boolean; + + object: string; +} + +export interface ConversationCreateParams { + /** + * Initial items to include in the conversation context. + */ + items?: Array< + | ConversationCreateParams.OpenAIResponseMessage + | ConversationCreateParams.OpenAIResponseOutputMessageWebSearchToolCall + | ConversationCreateParams.OpenAIResponseOutputMessageFileSearchToolCall + | ConversationCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ConversationCreateParams.OpenAIResponseInputFunctionToolCallOutput + | ConversationCreateParams.OpenAIResponseMcpApprovalRequest + | ConversationCreateParams.OpenAIResponseMcpApprovalResponse + | ConversationCreateParams.OpenAIResponseOutputMessageMcpCall + | ConversationCreateParams.OpenAIResponseOutputMessageMcpListTools + >; + + /** + * Set of key-value pairs that can be attached to an object. + */ + metadata?: { [key: string]: string }; +} + +export namespace ConversationCreateParams { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. 
+ */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +export interface ConversationUpdateParams { + /** + * Set of key-value pairs that can be attached to an object. + */ + metadata: { [key: string]: string }; +} + +Conversations.Items = Items; +Conversations.ItemListResponsesOpenAICursorPage = ItemListResponsesOpenAICursorPage; + +export declare namespace Conversations { + export { + type ConversationObject as ConversationObject, + type ConversationDeleteResponse as ConversationDeleteResponse, + type ConversationCreateParams as ConversationCreateParams, + type ConversationUpdateParams as ConversationUpdateParams, + }; + + export { + Items as Items, + type ItemCreateResponse as ItemCreateResponse, + type ItemListResponse as ItemListResponse, + type ItemGetResponse as ItemGetResponse, + ItemListResponsesOpenAICursorPage as ItemListResponsesOpenAICursorPage, + type ItemCreateParams as ItemCreateParams, + type ItemListParams as ItemListParams, + }; +} diff --git a/src/resources/conversations/index.ts b/src/resources/conversations/index.ts new file mode 100644 index 0000000..de33b78 --- /dev/null +++ b/src/resources/conversations/index.ts @@ -0,0 +1,18 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
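The new `Conversations` resource above is fully specified in this hunk, so its lifecycle can be sketched directly from the types: `create` seeds optional items and metadata, `update` rewrites metadata by ID, and `delete` returns a `ConversationDeleteResponse`. Only the client import and the placeholder values below are assumptions:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

// Seed the conversation with a single user message (string content is allowed).
const conversation = await client.conversations.create({
  items: [{ type: 'message', role: 'user', content: 'Hello!' }],
  metadata: { topic: 'demo' },
});

// ConversationUpdateParams requires the full metadata map.
await client.conversations.update(conversation.id, {
  metadata: { topic: 'demo', status: 'resolved' },
});

const deleted = await client.conversations.delete(conversation.id);
console.log(deleted.deleted); // true once the conversation is removed
```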
+ +export { + Conversations, + type ConversationObject, + type ConversationDeleteResponse, + type ConversationCreateParams, + type ConversationUpdateParams, +} from './conversations'; +export { + ItemListResponsesOpenAICursorPage, + Items, + type ItemCreateResponse, + type ItemListResponse, + type ItemGetResponse, + type ItemCreateParams, + type ItemListParams, +} from './items'; diff --git a/src/resources/conversations/items.ts b/src/resources/conversations/items.ts new file mode 100644 index 0000000..6c2ae87 --- /dev/null +++ b/src/resources/conversations/items.ts @@ -0,0 +1,1998 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import { OpenAICursorPage, type OpenAICursorPageParams } from '../../pagination'; + +export class Items extends APIResource { + /** + * Create items. Create items in the conversation. + */ + create( + conversationId: string, + body: ItemCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/conversations/${conversationId}/items`, { body, ...options }); + } + + /** + * List items. List items in the conversation. + */ + list( + conversationId: string, + query?: ItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + conversationId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + conversationId: string, + query: ItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(conversationId, {}, query); + } + return this._client.getAPIList( + `/v1/conversations/${conversationId}/items`, + ItemListResponsesOpenAICursorPage, + { query, ...options }, + ); + } + + /** + * Retrieve an item. Retrieve a conversation item. + */ + get( + conversationId: string, + itemId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/conversations/${conversationId}/items/${itemId}`, options); + } +} + +export class ItemListResponsesOpenAICursorPage extends OpenAICursorPage {} + +/** + * List of conversation items with pagination. + */ +export interface ItemCreateResponse { + data: Array< + | ItemCreateResponse.OpenAIResponseMessage + | ItemCreateResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ItemCreateResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ItemCreateResponse.OpenAIResponseOutputMessageFunctionToolCall + | ItemCreateResponse.OpenAIResponseInputFunctionToolCallOutput + | ItemCreateResponse.OpenAIResponseMcpApprovalRequest + | ItemCreateResponse.OpenAIResponseMcpApprovalResponse + | ItemCreateResponse.OpenAIResponseOutputMessageMcpCall + | ItemCreateResponse.OpenAIResponseOutputMessageMcpListTools + >; + + has_more: boolean; + + object: string; + + first_id?: string; + + last_id?: string; +} + +export namespace ItemCreateResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. 
+ */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +/** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ +export type ItemListResponse = + | ItemListResponse.OpenAIResponseMessage + | ItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ItemListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ItemListResponse.OpenAIResponseInputFunctionToolCallOutput + | ItemListResponse.OpenAIResponseMcpApprovalRequest + | ItemListResponse.OpenAIResponseMcpApprovalResponse + | ItemListResponse.OpenAIResponseOutputMessageMcpCall + | ItemListResponse.OpenAIResponseOutputMessageMcpListTools; + +export namespace ItemListResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
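Every variant in the `ItemCreateResponse`/`ItemListResponse` unions above carries a literal `type` tag, so callers can narrow an item with a plain `switch` instead of casting. A minimal sketch; the deep import path is an assumption for illustration, not something this diff pins down:

```ts
// Hypothetical import path; adjust to wherever the SDK exposes these types.
import type { ItemListResponse } from 'llama-stack-client/resources/conversations/items';

function describeItem(item: ItemListResponse): string {
  switch (item.type) {
    case 'message':
      // Narrowed to OpenAIResponseMessage: `role` and `content` are now visible.
      return `message from ${item.role}`;
    case 'function_call':
      // Narrowed to OpenAIResponseOutputMessageFunctionToolCall.
      return `function call ${item.name}(${item.arguments})`;
    case 'mcp_approval_request':
      return `approval requested for ${item.name} on ${item.server_label}`;
    default:
      // The remaining tool-call variants all expose a literal `type` tag too.
      return item.type;
  }
}
```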
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. 
+ */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +/** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ +export type ItemGetResponse = + | ItemGetResponse.OpenAIResponseMessage + | ItemGetResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ItemGetResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ItemGetResponse.OpenAIResponseOutputMessageFunctionToolCall + | ItemGetResponse.OpenAIResponseInputFunctionToolCallOutput + | ItemGetResponse.OpenAIResponseMcpApprovalRequest + | ItemGetResponse.OpenAIResponseMcpApprovalResponse + | ItemGetResponse.OpenAIResponseOutputMessageMcpCall + | ItemGetResponse.OpenAIResponseOutputMessageMcpListTools; + +export namespace ItemGetResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. 
+ */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +export interface ItemCreateParams { + /** + * Items to include in the conversation context. + */ + items: Array< + | ItemCreateParams.OpenAIResponseMessage + | ItemCreateParams.OpenAIResponseOutputMessageWebSearchToolCall + | ItemCreateParams.OpenAIResponseOutputMessageFileSearchToolCall + | ItemCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ItemCreateParams.OpenAIResponseInputFunctionToolCallOutput + | ItemCreateParams.OpenAIResponseMcpApprovalRequest + | ItemCreateParams.OpenAIResponseMcpApprovalResponse + | ItemCreateParams.OpenAIResponseOutputMessageMcpCall + | ItemCreateParams.OpenAIResponseOutputMessageMcpListTools + >; +} + +export namespace ItemCreateParams { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
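`ItemCreateParams` above is the request body for adding items to an existing conversation. A hedged usage sketch: the `client.conversations.items` accessor follows the resource layout implied by this file, while the conversation ID and text are placeholders:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient();

async function appendUserMessage(conversationId: string) {
  // Append one user message; `items` accepts any of the union variants above.
  const created = await client.conversations.items.create(conversationId, {
    items: [
      {
        type: 'message',
        role: 'user',
        content: [{ type: 'input_text', text: 'Summarize our discussion so far.' }],
      },
    ],
  });
  return created;
}
```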
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. + */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. 
+ */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } +} + +export interface ItemListParams extends OpenAICursorPageParams { + /** + * Specify additional output data to include in the response. + */ + include?: Array< + | 'web_search_call.action.sources' + | 'code_interpreter_call.outputs' + | 'computer_call_output.output.image_url' + | 'file_search_call.results' + | 'message.input_image.image_url' + | 'message.output_text.logprobs' + | 'reasoning.encrypted_content' + >; + + /** + * The order to return items in (asc or desc, default desc). + */ + order?: 'asc' | 'desc'; +} + +Items.ItemListResponsesOpenAICursorPage = ItemListResponsesOpenAICursorPage; + +export declare namespace Items { + export { + type ItemCreateResponse as ItemCreateResponse, + type ItemListResponse as ItemListResponse, + type ItemGetResponse as ItemGetResponse, + ItemListResponsesOpenAICursorPage as ItemListResponsesOpenAICursorPage, + type ItemCreateParams as ItemCreateParams, + type ItemListParams as ItemListParams, + }; +} diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts deleted file mode 100644 index 5ed6661..0000000 --- a/src/resources/datasets.ts +++ /dev/null @@ -1,407 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. 
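The deletion that begins here removes `src/resources/datasets.ts` from the top level; per this release's changelog, datasets moved under the beta namespace rather than disappearing. A hedged migration sketch, assuming the `client.beta.datasets` methods mirror the old surface:

```ts
import LlamaStackClient from 'llama-stack-client';

// Assumed relocation: the old `client.datasets.*` calls now live under `client.beta.datasets.*`.
async function previewDataset(client: LlamaStackClient, datasetId: string) {
  const page = await client.beta.datasets.iterrows(datasetId, { limit: 10 });
  for (const row of page.data) console.log(row);
  return page.has_more;
}
```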
See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import { isRequestOptions } from '../core';
-import * as Core from '../core';
-
-export class Datasets extends APIResource {
- /**
- * Get a dataset by its ID.
- */
- retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<DatasetRetrieveResponse> {
- return this._client.get(`/v1/datasets/${datasetId}`, options);
- }
-
- /**
- * List all datasets.
- */
- list(options?: Core.RequestOptions): Core.APIPromise<DatasetListResponse> {
- return (
- this._client.get('/v1/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Append rows to a dataset.
- */
- appendrows(
- datasetId: string,
- body: DatasetAppendrowsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<void> {
- return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-
- /**
- * Get a paginated list of rows from a dataset. Uses offset-based pagination where:
- *
- * - start_index: The starting index (0-based). If None, starts from beginning.
- * - limit: Number of items to return. If None or -1, returns all items.
- *
- * The response includes:
- *
- * - data: List of items for the current page.
- * - has_more: Whether there are more items available after this set.
- */
- iterrows(
- datasetId: string,
- query?: DatasetIterrowsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<DatasetIterrowsResponse>;
- iterrows(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<DatasetIterrowsResponse>;
- iterrows(
- datasetId: string,
- query: DatasetIterrowsParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise<DatasetIterrowsResponse> {
- if (isRequestOptions(query)) {
- return this.iterrows(datasetId, {}, query);
- }
- return this._client.get(`/v1/datasetio/iterrows/${datasetId}`, { query, ...options });
- }
-
- /**
- * Register a new dataset.
- */
- register(
- body: DatasetRegisterParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<DatasetRegisterResponse> {
- return this._client.post('/v1/datasets', { body, ...options });
- }
-
- /**
- * Unregister a dataset by its ID.
- */
- unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/datasets/${datasetId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * Response from listing datasets.
- */
-export interface ListDatasetsResponse {
- /**
- * List of datasets
- */
- data: DatasetListResponse;
-}
-
-/**
- * Dataset resource for storing and accessing training or evaluation data.
- */
-export interface DatasetRetrieveResponse {
- identifier: string;
-
- /**
- * Additional metadata for the dataset
- */
- metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- provider_id: string;
-
- /**
- * Purpose of the dataset indicating its intended use
- */
- purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
-
- /**
- * Data source configuration for the dataset
- */
- source: DatasetRetrieveResponse.UriDataSource | DatasetRetrieveResponse.RowsDataSource;
-
- /**
- * Type of resource, always 'dataset' for datasets
- */
- type: 'dataset';
-
- provider_resource_id?: string;
-}
-
-export namespace DatasetRetrieveResponse {
- /**
- * A dataset that can be obtained from a URI.
- */
- export interface UriDataSource {
- type: 'uri';
-
- /**
- * The dataset can be obtained from a URI. E.g.
- - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } -} - -/** - * List of datasets - */ -export type DatasetListResponse = Array; - -export namespace DatasetListResponse { - /** - * Dataset resource for storing and accessing training or evaluation data. - */ - export interface DatasetListResponseItem { - identifier: string; - - /** - * Additional metadata for the dataset - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - /** - * Purpose of the dataset indicating its intended use - */ - purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; - - /** - * Data source configuration for the dataset - */ - source: DatasetListResponseItem.UriDataSource | DatasetListResponseItem.RowsDataSource; - - /** - * Type of resource, always 'dataset' for datasets - */ - type: 'dataset'; - - provider_resource_id?: string; - } - - export namespace DatasetListResponseItem { - /** - * A dataset that can be obtained from a URI. - */ - export interface UriDataSource { - type: 'uri'; - - /** - * The dataset can be obtained from a URI. E.g. - - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } - } -} - -/** - * A generic paginated response that follows a simple format. - */ -export interface DatasetIterrowsResponse { - /** - * The list of items for the current page - */ - data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - /** - * Whether there are more items available after this set - */ - has_more: boolean; - - /** - * The URL for accessing this list - */ - url?: string; -} - -/** - * Dataset resource for storing and accessing training or evaluation data. - */ -export interface DatasetRegisterResponse { - identifier: string; - - /** - * Additional metadata for the dataset - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_id: string; - - /** - * Purpose of the dataset indicating its intended use - */ - purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; - - /** - * Data source configuration for the dataset - */ - source: DatasetRegisterResponse.UriDataSource | DatasetRegisterResponse.RowsDataSource; - - /** - * Type of resource, always 'dataset' for datasets - */ - type: 'dataset'; - - provider_resource_id?: string; -} - -export namespace DatasetRegisterResponse { - /** - * A dataset that can be obtained from a URI. - */ - export interface UriDataSource { - type: 'uri'; - - /** - * The dataset can be obtained from a URI. E.g. 
- - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } -} - -export interface DatasetAppendrowsParams { - /** - * The rows to append to the dataset. - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; -} - -export interface DatasetIterrowsParams { - /** - * The number of rows to get. - */ - limit?: number; - - /** - * Index into dataset for the first row to get. Get all rows if None. - */ - start_index?: number; -} - -export interface DatasetRegisterParams { - /** - * The purpose of the dataset. One of: - "post-training/messages": The dataset - * contains a messages column with list of messages for post-training. { - * "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": - * "assistant", "content": "Hello, world!"}, ] } - "eval/question-answer": The - * dataset contains a question column and an answer column for evaluation. { - * "question": "What is the capital of France?", "answer": "Paris" } - - * "eval/messages-answer": The dataset contains a messages column with list of - * messages and an answer column for evaluation. { "messages": [ {"role": "user", - * "content": "Hello, my name is John Doe."}, {"role": "assistant", "content": - * "Hello, John Doe. How can I help you today?"}, {"role": "user", "content": - * "What's my name?"}, ], "answer": "John Doe" } - */ - purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer'; - - /** - * The data source of the dataset. Ensure that the data source schema is compatible - * with the purpose of the dataset. Examples: - { "type": "uri", "uri": - * "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri": - * "lsfs://mydata.jsonl" } - { "type": "uri", "uri": - * "data:csv;base64,{base64_content}" } - { "type": "uri", "uri": - * "huggingface://llamastack/simpleqa?split=train" } - { "type": "rows", "rows": [ - * { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": - * "assistant", "content": "Hello, world!"}, ] } ] } - */ - source: DatasetRegisterParams.UriDataSource | DatasetRegisterParams.RowsDataSource; - - /** - * The ID of the dataset. If not provided, an ID will be generated. - */ - dataset_id?: string; - - /** - * The metadata for the dataset. - E.g. {"description": "My dataset"}. - */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; -} - -export namespace DatasetRegisterParams { - /** - * A dataset that can be obtained from a URI. - */ - export interface UriDataSource { - type: 'uri'; - - /** - * The dataset can be obtained from a URI. E.g. - - * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" - - * "data:csv;base64,{base64_content}" - */ - uri: string; - } - - /** - * A dataset stored in rows. - */ - export interface RowsDataSource { - /** - * The dataset is stored in rows. E.g. 
- [ {"messages": [{"role": "user", - * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]} - * ] - */ - rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; - - type: 'rows'; - } -} - -export declare namespace Datasets { - export { - type ListDatasetsResponse as ListDatasetsResponse, - type DatasetRetrieveResponse as DatasetRetrieveResponse, - type DatasetListResponse as DatasetListResponse, - type DatasetIterrowsResponse as DatasetIterrowsResponse, - type DatasetRegisterResponse as DatasetRegisterResponse, - type DatasetAppendrowsParams as DatasetAppendrowsParams, - type DatasetIterrowsParams as DatasetIterrowsParams, - type DatasetRegisterParams as DatasetRegisterParams, - }; -} diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 89758af..f07ff14 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -5,14 +5,14 @@ import * as Core from '../core'; export class Embeddings extends APIResource { /** - * Generate OpenAI-compatible embeddings for the given input using the specified - * model. + * Create embeddings. Generate OpenAI-compatible embeddings for the given input + * using the specified model. */ create( body: EmbeddingCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/v1/openai/v1/embeddings', { body, ...options }); + return this._client.post('/v1/embeddings', { body, ...options }); } } diff --git a/src/resources/eval/eval.ts b/src/resources/eval/eval.ts deleted file mode 100644 index 961b24e..0000000 --- a/src/resources/eval/eval.ts +++ /dev/null @@ -1,210 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../resource'; -import * as Core from '../../core'; -import * as ScoringFunctionsAPI from '../scoring-functions'; -import * as Shared from '../shared'; -import * as JobsAPI from './jobs'; -import { Jobs } from './jobs'; - -export class Eval extends APIResource { - jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); - - /** - * Evaluate a list of rows on a benchmark. - */ - evaluateRows( - benchmarkId: string, - body: EvalEvaluateRowsParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); - } - - /** - * Evaluate a list of rows on a benchmark. - */ - evaluateRowsAlpha( - benchmarkId: string, - body: EvalEvaluateRowsAlphaParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options }); - } - - /** - * Run an evaluation on a benchmark. - */ - runEval(benchmarkId: string, body: EvalRunEvalParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); - } - - /** - * Run an evaluation on a benchmark. - */ - runEvalAlpha( - benchmarkId: string, - body: EvalRunEvalAlphaParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options }); - } -} - -/** - * A benchmark configuration for evaluation. - */ -export interface BenchmarkConfig { - /** - * The candidate to evaluate. 
-/**
- * A benchmark configuration for evaluation.
- */
-export interface BenchmarkConfig {
- /**
- * The candidate to evaluate.
- */
- eval_candidate: EvalCandidate;
-
- /**
- * Map between scoring function id and parameters for each scoring function you
- * want to run
- */
- scoring_params: { [key: string]: ScoringFunctionsAPI.ScoringFnParams };
-
- /**
- * (Optional) The number of examples to evaluate. If not provided, all examples in
- * the dataset will be evaluated
- */
- num_examples?: number;
-}
-
-/**
- * A model candidate for evaluation.
- */
-export type EvalCandidate = EvalCandidate.ModelCandidate | EvalCandidate.AgentCandidate;
-
-export namespace EvalCandidate {
- /**
- * A model candidate for evaluation.
- */
- export interface ModelCandidate {
- /**
- * The model ID to evaluate.
- */
- model: string;
-
- /**
- * The sampling parameters for the model.
- */
- sampling_params: Shared.SamplingParams;
-
- type: 'model';
-
- /**
- * (Optional) The system message providing instructions or context to the model.
- */
- system_message?: Shared.SystemMessage;
- }
-
- /**
- * An agent candidate for evaluation.
- */
- export interface AgentCandidate {
- /**
- * The configuration for the agent candidate.
- */
- config: Shared.AgentConfig;
-
- type: 'agent';
- }
-}
-
-/**
- * The response from an evaluation.
- */
-export interface EvaluateResponse {
- /**
- * The generations from the evaluation.
- */
- generations: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- /**
- * The scores from the evaluation.
- */
- scores: { [key: string]: Shared.ScoringResult };
-}
-
-/**
- * A job execution instance with status tracking.
- */
-export interface Job {
- /**
- * Unique identifier for the job
- */
- job_id: string;
-
- /**
- * Current execution status of the job
- */
- status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled';
-}
-
-export interface EvalEvaluateRowsParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-
- /**
- * The rows to evaluate.
- */
- input_rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- /**
- * The scoring functions to use for the evaluation.
- */
- scoring_functions: Array<string>;
-}
-
-export interface EvalEvaluateRowsAlphaParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-
- /**
- * The rows to evaluate.
- */
- input_rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- /**
- * The scoring functions to use for the evaluation.
- */
- scoring_functions: Array<string>;
-}
-
-export interface EvalRunEvalParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-}
-
-export interface EvalRunEvalAlphaParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-}
-
-Eval.Jobs = Jobs;
-
-export declare namespace Eval {
- export {
- type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
- type EvaluateResponse as EvaluateResponse,
- type Job as Job,
- type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams as EvalRunEvalParams,
- type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
- };
-
- export { Jobs as Jobs };
-}
diff --git a/src/resources/eval/index.ts b/src/resources/eval/index.ts
deleted file mode 100644
index e8c35f3..0000000
--- a/src/resources/eval/index.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Eval,
- type BenchmarkConfig,
- type EvalCandidate,
- type EvaluateResponse,
- type Job,
- type EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams,
- type EvalRunEvalAlphaParams,
-} from './eval';
-export { Jobs } from './jobs';
diff --git a/src/resources/eval/jobs.ts b/src/resources/eval/jobs.ts
deleted file mode 100644
index 13d4a4d..0000000
--- a/src/resources/eval/jobs.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as EvalAPI from './eval';
-
-export class Jobs extends APIResource {
- /**
- * Get the result of a job.
- */
- retrieve(
- benchmarkId: string,
- jobId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise<EvalAPI.EvaluateResponse> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options);
- }
-
- /**
- * Cancel a job.
- */
- cancel(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-
- /**
- * Get the status of a job.
- */
- status(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<EvalAPI.Job> {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options);
- }
-}
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 4dc5223..e59026e 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -7,25 +7,27 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../pagination';
 export class Files extends APIResource {
   /**
-   * Upload a file that can be used across various endpoints. The file upload should
-   * be a multipart form request with:
+   * Upload file. Upload a file that can be used across various endpoints.
+   *
+   * The file upload should be a multipart form request with:
    *
    * - file: The File object (not file name) to be uploaded.
    * - purpose: The intended purpose of the uploaded file.
+   * - expires_after: Optional form values describing expiration for the file.
    */
   create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise<File> {
-    return this._client.post('/v1/openai/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
+    return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
   }

   /**
-   * Returns information about a specific file.
+   * Retrieve file. Returns information about a specific file.
    */
   retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise<File> {
-    return this._client.get(`/v1/openai/v1/files/${fileId}`, options);
+    return this._client.get(`/v1/files/${fileId}`, options);
   }

   /**
-   * Returns a list of files that belong to the user's organization.
+   * List files. Returns a list of files that belong to the user's organization.
    */
   list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
   list(options?: Core.RequestOptions): Core.PagePromise<FilesOpenAICursorPage, File>;
@@ -36,21 +38,21 @@ export class Files extends APIResource {
     if (isRequestOptions(query)) {
       return this.list({}, query);
     }
-    return this._client.getAPIList('/v1/openai/v1/files', FilesOpenAICursorPage, { query, ...options });
+    return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options });
   }

   /**
-   * Delete a file.
+   * Delete file.
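Alongside the `/v1/openai/v1` → `/v1` route change above, uploads gain the optional `expires_after` form field (its exact shape appears in `FileCreateParams` just below). A usage sketch; `toFile` is the Stainless upload helper and is assumed to be re-exported by this package:

```ts
import LlamaStackClient, { toFile } from 'llama-stack-client';

const client = new LlamaStackClient();

async function uploadWithTtl() {
  return client.files.create({
    file: await toFile(Buffer.from('hello world'), 'notes.txt'),
    purpose: 'assistants',
    // anchor must be 'created_at'; seconds must be 3600..2592000 (1 hour to 30 days).
    expires_after: { anchor: 'created_at', seconds: 86_400 },
  });
}
```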
*/ delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/files/${fileId}`, options); + return this._client.delete(`/v1/files/${fileId}`, options); } /** - * Returns the contents of the specified file. + * Retrieve file content. Returns the contents of the specified file. */ content(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/files/${fileId}/content`, options); + return this._client.get(`/v1/files/${fileId}/content`, options); } } @@ -155,6 +157,28 @@ export interface FileCreateParams { * Valid purpose values for OpenAI Files API. */ purpose: 'assistants' | 'batch'; + + /** + * Control expiration of uploaded files. Params: + * + * - anchor, must be "created_at" + * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days) + */ + expires_after?: FileCreateParams.ExpiresAfter; +} + +export namespace FileCreateParams { + /** + * Control expiration of uploaded files. Params: + * + * - anchor, must be "created_at" + * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days) + */ + export interface ExpiresAfter { + anchor: 'created_at'; + + seconds: number; + } } export interface FileListParams extends OpenAICursorPageParams { diff --git a/src/resources/index.ts b/src/resources/index.ts index 58ad928..b255e39 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -1,26 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export * from './shared'; -export { - Agents, - type InferenceStep, - type MemoryRetrievalStep, - type ShieldCallStep, - type ToolExecutionStep, - type ToolResponse, - type AgentCreateResponse, - type AgentRetrieveResponse, - type AgentListResponse, - type AgentCreateParams, - type AgentListParams, -} from './agents/agents'; -export { - Benchmarks, - type Benchmark, - type ListBenchmarksResponse, - type BenchmarkListResponse, - type BenchmarkRegisterParams, -} from './benchmarks'; +export { Alpha } from './alpha/alpha'; +export { Beta } from './beta/beta'; export { Chat, type ChatCompletionChunk } from './chat/chat'; export { Completions, @@ -30,28 +12,13 @@ export { type CompletionCreateParamsStreaming, } from './completions'; export { - Datasets, - type ListDatasetsResponse, - type DatasetRetrieveResponse, - type DatasetListResponse, - type DatasetIterrowsResponse, - type DatasetRegisterResponse, - type DatasetAppendrowsParams, - type DatasetIterrowsParams, - type DatasetRegisterParams, -} from './datasets'; + Conversations, + type ConversationObject, + type ConversationDeleteResponse, + type ConversationCreateParams, + type ConversationUpdateParams, +} from './conversations/conversations'; export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings'; -export { - Eval, - type BenchmarkConfig, - type EvalCandidate, - type EvaluateResponse, - type Job, - type EvalEvaluateRowsParams, - type EvalEvaluateRowsAlphaParams, - type EvalRunEvalParams, - type EvalRunEvalAlphaParams, -} from './eval/eval'; export { FilesOpenAICursorPage, Files, @@ -62,25 +29,6 @@ export { type FileCreateParams, type FileListParams, } from './files'; -export { - Inference, - type ChatCompletionResponseStreamChunk, - type CompletionResponse, - type EmbeddingsResponse, - type TokenLogProbs, - type InferenceBatchChatCompletionResponse, - type InferenceRerankResponse, - type InferenceBatchChatCompletionParams, - type InferenceBatchCompletionParams, - type 
InferenceChatCompletionParams, - type InferenceChatCompletionParamsNonStreaming, - type InferenceChatCompletionParamsStreaming, - type InferenceCompletionParams, - type InferenceCompletionParamsNonStreaming, - type InferenceCompletionParamsStreaming, - type InferenceEmbeddingsParams, - type InferenceRerankParams, -} from './inference'; export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect'; export { Models, @@ -90,14 +38,6 @@ export { type ModelRegisterParams, } from './models/models'; export { Moderations, type CreateResponse, type ModerationCreateParams } from './moderations'; -export { - PostTraining, - type AlgorithmConfig, - type ListPostTrainingJobsResponse, - type PostTrainingJob, - type PostTrainingPreferenceOptimizeParams, - type PostTrainingSupervisedFineTuneParams, -} from './post-training/post-training'; export { Providers, type ListProvidersResponse, type ProviderListResponse } from './providers'; export { ResponseListResponsesOpenAICursorPage, @@ -111,7 +51,7 @@ export { type ResponseCreateParamsStreaming, type ResponseListParams, } from './responses/responses'; -export { Routes, type ListRoutesResponse, type RouteListResponse } from './routes'; +export { Routes, type ListRoutesResponse, type RouteListResponse, type RouteListParams } from './routes'; export { Safety, type RunShieldResponse, type SafetyRunShieldParams } from './safety'; export { Scoring, @@ -140,25 +80,6 @@ export { type SyntheticDataGenerationResponse, type SyntheticDataGenerationGenerateParams, } from './synthetic-data-generation'; -export { - Telemetry, - type Event, - type QueryCondition, - type QuerySpansResponse, - type SpanWithStatus, - type Trace, - type TelemetryGetSpanResponse, - type TelemetryGetSpanTreeResponse, - type TelemetryQueryMetricsResponse, - type TelemetryQuerySpansResponse, - type TelemetryQueryTracesResponse, - type TelemetryGetSpanTreeParams, - type TelemetryLogEventParams, - type TelemetryQueryMetricsParams, - type TelemetryQuerySpansParams, - type TelemetryQueryTracesParams, - type TelemetrySaveSpansToDatasetParams, -} from './telemetry'; export { ToolRuntime, type ToolDef, @@ -174,21 +95,7 @@ export { type ToolgroupListResponse, type ToolgroupRegisterParams, } from './toolgroups'; -export { - Tools, - type ListToolsResponse, - type Tool, - type ToolListResponse, - type ToolListParams, -} from './tools'; -export { - VectorDBs, - type ListVectorDBsResponse, - type VectorDBRetrieveResponse, - type VectorDBListResponse, - type VectorDBRegisterResponse, - type VectorDBRegisterParams, -} from './vector-dbs'; +export { Tools, type ToolListResponse, type ToolListParams } from './tools'; export { VectorIo, type QueryChunksResponse, diff --git a/src/resources/inference.ts b/src/resources/inference.ts deleted file mode 100644 index a6f3e1e..0000000 --- a/src/resources/inference.ts +++ /dev/null @@ -1,762 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../resource'; -import { APIPromise } from '../core'; -import * as Core from '../core'; -import * as InferenceAPI from './inference'; -import * as Shared from './shared'; -import { Stream } from '../streaming'; - -export class Inference extends APIResource { - /** - * Generate chat completions for a batch of messages using the specified model. 
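Taken together, the re-exports above are the visible half of the reorganization: `Datasets`, `Eval`, `PostTraining`, and friends leave the top level, while `Alpha` and `Beta` arrive as namespaces. A rough sketch of the new access path; the exact method names under `beta` are assumed to mirror the removed top-level resource and are not shown in this diff:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

// Datasets moved under the beta namespace (hypothetical path, mirroring
// the removed top-level client.datasets resource).
const datasets = await client.beta.datasets.list();
console.log(datasets);
```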
- */ - batchChatCompletion( - body: InferenceBatchChatCompletionParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/inference/batch-chat-completion', { body, ...options }); - } - - /** - * Generate completions for a batch of content using the specified model. - */ - batchCompletion( - body: InferenceBatchCompletionParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/inference/batch-completion', { body, ...options }); - } - - /** - * Generate a chat completion for the given messages using the specified model. - * - * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions. - */ - chatCompletion( - body: InferenceChatCompletionParamsNonStreaming, - options?: Core.RequestOptions, - ): APIPromise; - chatCompletion( - body: InferenceChatCompletionParamsStreaming, - options?: Core.RequestOptions, - ): APIPromise>; - chatCompletion( - body: InferenceChatCompletionParamsBase, - options?: Core.RequestOptions, - ): APIPromise | Shared.ChatCompletionResponse>; - chatCompletion( - body: InferenceChatCompletionParams, - options?: Core.RequestOptions, - ): APIPromise | APIPromise> { - return this._client.post('/v1/inference/chat-completion', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; - } - - /** - * Generate a completion for the given content using the specified model. - * - * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions. - */ - completion( - body: InferenceCompletionParamsNonStreaming, - options?: Core.RequestOptions, - ): APIPromise; - completion( - body: InferenceCompletionParamsStreaming, - options?: Core.RequestOptions, - ): APIPromise>; - completion( - body: InferenceCompletionParamsBase, - options?: Core.RequestOptions, - ): APIPromise | CompletionResponse>; - completion( - body: InferenceCompletionParams, - options?: Core.RequestOptions, - ): APIPromise | APIPromise> { - return this._client.post('/v1/inference/completion', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; - } - - /** - * Generate embeddings for content pieces using the specified model. - * - * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings. - */ - embeddings( - body: InferenceEmbeddingsParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/inference/embeddings', { body, ...options }); - } - - /** - * Rerank a list of documents based on their relevance to a query. - */ - rerank( - body: InferenceRerankParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post('/v1/inference/rerank', { body, ...options }) as Core.APIPromise<{ - data: InferenceRerankResponse; - }> - )._thenUnwrap((obj) => obj.data); - } -} - -/** - * A chunk of a streamed chat completion response. - */ -export interface ChatCompletionResponseStreamChunk { - /** - * The event containing the new content - */ - event: ChatCompletionResponseStreamChunk.Event; - - /** - * (Optional) List of metrics associated with the API response - */ - metrics?: Array; -} - -export namespace ChatCompletionResponseStreamChunk { - /** - * The event containing the new content - */ - export interface Event { - /** - * Content generated since last event. This can be one or more tokens, or a tool - * call. 
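Everything in this deleted file was either already deprecated in favor of the OpenAI-compatible routes or has moved (rerank goes to the alpha namespace per the changelog). A hedged migration sketch for the most common call, assuming the `chat.completions` resource re-exported above keeps the OpenAI-style parameter names:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

// Before (removed in this release):
//   await client.inference.chatCompletion({ model_id, messages });

// After: OpenAI-compatible chat completions, now served from /v1/chat/completions.
const completion = await client.chat.completions.create({
  model: 'llama3.2:3b', // placeholder model id
  messages: [{ role: 'user', content: 'Hello!' }],
});
```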
- */ - delta: Shared.ContentDelta; - - /** - * Type of the event - */ - event_type: 'start' | 'complete' | 'progress'; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - /** - * Optional reason why generation stopped, if complete - */ - stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; - } -} - -/** - * Response from a completion request. - */ -export interface CompletionResponse { - /** - * The generated completion text - */ - content: string; - - /** - * Reason why generation stopped - */ - stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens'; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - /** - * (Optional) List of metrics associated with the API response - */ - metrics?: Array; -} - -/** - * Response containing generated embeddings. - */ -export interface EmbeddingsResponse { - /** - * List of embedding vectors, one per input content. Each embedding is a list of - * floats. The dimensionality of the embedding is model-specific; you can check - * model metadata using /models/{model_id} - */ - embeddings: Array>; -} - -/** - * Log probabilities for generated tokens. - */ -export interface TokenLogProbs { - /** - * Dictionary mapping tokens to their log probabilities - */ - logprobs_by_token: { [key: string]: number }; -} - -/** - * Response from a batch chat completion request. - */ -export interface InferenceBatchChatCompletionResponse { - /** - * List of chat completion responses, one for each conversation in the batch - */ - batch: Array; -} - -/** - * List of rerank result objects, sorted by relevance score (descending) - */ -export type InferenceRerankResponse = Array; - -export namespace InferenceRerankResponse { - /** - * A single rerank result from a reranking response. - */ - export interface InferenceRerankResponseItem { - /** - * The original index of the document in the input list - */ - index: number; - - /** - * The relevance score from the model output. Values are inverted when applicable - * so that higher scores indicate greater relevance. - */ - relevance_score: number; - } -} - -export interface InferenceBatchChatCompletionParams { - /** - * The messages to generate completions for. - */ - messages_batch: Array>; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceBatchChatCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. - */ - response_format?: Shared.ResponseFormat; - - /** - * (Optional) Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; - - /** - * (Optional) Configuration for tool use. - */ - tool_config?: InferenceBatchChatCompletionParams.ToolConfig; - - /** - * (Optional) List of tool definitions available to the model. - */ - tools?: Array; -} - -export namespace InferenceBatchChatCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } - - /** - * (Optional) Configuration for tool use. - */ - export interface ToolConfig { - /** - * (Optional) Config for how to override the default system prompt. 
- - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. - */ - system_message_behavior?: 'append' | 'replace'; - - /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. - */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); - - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - } - - export interface Tool { - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - description?: string; - - parameters?: { [key: string]: Shared.ToolParamDefinition }; - } -} - -export interface InferenceBatchCompletionParams { - /** - * The content to generate completions for. - */ - content_batch: Array; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceBatchCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. - */ - response_format?: Shared.ResponseFormat; - - /** - * (Optional) Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; -} - -export namespace InferenceBatchCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } -} - -export type InferenceChatCompletionParams = - | InferenceChatCompletionParamsNonStreaming - | InferenceChatCompletionParamsStreaming; - -export interface InferenceChatCompletionParamsBase { - /** - * List of messages in the conversation. - */ - messages: Array; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceChatCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. There are two - * options: - `ResponseFormat.json_schema`: The grammar is a JSON schema. Most - * providers support this format. - `ResponseFormat.grammar`: The grammar is a BNF - * grammar. This format is more flexible, but not all providers support it. - */ - response_format?: Shared.ResponseFormat; - - /** - * Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; - - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. 
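Note the `Tool` shape above still carries a `parameters` map of `ToolParamDefinition`; the breaking change called out in the changelog replaces that with a single JSON-schema `input_schema` (the shape visible on the MCP tool type later in this diff). A before/after sketch of a tool definition; the old side's field names come from this file, the new side assumes a standard JSON schema object:

```ts
// Before: per-parameter definitions under `parameters`.
const oldTool = {
  tool_name: 'get_weather',
  description: 'Look up the weather for a city',
  parameters: { city: { param_type: 'string', required: true } },
};

// After: one JSON schema under `input_schema`.
const newTool = {
  tool_name: 'get_weather',
  description: 'Look up the weather for a city',
  input_schema: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  },
};
```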
- */ - stream?: boolean; - - /** - * (Optional) Whether tool use is required or automatic. Defaults to - * ToolChoice.auto. .. deprecated:: Use tool_config instead. - */ - tool_choice?: 'auto' | 'required' | 'none'; - - /** - * (Optional) Configuration for tool use. - */ - tool_config?: InferenceChatCompletionParams.ToolConfig; - - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. .. deprecated:: Use - * tool_config instead. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - - /** - * (Optional) List of tool definitions available to the model. - */ - tools?: Array; -} - -export namespace InferenceChatCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } - - /** - * (Optional) Configuration for tool use. - */ - export interface ToolConfig { - /** - * (Optional) Config for how to override the default system prompt. - - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. - */ - system_message_behavior?: 'append' | 'replace'; - - /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. - */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); - - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - } - - export interface Tool { - tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - description?: string; - - parameters?: { [key: string]: Shared.ToolParamDefinition }; - } - - export type InferenceChatCompletionParamsNonStreaming = - InferenceAPI.InferenceChatCompletionParamsNonStreaming; - export type InferenceChatCompletionParamsStreaming = InferenceAPI.InferenceChatCompletionParamsStreaming; -} - -export interface InferenceChatCompletionParamsNonStreaming extends InferenceChatCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: false; -} - -export interface InferenceChatCompletionParamsStreaming extends InferenceChatCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. 
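The `stream: true` / `stream?: false` overload pairing deleted here is the same pattern the surviving OpenAI-style endpoints use, so streaming consumption keeps the same shape. A sketch assuming the async-iterable `Stream` from `../streaming` and OpenAI-style chunk fields:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

// Passing stream: true selects the Stream<...> overload (SSE under the hood).
const stream = await client.chat.completions.create({
  model: 'llama3.2:3b', // placeholder model id
  messages: [{ role: 'user', content: 'Write a haiku.' }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
}
```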
- */ - stream: true; -} - -export type InferenceCompletionParams = - | InferenceCompletionParamsNonStreaming - | InferenceCompletionParamsStreaming; - -export interface InferenceCompletionParamsBase { - /** - * The content to generate a completion for. - */ - content: Shared.InterleavedContent; - - /** - * The identifier of the model to use. The model must be registered with Llama - * Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - logprobs?: InferenceCompletionParams.Logprobs; - - /** - * (Optional) Grammar specification for guided (structured) decoding. - */ - response_format?: Shared.ResponseFormat; - - /** - * (Optional) Parameters to control the sampling strategy. - */ - sampling_params?: Shared.SamplingParams; - - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: boolean; -} - -export namespace InferenceCompletionParams { - /** - * (Optional) If specified, log probabilities for each token position will be - * returned. - */ - export interface Logprobs { - /** - * How many tokens (for each position) to return log probabilities for. - */ - top_k?: number; - } - - export type InferenceCompletionParamsNonStreaming = InferenceAPI.InferenceCompletionParamsNonStreaming; - export type InferenceCompletionParamsStreaming = InferenceAPI.InferenceCompletionParamsStreaming; -} - -export interface InferenceCompletionParamsNonStreaming extends InferenceCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream?: false; -} - -export interface InferenceCompletionParamsStreaming extends InferenceCompletionParamsBase { - /** - * (Optional) If True, generate an SSE event stream of the response. Defaults to - * False. - */ - stream: true; -} - -export interface InferenceEmbeddingsParams { - /** - * List of contents to generate embeddings for. Each content can be a string or an - * InterleavedContentItem (and hence can be multimodal). The behavior depends on - * the model and provider. Some models may only support text. - */ - contents: Array | Array; - - /** - * The identifier of the model to use. The model must be an embedding model - * registered with Llama Stack and available via the /models endpoint. - */ - model_id: string; - - /** - * (Optional) Output dimensionality for the embeddings. Only supported by - * Matryoshka models. - */ - output_dimension?: number; - - /** - * (Optional) How is the embedding being used? This is only supported by asymmetric - * embedding models. - */ - task_type?: 'query' | 'document'; - - /** - * (Optional) Config for how to truncate text for embedding when text is longer - * than the model's max sequence length. - */ - text_truncation?: 'none' | 'start' | 'end'; -} - -export interface InferenceRerankParams { - /** - * List of items to rerank. Each item can be a string, text content part, or image - * content part. Each input must not exceed the model's max input token length. - */ - items: Array< - | string - | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam - | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam - >; - - /** - * The identifier of the reranking model to use. - */ - model: string; - - /** - * The search query to rank items against. Can be a string, text content part, or - * image content part. The input must not exceed the model's max input token - * length. 
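The rerank parameters deleted here survive the move announced in the changelog ("moving { rerank, agents } to `client.alpha.`"); only the access path changes. A sketch with the exact alpha path assumed, since it is not shown in this diff:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

// Hypothetical new path; the params mirror InferenceRerankParams above.
const results = await client.alpha.inference.rerank({
  model: 'my-reranker', // placeholder reranking model id
  query: 'What is the capital of France?',
  items: ['Paris is the capital of France.', 'Berlin is in Germany.'],
  max_num_results: 1,
});

// Items come back sorted by relevance_score, highest first.
for (const r of results) console.log(r.index, r.relevance_score);
```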
- */ - query: - | string - | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam - | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam; - - /** - * (Optional) Maximum number of results to return. Default: returns all. - */ - max_num_results?: number; -} - -export namespace InferenceRerankParams { - /** - * Text content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartTextParam { - /** - * The text content of the message - */ - text: string; - - /** - * Must be "text" to identify this as text content - */ - type: 'text'; - } - - /** - * Image content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; - - /** - * Must be "image_url" to identify this as image content - */ - type: 'image_url'; - } - - export namespace OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - export interface ImageURL { - /** - * URL of the image to include in the message - */ - url: string; - - /** - * (Optional) Level of detail for image processing. Can be "low", "high", or "auto" - */ - detail?: string; - } - } - - /** - * Text content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartTextParam { - /** - * The text content of the message - */ - text: string; - - /** - * Must be "text" to identify this as text content - */ - type: 'text'; - } - - /** - * Image content part for OpenAI-compatible chat completion messages. - */ - export interface OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; - - /** - * Must be "image_url" to identify this as image content - */ - type: 'image_url'; - } - - export namespace OpenAIChatCompletionContentPartImageParam { - /** - * Image URL specification and processing details - */ - export interface ImageURL { - /** - * URL of the image to include in the message - */ - url: string; - - /** - * (Optional) Level of detail for image processing. 
Can be "low", "high", or "auto" - */ - detail?: string; - } - } -} - -export declare namespace Inference { - export { - type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk, - type CompletionResponse as CompletionResponse, - type EmbeddingsResponse as EmbeddingsResponse, - type TokenLogProbs as TokenLogProbs, - type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse, - type InferenceRerankResponse as InferenceRerankResponse, - type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams, - type InferenceBatchCompletionParams as InferenceBatchCompletionParams, - type InferenceChatCompletionParams as InferenceChatCompletionParams, - type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming, - type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming, - type InferenceCompletionParams as InferenceCompletionParams, - type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming, - type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming, - type InferenceEmbeddingsParams as InferenceEmbeddingsParams, - type InferenceRerankParams as InferenceRerankParams, - }; -} diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts index 4e5d87c..0c10896 100644 --- a/src/resources/inspect.ts +++ b/src/resources/inspect.ts @@ -5,14 +5,14 @@ import * as Core from '../core'; export class Inspect extends APIResource { /** - * Get the current health status of the service. + * Get health status. Get the current health status of the service. */ health(options?: Core.RequestOptions): Core.APIPromise { return this._client.get('/v1/health', options); } /** - * Get the version of the service. + * Get version. Get the version of the service. */ version(options?: Core.RequestOptions): Core.APIPromise { return this._client.get('/v1/version', options); diff --git a/src/resources/models/index.ts b/src/resources/models/index.ts index de6ecf3..e05a022 100644 --- a/src/resources/models/index.ts +++ b/src/resources/models/index.ts @@ -7,4 +7,4 @@ export { type ModelListResponse, type ModelRegisterParams, } from './models'; -export { OpenAI, type OpenAIListResponse } from './openai'; +export { OpenAI } from './openai'; diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts index d72281f..b5d04cd 100644 --- a/src/resources/models/models.ts +++ b/src/resources/models/models.ts @@ -3,13 +3,13 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; import * as OpenAIAPI from './openai'; -import { OpenAI, OpenAIListResponse } from './openai'; +import { OpenAI } from './openai'; export class Models extends APIResource { openai: OpenAIAPI.OpenAI = new OpenAIAPI.OpenAI(this._client); /** - * Get a model by its identifier. + * Get model. Get a model by its identifier. */ retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/v1/models/${modelId}`, options); @@ -25,14 +25,14 @@ export class Models extends APIResource { } /** - * Register a model. + * Register model. Register a model. */ register(body: ModelRegisterParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/models', { body, ...options }); } /** - * Unregister a model. + * Unregister model. Unregister a model. 
*/ unregister(modelId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/v1/models/${modelId}`, { @@ -63,7 +63,7 @@ export interface Model { /** * The type of model (LLM or embedding model) */ - model_type: 'llm' | 'embedding'; + model_type: 'llm' | 'embedding' | 'rerank'; /** * ID of the provider that owns this resource @@ -97,7 +97,7 @@ export interface ModelRegisterParams { /** * The type of model to register. */ - model_type?: 'llm' | 'embedding'; + model_type?: 'llm' | 'embedding' | 'rerank'; /** * The identifier of the provider. @@ -120,5 +120,5 @@ export declare namespace Models { type ModelRegisterParams as ModelRegisterParams, }; - export { OpenAI as OpenAI, type OpenAIListResponse as OpenAIListResponse }; + export { OpenAI as OpenAI }; } diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts index bcdef6f..c6b90d1 100644 --- a/src/resources/models/openai.ts +++ b/src/resources/models/openai.ts @@ -2,35 +2,15 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; +import * as ModelsAPI from './models'; export class OpenAI extends APIResource { /** - * List models using the OpenAI API. + * List all models. */ - list(options?: Core.RequestOptions): Core.APIPromise { + list(options?: Core.RequestOptions): Core.APIPromise { return ( - this._client.get('/v1/openai/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }> + this._client.get('/v1/models', options) as Core.APIPromise<{ data: ModelsAPI.ModelListResponse }> )._thenUnwrap((obj) => obj.data); } } - -export type OpenAIListResponse = Array; - -export namespace OpenAIListResponse { - /** - * A model from OpenAI. - */ - export interface OpenAIListResponseItem { - id: string; - - created: number; - - object: 'model'; - - owned_by: string; - } -} - -export declare namespace OpenAI { - export { type OpenAIListResponse as OpenAIListResponse }; -} diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index a945ab3..40bf49c 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -5,10 +5,11 @@ import * as Core from '../core'; export class Moderations extends APIResource { /** - * Classifies if text and/or image inputs are potentially harmful. + * Create moderation. Classifies if text and/or image inputs are potentially + * harmful. */ create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/openai/v1/moderations', { body, ...options }); + return this._client.post('/v1/moderations', { body, ...options }); } } @@ -71,9 +72,9 @@ export interface ModerationCreateParams { input: string | Array; /** - * The content moderation model you would like to use. + * (Optional) The content moderation model you would like to use. */ - model: string; + model?: string; } export declare namespace Moderations { diff --git a/src/resources/post-training/index.ts b/src/resources/post-training/index.ts deleted file mode 100644 index 6fc7e36..0000000 --- a/src/resources/post-training/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
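Two small but visible changes to moderations above: the route drops the `openai/v1` prefix, and `model` becomes optional, so the server's configured safety model can be used by default. A minimal sketch:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

// POST /v1/moderations; `model` may now be omitted.
const moderation = await client.moderations.create({
  input: 'Some user-provided text to classify',
});
console.log(moderation);
```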
- -export { - Job, - type JobListResponse, - type JobArtifactsResponse, - type JobStatusResponse, - type JobArtifactsParams, - type JobCancelParams, - type JobStatusParams, -} from './job'; -export { - PostTraining, - type AlgorithmConfig, - type ListPostTrainingJobsResponse, - type PostTrainingJob, - type PostTrainingPreferenceOptimizeParams, - type PostTrainingSupervisedFineTuneParams, -} from './post-training'; diff --git a/src/resources/post-training/job.ts b/src/resources/post-training/job.ts deleted file mode 100644 index a250ac9..0000000 --- a/src/resources/post-training/job.ts +++ /dev/null @@ -1,268 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../resource'; -import * as Core from '../../core'; -import * as PostTrainingAPI from './post-training'; - -export class Job extends APIResource { - /** - * Get all training jobs. - */ - list( - options?: Core.RequestOptions, - ): Core.APIPromise> { - return ( - this._client.get('/v1/post-training/jobs', options) as Core.APIPromise<{ - data: Array; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Get the artifacts of a training job. - */ - artifacts(query: JobArtifactsParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get('/v1/post-training/job/artifacts', { query, ...options }); - } - - /** - * Cancel a training job. - */ - cancel(body: JobCancelParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/post-training/job/cancel', { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } - - /** - * Get the status of a training job. - */ - status(query: JobStatusParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get('/v1/post-training/job/status', { query, ...options }); - } -} - -export type JobListResponse = Array; - -export namespace JobListResponse { - export interface JobListResponseItem { - job_uuid: string; - } -} - -/** - * Artifacts of a finetuning job. - */ -export interface JobArtifactsResponse { - /** - * List of model checkpoints created during training - */ - checkpoints: Array; - - /** - * Unique identifier for the training job - */ - job_uuid: string; -} - -export namespace JobArtifactsResponse { - /** - * Checkpoint created during training runs. - */ - export interface Checkpoint { - /** - * Timestamp when the checkpoint was created - */ - created_at: string; - - /** - * Training epoch when the checkpoint was saved - */ - epoch: number; - - /** - * Unique identifier for the checkpoint - */ - identifier: string; - - /** - * File system path where the checkpoint is stored - */ - path: string; - - /** - * Identifier of the training job that created this checkpoint - */ - post_training_job_id: string; - - /** - * (Optional) Training metrics associated with this checkpoint - */ - training_metrics?: Checkpoint.TrainingMetrics; - } - - export namespace Checkpoint { - /** - * (Optional) Training metrics associated with this checkpoint - */ - export interface TrainingMetrics { - /** - * Training epoch number - */ - epoch: number; - - /** - * Perplexity metric indicating model confidence - */ - perplexity: number; - - /** - * Loss value on the training dataset - */ - train_loss: number; - - /** - * Loss value on the validation dataset - */ - validation_loss: number; - } - } -} - -/** - * Status of a finetuning job. 
- */ -export interface JobStatusResponse { - /** - * List of model checkpoints created during training - */ - checkpoints: Array; - - /** - * Unique identifier for the training job - */ - job_uuid: string; - - /** - * Current status of the training job - */ - status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled'; - - /** - * (Optional) Timestamp when the job finished, if completed - */ - completed_at?: string; - - /** - * (Optional) Information about computational resources allocated to the job - */ - resources_allocated?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) Timestamp when the job was scheduled - */ - scheduled_at?: string; - - /** - * (Optional) Timestamp when the job execution began - */ - started_at?: string; -} - -export namespace JobStatusResponse { - /** - * Checkpoint created during training runs. - */ - export interface Checkpoint { - /** - * Timestamp when the checkpoint was created - */ - created_at: string; - - /** - * Training epoch when the checkpoint was saved - */ - epoch: number; - - /** - * Unique identifier for the checkpoint - */ - identifier: string; - - /** - * File system path where the checkpoint is stored - */ - path: string; - - /** - * Identifier of the training job that created this checkpoint - */ - post_training_job_id: string; - - /** - * (Optional) Training metrics associated with this checkpoint - */ - training_metrics?: Checkpoint.TrainingMetrics; - } - - export namespace Checkpoint { - /** - * (Optional) Training metrics associated with this checkpoint - */ - export interface TrainingMetrics { - /** - * Training epoch number - */ - epoch: number; - - /** - * Perplexity metric indicating model confidence - */ - perplexity: number; - - /** - * Loss value on the training dataset - */ - train_loss: number; - - /** - * Loss value on the validation dataset - */ - validation_loss: number; - } - } -} - -export interface JobArtifactsParams { - /** - * The UUID of the job to get the artifacts of. - */ - job_uuid: string; -} - -export interface JobCancelParams { - /** - * The UUID of the job to cancel. - */ - job_uuid: string; -} - -export interface JobStatusParams { - /** - * The UUID of the job to get the status of. - */ - job_uuid: string; -} - -export declare namespace Job { - export { - type JobListResponse as JobListResponse, - type JobArtifactsResponse as JobArtifactsResponse, - type JobStatusResponse as JobStatusResponse, - type JobArtifactsParams as JobArtifactsParams, - type JobCancelParams as JobCancelParams, - type JobStatusParams as JobStatusParams, - }; -} diff --git a/src/resources/post-training/post-training.ts b/src/resources/post-training/post-training.ts deleted file mode 100644 index 8f6eb3f..0000000 --- a/src/resources/post-training/post-training.ts +++ /dev/null @@ -1,510 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../resource'; -import * as Core from '../../core'; -import * as JobAPI from './job'; -import { - Job, - JobArtifactsParams, - JobArtifactsResponse, - JobCancelParams, - JobListResponse, - JobStatusParams, - JobStatusResponse, -} from './job'; - -export class PostTraining extends APIResource { - job: JobAPI.Job = new JobAPI.Job(this._client); - - /** - * Run preference optimization of a model. 
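As with eval, the post-training surface deleted in this file is relocated rather than removed; the changelog moves it under the alpha namespace. A job-polling sketch that reuses the parameter shapes above (`JobStatusParams`, `JobCancelParams`); the `client.alpha.postTraining` path itself is an assumption:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

const jobUuid = 'job-123'; // placeholder

// Hypothetical alpha path; params mirror the deleted JobStatusParams.
const status = await client.alpha.postTraining.job.status({ job_uuid: jobUuid });

if (status.status === 'in_progress') {
  await client.alpha.postTraining.job.cancel({ job_uuid: jobUuid });
}
```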
- */ - preferenceOptimize( - body: PostTrainingPreferenceOptimizeParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/post-training/preference-optimize', { body, ...options }); - } - - /** - * Run supervised fine-tuning of a model. - */ - supervisedFineTune( - body: PostTrainingSupervisedFineTuneParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/post-training/supervised-fine-tune', { body, ...options }); - } -} - -/** - * Configuration for Low-Rank Adaptation (LoRA) fine-tuning. - */ -export type AlgorithmConfig = AlgorithmConfig.LoraFinetuningConfig | AlgorithmConfig.QatFinetuningConfig; - -export namespace AlgorithmConfig { - /** - * Configuration for Low-Rank Adaptation (LoRA) fine-tuning. - */ - export interface LoraFinetuningConfig { - /** - * LoRA scaling parameter that controls adaptation strength - */ - alpha: number; - - /** - * Whether to apply LoRA to MLP layers - */ - apply_lora_to_mlp: boolean; - - /** - * Whether to apply LoRA to output projection layers - */ - apply_lora_to_output: boolean; - - /** - * List of attention module names to apply LoRA to - */ - lora_attn_modules: Array; - - /** - * Rank of the LoRA adaptation (lower rank = fewer parameters) - */ - rank: number; - - /** - * Algorithm type identifier, always "LoRA" - */ - type: 'LoRA'; - - /** - * (Optional) Whether to quantize the base model weights - */ - quantize_base?: boolean; - - /** - * (Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation) - */ - use_dora?: boolean; - } - - /** - * Configuration for Quantization-Aware Training (QAT) fine-tuning. - */ - export interface QatFinetuningConfig { - /** - * Size of groups for grouped quantization - */ - group_size: number; - - /** - * Name of the quantization algorithm to use - */ - quantizer_name: string; - - /** - * Algorithm type identifier, always "QAT" - */ - type: 'QAT'; - } -} - -export interface ListPostTrainingJobsResponse { - data: Array; -} - -export namespace ListPostTrainingJobsResponse { - export interface Data { - job_uuid: string; - } -} - -export interface PostTrainingJob { - job_uuid: string; -} - -export interface PostTrainingPreferenceOptimizeParams { - /** - * The algorithm configuration. - */ - algorithm_config: PostTrainingPreferenceOptimizeParams.AlgorithmConfig; - - /** - * The model to fine-tune. - */ - finetuned_model: string; - - /** - * The hyperparam search configuration. - */ - hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The UUID of the job to create. - */ - job_uuid: string; - - /** - * The logger configuration. - */ - logger_config: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The training configuration. - */ - training_config: PostTrainingPreferenceOptimizeParams.TrainingConfig; -} - -export namespace PostTrainingPreferenceOptimizeParams { - /** - * The algorithm configuration. - */ - export interface AlgorithmConfig { - /** - * Temperature parameter for the DPO loss - */ - beta: number; - - /** - * The type of loss function to use for DPO - */ - loss_type: 'sigmoid' | 'hinge' | 'ipo' | 'kto_pair'; - } - - /** - * The training configuration. 
- */ - export interface TrainingConfig { - /** - * Number of steps to accumulate gradients before updating - */ - gradient_accumulation_steps: number; - - /** - * Maximum number of steps to run per epoch - */ - max_steps_per_epoch: number; - - /** - * Number of training epochs to run - */ - n_epochs: number; - - /** - * (Optional) Configuration for data loading and formatting - */ - data_config?: TrainingConfig.DataConfig; - - /** - * (Optional) Data type for model parameters (bf16, fp16, fp32) - */ - dtype?: string; - - /** - * (Optional) Configuration for memory and compute optimizations - */ - efficiency_config?: TrainingConfig.EfficiencyConfig; - - /** - * (Optional) Maximum number of validation steps per epoch - */ - max_validation_steps?: number; - - /** - * (Optional) Configuration for the optimization algorithm - */ - optimizer_config?: TrainingConfig.OptimizerConfig; - } - - export namespace TrainingConfig { - /** - * (Optional) Configuration for data loading and formatting - */ - export interface DataConfig { - /** - * Number of samples per training batch - */ - batch_size: number; - - /** - * Format of the dataset (instruct or dialog) - */ - data_format: 'instruct' | 'dialog'; - - /** - * Unique identifier for the training dataset - */ - dataset_id: string; - - /** - * Whether to shuffle the dataset during training - */ - shuffle: boolean; - - /** - * (Optional) Whether to pack multiple samples into a single sequence for - * efficiency - */ - packed?: boolean; - - /** - * (Optional) Whether to compute loss on input tokens as well as output tokens - */ - train_on_input?: boolean; - - /** - * (Optional) Unique identifier for the validation dataset - */ - validation_dataset_id?: string; - } - - /** - * (Optional) Configuration for memory and compute optimizations - */ - export interface EfficiencyConfig { - /** - * (Optional) Whether to use activation checkpointing to reduce memory usage - */ - enable_activation_checkpointing?: boolean; - - /** - * (Optional) Whether to offload activations to CPU to save GPU memory - */ - enable_activation_offloading?: boolean; - - /** - * (Optional) Whether to offload FSDP parameters to CPU - */ - fsdp_cpu_offload?: boolean; - - /** - * (Optional) Whether to use memory-efficient FSDP wrapping - */ - memory_efficient_fsdp_wrap?: boolean; - } - - /** - * (Optional) Configuration for the optimization algorithm - */ - export interface OptimizerConfig { - /** - * Learning rate for the optimizer - */ - lr: number; - - /** - * Number of steps for learning rate warmup - */ - num_warmup_steps: number; - - /** - * Type of optimizer to use (adam, adamw, or sgd) - */ - optimizer_type: 'adam' | 'adamw' | 'sgd'; - - /** - * Weight decay coefficient for regularization - */ - weight_decay: number; - } - } -} - -export interface PostTrainingSupervisedFineTuneParams { - /** - * The hyperparam search configuration. - */ - hyperparam_search_config: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The UUID of the job to create. - */ - job_uuid: string; - - /** - * The logger configuration. - */ - logger_config: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * The training configuration. - */ - training_config: PostTrainingSupervisedFineTuneParams.TrainingConfig; - - /** - * The algorithm configuration. - */ - algorithm_config?: AlgorithmConfig; - - /** - * The directory to save checkpoint(s) to. - */ - checkpoint_dir?: string; - - /** - * The model to fine-tune. 
- */ - model?: string; -} - -export namespace PostTrainingSupervisedFineTuneParams { - /** - * The training configuration. - */ - export interface TrainingConfig { - /** - * Number of steps to accumulate gradients before updating - */ - gradient_accumulation_steps: number; - - /** - * Maximum number of steps to run per epoch - */ - max_steps_per_epoch: number; - - /** - * Number of training epochs to run - */ - n_epochs: number; - - /** - * (Optional) Configuration for data loading and formatting - */ - data_config?: TrainingConfig.DataConfig; - - /** - * (Optional) Data type for model parameters (bf16, fp16, fp32) - */ - dtype?: string; - - /** - * (Optional) Configuration for memory and compute optimizations - */ - efficiency_config?: TrainingConfig.EfficiencyConfig; - - /** - * (Optional) Maximum number of validation steps per epoch - */ - max_validation_steps?: number; - - /** - * (Optional) Configuration for the optimization algorithm - */ - optimizer_config?: TrainingConfig.OptimizerConfig; - } - - export namespace TrainingConfig { - /** - * (Optional) Configuration for data loading and formatting - */ - export interface DataConfig { - /** - * Number of samples per training batch - */ - batch_size: number; - - /** - * Format of the dataset (instruct or dialog) - */ - data_format: 'instruct' | 'dialog'; - - /** - * Unique identifier for the training dataset - */ - dataset_id: string; - - /** - * Whether to shuffle the dataset during training - */ - shuffle: boolean; - - /** - * (Optional) Whether to pack multiple samples into a single sequence for - * efficiency - */ - packed?: boolean; - - /** - * (Optional) Whether to compute loss on input tokens as well as output tokens - */ - train_on_input?: boolean; - - /** - * (Optional) Unique identifier for the validation dataset - */ - validation_dataset_id?: string; - } - - /** - * (Optional) Configuration for memory and compute optimizations - */ - export interface EfficiencyConfig { - /** - * (Optional) Whether to use activation checkpointing to reduce memory usage - */ - enable_activation_checkpointing?: boolean; - - /** - * (Optional) Whether to offload activations to CPU to save GPU memory - */ - enable_activation_offloading?: boolean; - - /** - * (Optional) Whether to offload FSDP parameters to CPU - */ - fsdp_cpu_offload?: boolean; - - /** - * (Optional) Whether to use memory-efficient FSDP wrapping - */ - memory_efficient_fsdp_wrap?: boolean; - } - - /** - * (Optional) Configuration for the optimization algorithm - */ - export interface OptimizerConfig { - /** - * Learning rate for the optimizer - */ - lr: number; - - /** - * Number of steps for learning rate warmup - */ - num_warmup_steps: number; - - /** - * Type of optimizer to use (adam, adamw, or sgd) - */ - optimizer_type: 'adam' | 'adamw' | 'sgd'; - - /** - * Weight decay coefficient for regularization - */ - weight_decay: number; - } - } -} - -PostTraining.Job = Job; - -export declare namespace PostTraining { - export { - type AlgorithmConfig as AlgorithmConfig, - type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse, - type PostTrainingJob as PostTrainingJob, - type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams, - type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams, - }; - - export { - Job as Job, - type JobListResponse as JobListResponse, - type JobArtifactsResponse as JobArtifactsResponse, - type JobStatusResponse as JobStatusResponse, - type JobArtifactsParams as JobArtifactsParams, - type 
JobCancelParams as JobCancelParams, - type JobStatusParams as JobStatusParams, - }; -} diff --git a/src/resources/providers.ts b/src/resources/providers.ts index d27b9ab..2736f37 100644 --- a/src/resources/providers.ts +++ b/src/resources/providers.ts @@ -6,14 +6,14 @@ import * as InspectAPI from './inspect'; export class Providers extends APIResource { /** - * Get detailed information about a specific provider. + * Get provider. Get detailed information about a specific provider. */ retrieve(providerId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/v1/providers/${providerId}`, options); } /** - * List all available providers. + * List providers. List all available providers. */ list(options?: Core.RequestOptions): Core.APIPromise { return ( diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index 74c556c..549533c 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -6,7 +6,7 @@ import * as Core from '../../core'; export class InputItems extends APIResource { /** - * List input items for a given OpenAI response. + * List input items. */ list( responseId: string, @@ -22,7 +22,7 @@ export class InputItems extends APIResource { if (isRequestOptions(query)) { return this.list(responseId, {}, query); } - return this._client.get(`/v1/openai/v1/responses/${responseId}/input_items`, { query, ...options }); + return this._client.get(`/v1/responses/${responseId}/input_items`, { query, ...options }); } } @@ -34,10 +34,15 @@ export interface InputItemListResponse { * List of input items */ data: Array< + | InputItemListResponse.OpenAIResponseMessage | InputItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall | InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall | InputItemListResponse.OpenAIResponseOutputMessageFunctionToolCall + | InputItemListResponse.OpenAIResponseOutputMessageMcpCall + | InputItemListResponse.OpenAIResponseOutputMessageMcpListTools + | InputItemListResponse.OpenAIResponseMcpApprovalRequest | InputItemListResponse.OpenAIResponseInputFunctionToolCallOutput + | InputItemListResponse.OpenAIResponseMcpApprovalResponse | InputItemListResponse.OpenAIResponseMessage >; @@ -48,6 +53,212 @@ export interface InputItemListResponse { } export namespace InputItemListResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. 
+ */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + /** * Web search tool call output message for OpenAI responses. */ @@ -165,6 +376,108 @@ export namespace InputItemListResponse { status?: string; } + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. 
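The new `input_file` content part sits alongside `input_text` and `input_image` in message content arrays, and it pairs naturally with the `/v1/files` upload shown earlier. A sketch of attaching an uploaded file by ID on `responses.create`; the `model`/`input` field names follow the standard Responses shape rather than anything shown verbatim here:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

const response = await client.responses.create({
  model: 'llama3.2:3b', // placeholder model id
  input: [
    {
      type: 'message',
      role: 'user',
      content: [
        { type: 'input_text', text: 'Summarize the attached file.' },
        { type: 'input_file', file_id: 'file-abc123' }, // new in this release
      ],
    },
  ],
});
```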
+ */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + /** * This represents the output of a function call that gets passed back to the * model. @@ -181,6 +494,21 @@ export namespace InputItemListResponse { status?: string; } + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + /** * Corresponds to the various Message types in the Responses API. They are all * under one type because the Responses API gives them all the same "type" value, @@ -192,8 +520,12 @@ export namespace InputItemListResponse { | Array< | OpenAIResponseMessage.OpenAIResponseInputMessageContentText | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile > - | Array; + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; role: 'system' | 'developer' | 'user' | 'assistant'; @@ -234,18 +566,53 @@ export namespace InputItemListResponse { */ type: 'input_image'; + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + /** * (Optional) URL of the image content */ image_url?: string; } - export interface UnionMember2 { + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. 
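The MCP approval round-trip added above pairs an `mcp_approval_request` emitted in a response's output with an `mcp_approval_response` sent back as input on the next turn. A sketch, assuming `previous_response_id` is accepted on create the same way it is exposed on `ResponseObject`:

```ts
// `client` is a LlamaStackClient instance (see the files sketch above).

// Suppose the prior response output contained:
//   { type: 'mcp_approval_request', id: 'apr_1', name: 'delete_row', ... }
const next = await client.responses.create({
  model: 'llama3.2:3b', // placeholder model id
  previous_response_id: 'resp_123',
  input: [
    {
      type: 'mcp_approval_response',
      approval_request_id: 'apr_1',
      approve: true,
    },
  ],
});
```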
+ */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath >; text: string; @@ -253,7 +620,7 @@ export namespace InputItemListResponse { type: 'output_text'; } - export namespace UnionMember2 { + export namespace OpenAIResponseOutputMessageContentOutputText { /** * File citation annotation for referencing specific files in response content. */ @@ -331,6 +698,21 @@ export namespace InputItemListResponse { type: 'file_path'; } } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } } } diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index e329519..2b8a74c 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -14,7 +14,7 @@ export class Responses extends APIResource { inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); /** - * Create a new OpenAI response. + * Create a model response. */ create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( @@ -29,22 +29,20 @@ export class Responses extends APIResource { body: ResponseCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { - return this._client.post('/v1/openai/v1/responses', { - body, - ...options, - stream: body.stream ?? false, - }) as APIPromise | APIPromise>; + return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise>; } /** - * Retrieve an OpenAI response by its ID. + * Get a model response. */ retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/responses/${responseId}`, options); + return this._client.get(`/v1/responses/${responseId}`, options); } /** - * List all OpenAI responses. + * List all responses. */ list( query?: ResponseListParams, @@ -60,17 +58,17 @@ export class Responses extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList('/v1/openai/v1/responses', ResponseListResponsesOpenAICursorPage, { + return this._client.getAPIList('/v1/responses', ResponseListResponsesOpenAICursorPage, { query, ...options, }); } /** - * Delete an OpenAI response by its ID. + * Delete a response. 
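+   *
+   * For example (a sketch; the ID is illustrative and `client` is assumed to
+   * be an already-constructed `LlamaStackClient`):
+   *
+   * ```ts
+   * await client.responses.delete('resp_abc123');
+   * ```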
*/ delete(responseId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/responses/${responseId}`, options); + return this._client.delete(`/v1/responses/${responseId}`, options); } } @@ -110,6 +108,7 @@ export interface ResponseObject { | ResponseObject.OpenAIResponseOutputMessageFunctionToolCall | ResponseObject.OpenAIResponseOutputMessageMcpCall | ResponseObject.OpenAIResponseOutputMessageMcpListTools + | ResponseObject.OpenAIResponseMcpApprovalRequest >; /** @@ -132,16 +131,36 @@ export interface ResponseObject { */ error?: ResponseObject.Error; + /** + * (Optional) System message inserted into the model's context + */ + instructions?: string; + /** * (Optional) ID of the previous response in a conversation */ previous_response_id?: string; + /** + * (Optional) Reference to a prompt template and its variables. + */ + prompt?: ResponseObject.Prompt; + /** * (Optional) Sampling temperature used for generation */ temperature?: number; + /** + * (Optional) An array of tools the model may call while generating a response. + */ + tools?: Array< + | ResponseObject.OpenAIResponseInputToolWebSearch + | ResponseObject.OpenAIResponseInputToolFileSearch + | ResponseObject.OpenAIResponseInputToolFunction + | ResponseObject.OpenAIResponseToolMcp + >; + /** * (Optional) Nucleus sampling parameter used for generation */ @@ -153,9 +172,9 @@ export interface ResponseObject { truncation?: string; /** - * (Optional) User identifier associated with the request + * (Optional) Token usage information for the response */ - user?: string; + usage?: ResponseObject.Usage; } export namespace ResponseObject { @@ -170,8 +189,12 @@ export namespace ResponseObject { | Array< | OpenAIResponseMessage.OpenAIResponseInputMessageContentText | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile > - | Array; + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; role: 'system' | 'developer' | 'user' | 'assistant'; @@ -212,18 +235,53 @@ export namespace ResponseObject { */ type: 'input_image'; + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + /** * (Optional) URL of the image content */ image_url?: string; } - export interface UnionMember2 { + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. 
+ */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath >; text: string; @@ -231,7 +289,7 @@ export namespace ResponseObject { type: 'output_text'; } - export namespace UnionMember2 { + export namespace OpenAIResponseOutputMessageContentOutputText { /** * File citation annotation for referencing specific files in response content. */ @@ -309,6 +367,21 @@ export namespace ResponseObject { type: 'file_path'; } } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } } /** @@ -515,6 +588,21 @@ export namespace ResponseObject { } } + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + /** * Text formatting configuration for the response */ @@ -573,152 +661,502 @@ export namespace ResponseObject { */ message: string; } -} - -/** - * Streaming event indicating a new response has been created. 
- */ -export type ResponseObjectStream = - | ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated - | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded - | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone - | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta - | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDone - | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta - | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone - | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallInProgress - | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallSearching - | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallCompleted - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsInProgress - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsFailed - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsCompleted - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDone - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallInProgress - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallFailed - | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted - | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded - | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone - | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted; -export namespace ResponseObjectStream { /** - * Streaming event indicating a new response has been created. + * (Optional) Reference to a prompt template and its variables. */ - export interface OpenAIResponseObjectStreamResponseCreated { + export interface Prompt { /** - * The newly created response object + * Unique identifier of the prompt template */ - response: ResponsesAPI.ResponseObject; + id: string; /** - * Event type identifier, always "response.created" + * Dictionary of variable names to OpenAIResponseInputMessageContent structure for + * template substitution. The substitution values can either be strings, or other + * Response input types like images or files. */ - type: 'response.created'; + variables?: { + [key: string]: + | Prompt.OpenAIResponseInputMessageContentText + | Prompt.OpenAIResponseInputMessageContentImage + | Prompt.OpenAIResponseInputMessageContentFile; + }; + + /** + * Version number of the prompt to use (defaults to latest if not specified) + */ + version?: string; } - /** - * Streaming event for when a new output item is added to the response. - */ - export interface OpenAIResponseObjectStreamResponseOutputItemAdded { + export namespace Prompt { /** - * The output item that was added (message, tool call, etc.) + * Text content for input messages in OpenAI response format. 
*/ - item: - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMessage - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageWebSearchToolCall - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall - | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools; + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } /** - * Index position of this item in the output list + * Image content for input messages in OpenAI response format. */ - output_index: number; + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } /** - * Unique identifier of the response containing this output + * File content for input messages in OpenAI response format. */ - response_id: string; + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + } + /** + * Web search tool configuration for OpenAI response inputs. + */ + export interface OpenAIResponseInputToolWebSearch { /** - * Sequential number for ordering streaming events + * Web search tool type variant to use */ - sequence_number: number; + type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11'; /** - * Event type identifier, always "response.output_item.added" + * (Optional) Size of search context, must be "low", "medium", or "high" */ - type: 'response.output_item.added'; + search_context_size?: string; } - export namespace OpenAIResponseObjectStreamResponseOutputItemAdded { + /** + * File search tool configuration for OpenAI response inputs. + */ + export interface OpenAIResponseInputToolFileSearch { /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. 
+     * Tool type identifier, always "file_search"
      */
-    export interface OpenAIResponseMessage {
-      content:
-        | string
-        | Array<
-            | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
-            | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
-          >
-        | Array<OpenAIResponseMessage.UnionMember2>;
+    type: 'file_search';
 
-      role: 'system' | 'developer' | 'user' | 'assistant';
+    /**
+     * List of vector store identifiers to search within
+     */
+    vector_store_ids: Array<string>;
 
-      type: 'message';
+    /**
+     * (Optional) Additional filters to apply to the search
+     */
+    filters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
 
-      id?: string;
+    /**
+     * (Optional) Maximum number of search results to return (1-50)
+     */
+    max_num_results?: number;
 
-      status?: string;
-    }
+    /**
+     * (Optional) Options for ranking and scoring search results
+     */
+    ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+  }
 
-    export namespace OpenAIResponseMessage {
+  export namespace OpenAIResponseInputToolFileSearch {
+    /**
+     * (Optional) Options for ranking and scoring search results
+     */
+    export interface RankingOptions {
       /**
-       * Text content for input messages in OpenAI response format.
+       * (Optional) Name of the ranking algorithm to use
        */
-      export interface OpenAIResponseInputMessageContentText {
-        /**
-         * The text content of the input message
-         */
-        text: string;
-
-        /**
-         * Content type identifier, always "input_text"
-         */
-        type: 'input_text';
-      }
+      ranker?: string;
 
       /**
-       * Image content for input messages in OpenAI response format.
+       * (Optional) Minimum relevance score threshold for results
        */
-      export interface OpenAIResponseInputMessageContentImage {
-        /**
-         * Level of detail for image processing, can be "low", "high", or "auto"
-         */
-        detail: 'low' | 'high' | 'auto';
+      score_threshold?: number;
+    }
+  }
 
-        /**
-         * Content type identifier, always "input_image"
-         */
-        type: 'input_image';
+  /**
+   * Function tool configuration for OpenAI response inputs.
+   */
+  export interface OpenAIResponseInputToolFunction {
+    /**
+     * Name of the function that can be called
+     */
+    name: string;
 
-        /**
-         * (Optional) URL of the image content
-         */
+    /**
+     * Tool type identifier, always "function"
+     */
+    type: 'function';
+
+    /**
+     * (Optional) Description of what the function does
+     */
+    description?: string;
+
+    /**
+     * (Optional) JSON schema defining the function's parameters
+     */
+    parameters?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+    /**
+     * (Optional) Whether to enforce strict parameter validation
+     */
+    strict?: boolean;
+  }
+
+  /**
+   * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+   */
+  export interface OpenAIResponseToolMcp {
+    /**
+     * Label to identify this MCP server
+     */
+    server_label: string;
+
+    /**
+     * Tool type identifier, always "mcp"
+     */
+    type: 'mcp';
+
+    /**
+     * (Optional) Restriction on which tools can be used from this server
+     */
+    allowed_tools?: Array<string> | OpenAIResponseToolMcp.AllowedToolsFilter;
+  }
+
+  export namespace OpenAIResponseToolMcp {
+    /**
+     * Filter configuration for restricting which MCP tools can be used.
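+     *
+     * Illustrative sketch (not generated documentation): `allowed_tools` on the
+     * parent `OpenAIResponseToolMcp` accepts either a plain name list or this
+     * filter object, so these two spellings express the same restriction:
+     *
+     * ```ts
+     * const asList = { type: 'mcp' as const, server_label: 'deepwiki', allowed_tools: ['ask_question'] };
+     * const asFilter = {
+     *   type: 'mcp' as const,
+     *   server_label: 'deepwiki',
+     *   allowed_tools: { tool_names: ['ask_question'] },
+     * };
+     * ```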
+ */
+    export interface AllowedToolsFilter {
+      /**
+       * (Optional) List of specific tool names that are allowed
+       */
+      tool_names?: Array<string>;
+    }
+  }
+
+  /**
+   * (Optional) Token usage information for the response
+   */
+  export interface Usage {
+    /**
+     * Number of tokens in the input
+     */
+    input_tokens: number;
+
+    /**
+     * Number of tokens in the output
+     */
+    output_tokens: number;
+
+    /**
+     * Total tokens used (input + output)
+     */
+    total_tokens: number;
+
+    /**
+     * Detailed breakdown of input token usage
+     */
+    input_tokens_details?: Usage.InputTokensDetails;
+
+    /**
+     * Detailed breakdown of output token usage
+     */
+    output_tokens_details?: Usage.OutputTokensDetails;
+  }
+
+  export namespace Usage {
+    /**
+     * Detailed breakdown of input token usage
+     */
+    export interface InputTokensDetails {
+      /**
+       * Number of tokens retrieved from cache
+       */
+      cached_tokens?: number;
+    }
+
+    /**
+     * Detailed breakdown of output token usage
+     */
+    export interface OutputTokensDetails {
+      /**
+       * Number of tokens used for reasoning (o1/o3 models)
+       */
+      reasoning_tokens?: number;
+    }
+  }
+}
+
+/**
+ * Streaming event indicating a new response has been created.
+ */
+export type ResponseObjectStream =
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseInProgress
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallInProgress
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallSearching
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallCompleted
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsInProgress
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsFailed
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsCompleted
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallInProgress
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallFailed
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDelta
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDelta
+  | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDone
+  | 
ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallInProgress + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallSearching + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallCompleted + | ResponseObjectStream.OpenAIResponseObjectStreamResponseIncomplete + | ResponseObjectStream.OpenAIResponseObjectStreamResponseFailed + | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted; + +export namespace ResponseObjectStream { + /** + * Streaming event indicating a new response has been created. + */ + export interface OpenAIResponseObjectStreamResponseCreated { + /** + * The response object that was created + */ + response: ResponsesAPI.ResponseObject; + + /** + * Event type identifier, always "response.created" + */ + type: 'response.created'; + } + + /** + * Streaming event indicating the response remains in progress. + */ + export interface OpenAIResponseObjectStreamResponseInProgress { + /** + * Current response state while in progress + */ + response: ResponsesAPI.ResponseObject; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.in_progress" + */ + type: 'response.in_progress'; + } + + /** + * Streaming event for when a new output item is added to the response. + */ + export interface OpenAIResponseObjectStreamResponseOutputItemAdded { + /** + * The output item that was added (message, tool call, etc.) + */ + item: + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMessage + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools + | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMcpApprovalRequest; + + /** + * Index position of this item in the output list + */ + output_index: number; + + /** + * Unique identifier of the response containing this output + */ + response_id: string; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.output_item.added" + */ + type: 'response.output_item.added'; + } + + export namespace OpenAIResponseObjectStreamResponseOutputItemAdded { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. 
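+     *
+     * In practice (a sketch, assuming a value `msg` of this shape) consumers
+     * branch on `role` and then on each content part's own discriminant:
+     *
+     * ```ts
+     * if (msg.role === 'assistant' && Array.isArray(msg.content)) {
+     *   for (const part of msg.content) {
+     *     if (part.type === 'output_text') console.log(part.text);
+     *     else if (part.type === 'refusal') console.warn(part.refusal);
+     *   }
+     * }
+     * ```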
+ */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ image_url?: string; } - export interface UnionMember2 { + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath >; text: string; @@ -726,7 +1164,7 @@ export namespace ResponseObjectStream { type: 'output_text'; } - export namespace UnionMember2 { + export namespace OpenAIResponseOutputMessageContentOutputText { /** * File citation annotation for referencing specific files in response content. */ @@ -804,14 +1242,29 @@ export namespace ResponseObjectStream { type: 'file_path'; } } - } - /** - * Web search tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { /** - * Unique identifier for this tool call + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. 
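+   *
+   * Sketch (illustrative): only an `id` and a free-form `status` string are
+   * surfaced, so progress reporting reduces to string formatting:
+   *
+   * ```ts
+   * const label = (c: { id: string; status: string }) => `web_search ${c.id}: ${c.status}`;
+   * ```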
+ */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call */ id: string; @@ -1009,6 +1462,21 @@ export namespace ResponseObjectStream { description?: string; } } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } } /** @@ -1024,7 +1492,8 @@ export namespace ResponseObjectStream { | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFileSearchToolCall | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFunctionToolCall | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall - | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools; + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools + | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMcpApprovalRequest; /** * Index position of this item in the output list @@ -1059,8 +1528,12 @@ export namespace ResponseObjectStream { | Array< | OpenAIResponseMessage.OpenAIResponseInputMessageContentText | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile > - | Array; + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; role: 'system' | 'developer' | 'user' | 'assistant'; @@ -1101,18 +1574,53 @@ export namespace ResponseObjectStream { */ type: 'input_image'; + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + /** * (Optional) URL of the image content */ image_url?: string; } - export interface UnionMember2 { + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath >; text: string; @@ -1120,7 +1628,7 @@ export namespace ResponseObjectStream { type: 'output_text'; } - export namespace UnionMember2 { + export namespace OpenAIResponseOutputMessageContentOutputText { /** * File citation annotation for referencing specific files in response content. */ @@ -1198,6 +1706,21 @@ export namespace ResponseObjectStream { type: 'file_path'; } } + + /** + * Refusal content within a streamed response part. 
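+       *
+       * Sketch (assumptions labeled): given a content array `parts` of this
+       * union, refusals can be collected with a type-narrowing filter:
+       *
+       * ```ts
+       * const refusals = parts.filter(
+       *   (p): p is OpenAIResponseContentPartRefusal => p.type === 'refusal',
+       * );
+       * ```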
+ */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } } /** @@ -1403,6 +1926,21 @@ export namespace ResponseObjectStream { description?: string; } } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } } /** @@ -1696,17 +2234,28 @@ export namespace ResponseObjectStream { * Streaming event for when a new content part is added to a response item. */ export interface OpenAIResponseObjectStreamResponseContentPartAdded { + /** + * Index position of the part within the content array + */ + content_index: number; + /** * Unique identifier of the output item containing this content part */ item_id: string; + /** + * Index position of the output item in the response + */ + output_index: number; + /** * The content part that was added */ part: | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartOutputText - | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal; + | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal + | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartReasoningText; /** * Unique identifier of the response containing this content @@ -1725,34 +2274,172 @@ export namespace ResponseObjectStream { } export namespace OpenAIResponseObjectStreamResponseContentPartAdded { + /** + * Text content within a streamed response part. + */ export interface OpenAIResponseContentPartOutputText { + /** + * Structured annotations associated with the text + */ + annotations: Array< + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath + >; + + /** + * Text emitted for this content part + */ text: string; + /** + * Content part type identifier, always "output_text" + */ type: 'output_text'; + + /** + * (Optional) Token log probability details + */ + logprobs?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; + } + + export namespace OpenAIResponseContentPartOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
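+         *
+         * For example (illustrative), a renderer might turn the citation back
+         * into a link for the `[start_index, end_index)` span it annotates:
+         *
+         * ```ts
+         * const link = `[${a.title}](${a.url})`; // `a` is an OpenAIResponseAnnotationCitation
+         * ```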
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } } + /** + * Refusal content within a streamed response part. + */ export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ refusal: string; + /** + * Content part type identifier, always "refusal" + */ type: 'refusal'; } + + /** + * Reasoning text emitted as part of a streamed response. + */ + export interface OpenAIResponseContentPartReasoningText { + /** + * Reasoning text supplied by the model + */ + text: string; + + /** + * Content part type identifier, always "reasoning_text" + */ + type: 'reasoning_text'; + } } /** * Streaming event for when a content part is completed. */ export interface OpenAIResponseObjectStreamResponseContentPartDone { + /** + * Index position of the part within the content array + */ + content_index: number; + /** * Unique identifier of the output item containing this content part */ item_id: string; + /** + * Index position of the output item in the response + */ + output_index: number; + /** * The completed content part */ part: | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText - | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal; + | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal + | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartReasoningText; /** * Unique identifier of the response containing this content @@ -1771,427 +2458,851 @@ export namespace ResponseObjectStream { } export namespace OpenAIResponseObjectStreamResponseContentPartDone { + /** + * Text content within a streamed response part. + */ export interface OpenAIResponseContentPartOutputText { + /** + * Structured annotations associated with the text + */ + annotations: Array< + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath + >; + + /** + * Text emitted for this content part + */ text: string; + /** + * Content part type identifier, always "output_text" + */ type: 'output_text'; - } - - export interface OpenAIResponseContentPartRefusal { - refusal: string; - type: 'refusal'; + /** + * (Optional) Token log probability details + */ + logprobs?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>; } - } - - /** - * Streaming event indicating a response has been completed. 
- */ - export interface OpenAIResponseObjectStreamResponseCompleted { - /** - * The completed response object - */ - response: ResponsesAPI.ResponseObject; - /** - * Event type identifier, always "response.completed" - */ - type: 'response.completed'; - } -} + export namespace OpenAIResponseContentPartOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; -/** - * OpenAI response object extended with input context information. - */ -export interface ResponseListResponse { - /** - * Unique identifier for this response - */ - id: string; + /** + * Name of the referenced file + */ + filename: string; - /** - * Unix timestamp when the response was created - */ - created_at: number; + /** + * Position index of the citation within the content + */ + index: number; - /** - * List of input items that led to this response - */ - input: Array< - | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall - | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput - | ResponseListResponse.OpenAIResponseMessage - >; + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } - /** - * Model identifier used for generation - */ - model: string; + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; - /** - * Object type identifier, always "response" - */ - object: 'response'; + /** + * Start position of the citation span in the content + */ + start_index: number; - /** - * List of generated output items (messages, tool calls, etc.) 
- */ - output: Array< - | ResponseListResponse.OpenAIResponseMessage - | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall - | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall - | ResponseListResponse.OpenAIResponseOutputMessageMcpCall - | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools - >; + /** + * Title of the referenced web resource + */ + title: string; - /** - * Whether tool calls can be executed in parallel - */ - parallel_tool_calls: boolean; + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; - /** - * Current status of the response generation - */ - status: string; + /** + * URL of the referenced web resource + */ + url: string; + } - /** - * Text formatting configuration for the response - */ - text: ResponseListResponse.Text; + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; - /** - * (Optional) Error details if the response generation failed - */ - error?: ResponseListResponse.Error; + end_index: number; - /** - * (Optional) ID of the previous response in a conversation - */ - previous_response_id?: string; + file_id: string; - /** - * (Optional) Sampling temperature used for generation - */ - temperature?: number; + filename: string; - /** - * (Optional) Nucleus sampling parameter used for generation - */ - top_p?: number; + start_index: number; - /** - * (Optional) Truncation strategy applied to the response - */ - truncation?: string; + type: 'container_file_citation'; + } - /** - * (Optional) User identifier associated with the request - */ - user?: string; -} + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + + /** + * Reasoning text emitted as part of a streamed response. + */ + export interface OpenAIResponseContentPartReasoningText { + /** + * Reasoning text supplied by the model + */ + text: string; + + /** + * Content part type identifier, always "reasoning_text" + */ + type: 'reasoning_text'; + } + } -export namespace ResponseListResponse { /** - * Web search tool call output message for OpenAI responses. + * Streaming event for incremental reasoning text updates. */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { + export interface OpenAIResponseObjectStreamResponseReasoningTextDelta { /** - * Unique identifier for this tool call + * Index position of the reasoning content part */ - id: string; + content_index: number; /** - * Current status of the web search operation + * Incremental reasoning text being added */ - status: string; + delta: string; /** - * Tool call type identifier, always "web_search_call" + * Unique identifier of the output item being updated */ - type: 'web_search_call'; + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.reasoning_text.delta" + */ + type: 'response.reasoning_text.delta'; } /** - * File search tool call output message for OpenAI responses. 
+   * Streaming event for when reasoning text is completed.
    */
-  export interface OpenAIResponseOutputMessageFileSearchToolCall {
+  export interface OpenAIResponseObjectStreamResponseReasoningTextDone {
     /**
-     * Unique identifier for this tool call
+     * Index position of the reasoning content part
      */
-    id: string;
+    content_index: number;
 
     /**
-     * List of search queries executed
+     * Unique identifier of the completed output item
      */
-    queries: Array<string>;
+    item_id: string;
 
     /**
-     * Current status of the file search operation
+     * Index position of the item in the output list
      */
-    status: string;
+    output_index: number;
 
     /**
-     * Tool call type identifier, always "file_search_call"
+     * Sequential number for ordering streaming events
      */
-    type: 'file_search_call';
+    sequence_number: number;
 
     /**
-     * (Optional) Search results returned by the file search operation
+     * Final complete reasoning text
      */
-    results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+    text: string;
+
+    /**
+     * Event type identifier, always "response.reasoning_text.done"
+     */
+    type: 'response.reasoning_text.done';
   }
 
-  export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+  /**
+   * Streaming event for when a new reasoning summary part is added.
+   */
+  export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
     /**
-     * Search results returned by the file search operation.
+     * Unique identifier of the output item
      */
-    export interface Result {
-      /**
-       * (Optional) Key-value attributes associated with the file
-       */
-      attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+    item_id: string;
 
-      /**
-       * Unique identifier of the file containing the result
-       */
-      file_id: string;
+    /**
+     * Index position of the output item
+     */
+    output_index: number;
 
-      /**
-       * Name of the file containing the result
-       */
-      filename: string;
+    /**
+     * The summary part that was added
+     */
+    part: OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded.Part;
+
+    /**
+     * Sequential number for ordering streaming events
+     */
+    sequence_number: number;
+
+    /**
+     * Index of the summary part within the reasoning summary
+     */
+    summary_index: number;
+
+    /**
+     * Event type identifier, always "response.reasoning_summary_part.added"
+     */
+    type: 'response.reasoning_summary_part.added';
+  }
 
+  export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
+    /**
+     * The summary part that was added
+     */
+    export interface Part {
       /**
-       * Relevance score for this search result (between 0 and 1)
+       * Summary text
        */
-      score: number;
+      text: string;
 
       /**
-       * Text content of the search result
+       * Content part type identifier, always "summary_text"
        */
-      text: string;
+      type: 'summary_text';
     }
   }
 
   /**
-   * Function tool call output message for OpenAI responses.
+   * Streaming event for when a reasoning summary part is completed.
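+   *
+   * Together with the `.added` and `.delta` events above, a consumer can
+   * rebuild summaries keyed by `summary_index` (sketch; `summaries` is a local
+   * accumulator, not part of the SDK):
+   *
+   * ```ts
+   * const summaries: string[] = [];
+   * // on 'response.reasoning_summary_text.delta':
+   * //   summaries[e.summary_index] = (summaries[e.summary_index] ?? '') + e.delta;
+   * // on 'response.reasoning_summary_part.done':
+   * //   console.log(`summary #${e.summary_index}:`, e.part.text);
+   * ```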
*/ - export interface OpenAIResponseOutputMessageFunctionToolCall { + export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartDone { /** - * JSON string containing the function arguments + * Unique identifier of the output item */ - arguments: string; + item_id: string; /** - * Unique identifier for the function call + * Index position of the output item */ - call_id: string; + output_index: number; /** - * Name of the function being called + * The completed summary part */ - name: string; + part: OpenAIResponseObjectStreamResponseReasoningSummaryPartDone.Part; /** - * Tool call type identifier, always "function_call" + * Sequential number for ordering streaming events */ - type: 'function_call'; + sequence_number: number; /** - * (Optional) Additional identifier for the tool call + * Index of the summary part within the reasoning summary */ - id?: string; + summary_index: number; /** - * (Optional) Current status of the function call execution + * Event type identifier, always "response.reasoning_summary_part.done" */ - status?: string; + type: 'response.reasoning_summary_part.done'; + } + + export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartDone { + /** + * The completed summary part + */ + export interface Part { + /** + * Summary text + */ + text: string; + + /** + * Content part type identifier, always "summary_text" + */ + type: 'summary_text'; + } } /** - * This represents the output of a function call that gets passed back to the - * model. + * Streaming event for incremental reasoning summary text updates. */ - export interface OpenAIResponseInputFunctionToolCallOutput { - call_id: string; + export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta { + /** + * Incremental summary text being added + */ + delta: string; - output: string; + /** + * Unique identifier of the output item + */ + item_id: string; - type: 'function_call_output'; + /** + * Index position of the output item + */ + output_index: number; - id?: string; + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; - status?: string; + /** + * Index of the summary part within the reasoning summary + */ + summary_index: number; + + /** + * Event type identifier, always "response.reasoning_summary_text.delta" + */ + type: 'response.reasoning_summary_text.delta'; } /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. + * Streaming event for when reasoning summary text is completed. 
*/ - export interface OpenAIResponseMessage { - content: - | string - | Array< - | OpenAIResponseMessage.OpenAIResponseInputMessageContentText - | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage - > - | Array; + export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDone { + /** + * Unique identifier of the output item + */ + item_id: string; - role: 'system' | 'developer' | 'user' | 'assistant'; + /** + * Index position of the output item + */ + output_index: number; - type: 'message'; + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; - id?: string; + /** + * Index of the summary part within the reasoning summary + */ + summary_index: number; - status?: string; + /** + * Final complete summary text + */ + text: string; + + /** + * Event type identifier, always "response.reasoning_summary_text.done" + */ + type: 'response.reasoning_summary_text.done'; } - export namespace OpenAIResponseMessage { + /** + * Streaming event for incremental refusal text updates. + */ + export interface OpenAIResponseObjectStreamResponseRefusalDelta { /** - * Text content for input messages in OpenAI response format. + * Index position of the content part */ - export interface OpenAIResponseInputMessageContentText { - /** - * The text content of the input message - */ - text: string; + content_index: number; - /** - * Content type identifier, always "input_text" - */ - type: 'input_text'; - } + /** + * Incremental refusal text being added + */ + delta: string; /** - * Image content for input messages in OpenAI response format. + * Unique identifier of the output item */ - export interface OpenAIResponseInputMessageContentImage { + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.refusal.delta" + */ + type: 'response.refusal.delta'; + } + + /** + * Streaming event for when refusal text is completed. + */ + export interface OpenAIResponseObjectStreamResponseRefusalDone { + /** + * Index position of the content part + */ + content_index: number; + + /** + * Unique identifier of the output item + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Final complete refusal text + */ + refusal: string; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.refusal.done" + */ + type: 'response.refusal.done'; + } + + /** + * Streaming event for when an annotation is added to output text. 
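+   *
+   * Sketch (illustrative; `annots` is a local `Annotation[][][]` accumulator,
+   * not part of the SDK): annotations arrive positionally, so a mirroring
+   * client can insert by the indices the event carries:
+   *
+   * ```ts
+   * (annots[e.output_index] ??= [])[e.content_index] ??= [];
+   * annots[e.output_index][e.content_index][e.annotation_index] = e.annotation;
+   * ```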
+ */ + export interface OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded { + /** + * The annotation object being added + */ + annotation: + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFileCitation + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationCitation + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFilePath; + + /** + * Index of the annotation within the content part + */ + annotation_index: number; + + /** + * Index position of the content part within the output item + */ + content_index: number; + + /** + * Unique identifier of the item to which the annotation is being added + */ + item_id: string; + + /** + * Index position of the output item in the response's output array + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.output_text.annotation.added" + */ + type: 'response.output_text.annotation.added'; + } + + export namespace OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { /** - * Level of detail for image processing, can be "low", "high", or "auto" + * Unique identifier of the referenced file */ - detail: 'low' | 'high' | 'auto'; + file_id: string; /** - * Content type identifier, always "input_image" + * Name of the referenced file */ - type: 'input_image'; + filename: string; /** - * (Optional) URL of the image content + * Position index of the citation within the content */ - image_url?: string; - } - - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; + index: number; - text: string; - - type: 'output_text'; + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; } - export namespace UnionMember2 { + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { /** - * File citation annotation for referencing specific files in response content. + * End position of the citation span in the content */ - export interface OpenAIResponseAnnotationFileCitation { - /** - * Unique identifier of the referenced file - */ - file_id: string; + end_index: number; - /** - * Name of the referenced file - */ - filename: string; + /** + * Start position of the citation span in the content + */ + start_index: number; - /** - * Position index of the citation within the content - */ - index: number; + /** + * Title of the referenced web resource + */ + title: string; - /** - * Annotation type identifier, always "file_citation" - */ - type: 'file_citation'; - } + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; /** - * URL citation annotation for referencing external web resources. 
+ * URL of the referenced web resource */ - export interface OpenAIResponseAnnotationCitation { - /** - * End position of the citation span in the content - */ - end_index: number; + url: string; + } - /** - * Start position of the citation span in the content - */ - start_index: number; + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; - /** - * Title of the referenced web resource - */ - title: string; + end_index: number; - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; + file_id: string; - /** - * URL of the referenced web resource - */ - url: string; - } + filename: string; - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; + start_index: number; - end_index: number; + type: 'container_file_citation'; + } - file_id: string; + export interface OpenAIResponseAnnotationFilePath { + file_id: string; - filename: string; + index: number; - start_index: number; + type: 'file_path'; + } + } - type: 'container_file_citation'; - } + /** + * Streaming event for file search calls in progress. + */ + export interface OpenAIResponseObjectStreamResponseFileSearchCallInProgress { + /** + * Unique identifier of the file search call + */ + item_id: string; - export interface OpenAIResponseAnnotationFilePath { - file_id: string; + /** + * Index position of the item in the output list + */ + output_index: number; - index: number; + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; - type: 'file_path'; - } - } + /** + * Event type identifier, always "response.file_search_call.in_progress" + */ + type: 'response.file_search_call.in_progress'; } /** - * Corresponds to the various Message types in the Responses API. They are all - * under one type because the Responses API gives them all the same "type" value, - * and there is no way to tell them apart in certain scenarios. + * Streaming event for file search currently searching. */ - export interface OpenAIResponseMessage { - content: + export interface OpenAIResponseObjectStreamResponseFileSearchCallSearching { + /** + * Unique identifier of the file search call + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.file_search_call.searching" + */ + type: 'response.file_search_call.searching'; + } + + /** + * Streaming event for completed file search calls. + */ + export interface OpenAIResponseObjectStreamResponseFileSearchCallCompleted { + /** + * Unique identifier of the completed file search call + */ + item_id: string; + + /** + * Index position of the item in the output list + */ + output_index: number; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.file_search_call.completed" + */ + type: 'response.file_search_call.completed'; + } + + /** + * Streaming event emitted when a response ends in an incomplete state. 
+ */ + export interface OpenAIResponseObjectStreamResponseIncomplete { + /** + * Response object describing the incomplete state + */ + response: ResponsesAPI.ResponseObject; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.incomplete" + */ + type: 'response.incomplete'; + } + + /** + * Streaming event emitted when a response fails. + */ + export interface OpenAIResponseObjectStreamResponseFailed { + /** + * Response object describing the failure + */ + response: ResponsesAPI.ResponseObject; + + /** + * Sequential number for ordering streaming events + */ + sequence_number: number; + + /** + * Event type identifier, always "response.failed" + */ + type: 'response.failed'; + } + + /** + * Streaming event indicating a response has been completed. + */ + export interface OpenAIResponseObjectStreamResponseCompleted { + /** + * Completed response object + */ + response: ResponsesAPI.ResponseObject; + + /** + * Event type identifier, always "response.completed" + */ + type: 'response.completed'; + } +} + +/** + * OpenAI response object extended with input context information. + */ +export interface ResponseListResponse { + /** + * Unique identifier for this response + */ + id: string; + + /** + * Unix timestamp when the response was created + */ + created_at: number; + + /** + * List of input items that led to this response + */ + input: Array< + | ResponseListResponse.OpenAIResponseMessage + | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools + | ResponseListResponse.OpenAIResponseMcpApprovalRequest + | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput + | ResponseListResponse.OpenAIResponseMcpApprovalResponse + | ResponseListResponse.OpenAIResponseMessage + >; + + /** + * Model identifier used for generation + */ + model: string; + + /** + * Object type identifier, always "response" + */ + object: 'response'; + + /** + * List of generated output items (messages, tool calls, etc.) + */ + output: Array< + | ResponseListResponse.OpenAIResponseMessage + | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall + | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpCall + | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools + | ResponseListResponse.OpenAIResponseMcpApprovalRequest + >; + + /** + * Whether tool calls can be executed in parallel + */ + parallel_tool_calls: boolean; + + /** + * Current status of the response generation + */ + status: string; + + /** + * Text formatting configuration for the response + */ + text: ResponseListResponse.Text; + + /** + * (Optional) Error details if the response generation failed + */ + error?: ResponseListResponse.Error; + + /** + * (Optional) System message inserted into the model's context + */ + instructions?: string; + + /** + * (Optional) ID of the previous response in a conversation + */ + previous_response_id?: string; + + /** + * (Optional) Reference to a prompt template and its variables. 
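+ *
+ * A sketch of the shape this field takes; the identifier and variable values
+ * are illustrative:
+ *
+ * @example
+ * ```ts
+ * const prompt = {
+ *   id: 'pmpt_abc123', // prompt template identifier
+ *   version: '2', // optional; latest is used when omitted
+ *   variables: { city: { type: 'input_text', text: 'Tokyo' } },
+ * };
+ * ```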
+ */ + prompt?: ResponseListResponse.Prompt; + + /** + * (Optional) Sampling temperature used for generation + */ + temperature?: number; + + /** + * (Optional) An array of tools the model may call while generating a response. + */ + tools?: Array< + | ResponseListResponse.OpenAIResponseInputToolWebSearch + | ResponseListResponse.OpenAIResponseInputToolFileSearch + | ResponseListResponse.OpenAIResponseInputToolFunction + | ResponseListResponse.OpenAIResponseToolMcp + >; + + /** + * (Optional) Nucleus sampling parameter used for generation + */ + top_p?: number; + + /** + * (Optional) Truncation strategy applied to the response + */ + truncation?: string; + + /** + * (Optional) Token usage information for the response + */ + usage?: ResponseListResponse.Usage; +} + +export namespace ResponseListResponse { + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: | string | Array< | OpenAIResponseMessage.OpenAIResponseInputMessageContentText | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile > - | Array; + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; role: 'system' | 'developer' | 'user' | 'assistant'; @@ -2202,7 +3313,1153 @@ export namespace ResponseListResponse { status?: string; } - export namespace OpenAIResponseMessage { + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. 
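+ *
+ * A sketch of a full `input_file` content part using this field; the file ID
+ * and name are illustrative:
+ *
+ * @example
+ * ```ts
+ * const filePart = { type: 'input_file', file_id: 'file_abc123', filename: 'report.pdf' };
+ * ```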
+ */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. 
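+ *
+ * A sketch of reading these results, assuming `item` is an output entry
+ * already narrowed to a file search tool call:
+ *
+ * @example
+ * ```ts
+ * if (item.type === 'file_search_call' && item.results) {
+ *   for (const result of item.results) {
+ *     console.log(`${result.filename} (score ${result.score}): ${result.text}`);
+ *   }
+ * }
+ * ```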
+ */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * This represents the output of a function call that gets passed back to the + * model. + */ + export interface OpenAIResponseInputFunctionToolCallOutput { + call_id: string; + + output: string; + + type: 'function_call_output'; + + id?: string; + + status?: string; + } + + /** + * A response to an MCP approval request. 
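+ *
+ * A sketch of approving a pending MCP tool invocation by sending this item
+ * back as input on a follow-up request; the request ID is illustrative:
+ *
+ * @example
+ * ```ts
+ * const approval = {
+ *   type: 'mcp_approval_response',
+ *   approval_request_id: 'apr_abc123',
+ *   approve: true,
+ * };
+ * ```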
+ */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Corresponds to the various Message types in the Responses API. They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. 
+ */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. + */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; + + type: 'container_file_citation'; + } + + export interface OpenAIResponseAnnotationFilePath { + file_id: string; + + index: number; + + type: 'file_path'; + } + } + + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * Web search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageWebSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * Current status of the web search operation + */ + status: string; + + /** + * Tool call type identifier, always "web_search_call" + */ + type: 'web_search_call'; + } + + /** + * File search tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Unique identifier for this tool call + */ + id: string; + + /** + * List of search queries executed + */ + queries: Array; + + /** + * Current status of the file search operation + */ + status: string; + + /** + * Tool call type identifier, always "file_search_call" + */ + type: 'file_search_call'; + + /** + * (Optional) Search results returned by the file search operation + */ + results?: Array; + } + + export namespace OpenAIResponseOutputMessageFileSearchToolCall { + /** + * Search results returned by the file search operation. 
+ */ + export interface Result { + /** + * (Optional) Key-value attributes associated with the file + */ + attributes: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Unique identifier of the file containing the result + */ + file_id: string; + + /** + * Name of the file containing the result + */ + filename: string; + + /** + * Relevance score for this search result (between 0 and 1) + */ + score: number; + + /** + * Text content of the search result + */ + text: string; + } + } + + /** + * Function tool call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageFunctionToolCall { + /** + * JSON string containing the function arguments + */ + arguments: string; + + /** + * Unique identifier for the function call + */ + call_id: string; + + /** + * Name of the function being called + */ + name: string; + + /** + * Tool call type identifier, always "function_call" + */ + type: 'function_call'; + + /** + * (Optional) Additional identifier for the tool call + */ + id?: string; + + /** + * (Optional) Current status of the function call execution + */ + status?: string; + } + + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. + */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } + + /** + * A request for human approval of a tool invocation. 
+ */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + + /** + * Text formatting configuration for the response + */ + export interface Text { + /** + * (Optional) Text format configuration specifying output format requirements + */ + format?: Text.Format; + } + + export namespace Text { + /** + * (Optional) Text format configuration specifying output format requirements + */ + export interface Format { + /** + * Must be "text", "json_schema", or "json_object" to identify the format type + */ + type: 'text' | 'json_schema' | 'json_object'; + + /** + * (Optional) A description of the response format. Only used for json_schema. + */ + description?: string; + + /** + * The name of the response format. Only used for json_schema. + */ + name?: string; + + /** + * The JSON schema the response should conform to. In a Python SDK, this is often a + * `pydantic` model. Only used for json_schema. + */ + schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) Whether to strictly enforce the JSON schema. If true, the response + * must match the schema exactly. Only used for json_schema. + */ + strict?: boolean; + } + } + + /** + * (Optional) Error details if the response generation failed + */ + export interface Error { + /** + * Error code identifying the type of failure + */ + code: string; + + /** + * Human-readable error message describing the failure + */ + message: string; + } + + /** + * (Optional) Reference to a prompt template and its variables. + */ + export interface Prompt { + /** + * Unique identifier of the prompt template + */ + id: string; + + /** + * Dictionary of variable names to OpenAIResponseInputMessageContent structure for + * template substitution. The substitution values can either be strings, or other + * Response input types like images or files. + */ + variables?: { + [key: string]: + | Prompt.OpenAIResponseInputMessageContentText + | Prompt.OpenAIResponseInputMessageContentImage + | Prompt.OpenAIResponseInputMessageContentFile; + }; + + /** + * Version number of the prompt to use (defaults to latest if not specified) + */ + version?: string; + } + + export namespace Prompt { /** * Text content for input messages in OpenAI response format. */ @@ -2232,366 +4489,222 @@ export namespace ResponseListResponse { */ type: 'input_image'; + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + /** * (Optional) URL of the image content */ image_url?: string; } - export interface UnionMember2 { - annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath - >; - - text: string; - - type: 'output_text'; - } - - export namespace UnionMember2 { + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { /** - * File citation annotation for referencing specific files in response content. + * The type of the input item. Always `input_file`. 
*/ - export interface OpenAIResponseAnnotationFileCitation { - /** - * Unique identifier of the referenced file - */ - file_id: string; - - /** - * Name of the referenced file - */ - filename: string; - - /** - * Position index of the citation within the content - */ - index: number; - - /** - * Annotation type identifier, always "file_citation" - */ - type: 'file_citation'; - } + type: 'input_file'; /** - * URL citation annotation for referencing external web resources. + * The data of the file to be sent to the model. */ - export interface OpenAIResponseAnnotationCitation { - /** - * End position of the citation span in the content - */ - end_index: number; - - /** - * Start position of the citation span in the content - */ - start_index: number; - - /** - * Title of the referenced web resource - */ - title: string; - - /** - * Annotation type identifier, always "url_citation" - */ - type: 'url_citation'; - - /** - * URL of the referenced web resource - */ - url: string; - } - - export interface OpenAIResponseAnnotationContainerFileCitation { - container_id: string; - - end_index: number; - - file_id: string; + file_data?: string; - filename: string; - - start_index: number; - - type: 'container_file_citation'; - } - - export interface OpenAIResponseAnnotationFilePath { - file_id: string; + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; - index: number; + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; - type: 'file_path'; - } + /** + * The name of the file to be sent to the model. + */ + filename?: string; } } /** - * Web search tool call output message for OpenAI responses. + * Web search tool configuration for OpenAI response inputs. */ - export interface OpenAIResponseOutputMessageWebSearchToolCall { - /** - * Unique identifier for this tool call - */ - id: string; - + export interface OpenAIResponseInputToolWebSearch { /** - * Current status of the web search operation + * Web search tool type variant to use */ - status: string; + type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11'; /** - * Tool call type identifier, always "web_search_call" + * (Optional) Size of search context, must be "low", "medium", or "high" */ - type: 'web_search_call'; + search_context_size?: string; } /** - * File search tool call output message for OpenAI responses. + * File search tool configuration for OpenAI response inputs. 
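+ *
+ * A sketch of configuring this tool on a request; the vector store ID is
+ * illustrative and would come from the vector store APIs:
+ *
+ * @example
+ * ```ts
+ * const fileSearchTool = {
+ *   type: 'file_search',
+ *   vector_store_ids: ['vs_abc123'],
+ *   max_num_results: 5,
+ * };
+ * ```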
*/ - export interface OpenAIResponseOutputMessageFileSearchToolCall { + export interface OpenAIResponseInputToolFileSearch { /** - * Unique identifier for this tool call + * Tool type identifier, always "file_search" */ - id: string; + type: 'file_search'; /** - * List of search queries executed + * List of vector store identifiers to search within */ - queries: Array; + vector_store_ids: Array; /** - * Current status of the file search operation + * (Optional) Additional filters to apply to the search */ - status: string; + filters?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Tool call type identifier, always "file_search_call" + * (Optional) Maximum number of search results to return (1-50) */ - type: 'file_search_call'; + max_num_results?: number; /** - * (Optional) Search results returned by the file search operation + * (Optional) Options for ranking and scoring search results */ - results?: Array; + ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions; } - export namespace OpenAIResponseOutputMessageFileSearchToolCall { + export namespace OpenAIResponseInputToolFileSearch { /** - * Search results returned by the file search operation. + * (Optional) Options for ranking and scoring search results */ - export interface Result { - /** - * (Optional) Key-value attributes associated with the file - */ - attributes: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * Unique identifier of the file containing the result - */ - file_id: string; - - /** - * Name of the file containing the result - */ - filename: string; - + export interface RankingOptions { /** - * Relevance score for this search result (between 0 and 1) + * (Optional) Name of the ranking algorithm to use */ - score: number; + ranker?: string; /** - * Text content of the search result + * (Optional) Minimum relevance score threshold for results */ - text: string; + score_threshold?: number; } } /** - * Function tool call output message for OpenAI responses. - */ - export interface OpenAIResponseOutputMessageFunctionToolCall { - /** - * JSON string containing the function arguments - */ - arguments: string; - - /** - * Unique identifier for the function call - */ - call_id: string; - - /** - * Name of the function being called - */ - name: string; - - /** - * Tool call type identifier, always "function_call" - */ - type: 'function_call'; - - /** - * (Optional) Additional identifier for the tool call - */ - id?: string; - - /** - * (Optional) Current status of the function call execution - */ - status?: string; - } - - /** - * Model Context Protocol (MCP) call output message for OpenAI responses. + * Function tool configuration for OpenAI response inputs. 
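+ *
+ * A sketch of a function tool declaration; the name and JSON schema are
+ * illustrative:
+ *
+ * @example
+ * ```ts
+ * const functionTool = {
+ *   type: 'function',
+ *   name: 'get_weather',
+ *   description: 'Look up current weather for a city',
+ *   parameters: {
+ *     type: 'object',
+ *     properties: { city: { type: 'string' } },
+ *     required: ['city'],
+ *   },
+ * };
+ * ```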
*/ - export interface OpenAIResponseOutputMessageMcpCall { - /** - * Unique identifier for this MCP call - */ - id: string; - - /** - * JSON string containing the MCP call arguments - */ - arguments: string; - + export interface OpenAIResponseInputToolFunction { /** - * Name of the MCP method being called + * Name of the function that can be called */ name: string; /** - * Label identifying the MCP server handling the call + * Tool type identifier, always "function" */ - server_label: string; + type: 'function'; /** - * Tool call type identifier, always "mcp_call" + * (Optional) Description of what the function does */ - type: 'mcp_call'; + description?: string; /** - * (Optional) Error message if the MCP call failed + * (Optional) JSON schema defining the function's parameters */ - error?: string; + parameters?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) Output result from the successful MCP call + * (Optional) Whether to enforce strict parameter validation */ - output?: string; + strict?: boolean; } /** - * MCP list tools output message containing available tools from an MCP server. + * Model Context Protocol (MCP) tool configuration for OpenAI response object. */ - export interface OpenAIResponseOutputMessageMcpListTools { - /** - * Unique identifier for this MCP list tools operation - */ - id: string; - + export interface OpenAIResponseToolMcp { /** - * Label identifying the MCP server providing the tools + * Label to identify this MCP server */ server_label: string; /** - * List of available tools provided by the MCP server - */ - tools: Array; - - /** - * Tool call type identifier, always "mcp_list_tools" - */ - type: 'mcp_list_tools'; - } - - export namespace OpenAIResponseOutputMessageMcpListTools { - /** - * Tool definition returned by MCP list tools operation. + * Tool type identifier, always "mcp" */ - export interface Tool { - /** - * JSON schema defining the tool's input parameters - */ - input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + type: 'mcp'; - /** - * Name of the tool - */ - name: string; + /** + * (Optional) Restriction on which tools can be used from this server + */ + allowed_tools?: Array | OpenAIResponseToolMcp.AllowedToolsFilter; + } + export namespace OpenAIResponseToolMcp { + /** + * Filter configuration for restricting which MCP tools can be used. + */ + export interface AllowedToolsFilter { /** - * (Optional) Description of what the tool does + * (Optional) List of specific tool names that are allowed */ - description?: string; + tool_names?: Array; } } /** - * Text formatting configuration for the response + * (Optional) Token usage information for the response */ - export interface Text { + export interface Usage { /** - * (Optional) Text format configuration specifying output format requirements + * Number of tokens in the input */ - format?: Text.Format; - } + input_tokens: number; - export namespace Text { /** - * (Optional) Text format configuration specifying output format requirements + * Number of tokens in the output */ - export interface Format { - /** - * Must be "text", "json_schema", or "json_object" to identify the format type - */ - type: 'text' | 'json_schema' | 'json_object'; - - /** - * (Optional) A description of the response format. Only used for json_schema. - */ - description?: string; + output_tokens: number; - /** - * The name of the response format. Only used for json_schema. 
- */ - name?: string; + /** + * Total tokens used (input + output) + */ + total_tokens: number; - /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. Only used for json_schema. - */ - schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + /** + * Detailed breakdown of input token usage + */ + input_tokens_details?: Usage.InputTokensDetails; - /** - * (Optional) Whether to strictly enforce the JSON schema. If true, the response - * must match the schema exactly. Only used for json_schema. - */ - strict?: boolean; - } + /** + * Detailed breakdown of output token usage + */ + output_tokens_details?: Usage.OutputTokensDetails; } - /** - * (Optional) Error details if the response generation failed - */ - export interface Error { + export namespace Usage { /** - * Error code identifying the type of failure + * Detailed breakdown of input token usage */ - code: string; + export interface InputTokensDetails { + /** + * Number of tokens retrieved from cache + */ + cached_tokens?: number; + } /** - * Human-readable error message describing the failure + * Detailed breakdown of output token usage */ - message: string; + export interface OutputTokensDetails { + /** + * Number of tokens used for reasoning (o1/o3 models) + */ + reasoning_tokens?: number; + } } } @@ -2624,10 +4737,15 @@ export interface ResponseCreateParamsBase { input: | string | Array< + | ResponseCreateParams.OpenAIResponseMessage | ResponseCreateParams.OpenAIResponseOutputMessageWebSearchToolCall | ResponseCreateParams.OpenAIResponseOutputMessageFileSearchToolCall | ResponseCreateParams.OpenAIResponseOutputMessageFunctionToolCall + | ResponseCreateParams.OpenAIResponseOutputMessageMcpCall + | ResponseCreateParams.OpenAIResponseOutputMessageMcpListTools + | ResponseCreateParams.OpenAIResponseMcpApprovalRequest | ResponseCreateParams.OpenAIResponseInputFunctionToolCallOutput + | ResponseCreateParams.OpenAIResponseMcpApprovalResponse | ResponseCreateParams.OpenAIResponseMessage >; @@ -2636,6 +4754,13 @@ export interface ResponseCreateParamsBase { */ model: string; + /** + * (Optional) The ID of a conversation to add the response to. Must begin with + * 'conv\_'. Input and output messages will be automatically added to the + * conversation. + */ + conversation?: string; + /** * (Optional) Additional fields to include in the response. */ @@ -2643,35 +4768,246 @@ export interface ResponseCreateParamsBase { instructions?: string; - max_infer_iters?: number; + max_infer_iters?: number; + + /** + * (Optional) if specified, the new response will be a continuation of the previous + * response. This can be used to easily fork-off new responses from existing + * responses. + */ + previous_response_id?: string; + + /** + * (Optional) Prompt object with ID, version, and variables. + */ + prompt?: ResponseCreateParams.Prompt; + + store?: boolean; + + stream?: boolean; + + temperature?: number; + + /** + * Text response configuration for OpenAI responses. + */ + text?: ResponseCreateParams.Text; + + tools?: Array< + | ResponseCreateParams.OpenAIResponseInputToolWebSearch + | ResponseCreateParams.OpenAIResponseInputToolFileSearch + | ResponseCreateParams.OpenAIResponseInputToolFunction + | ResponseCreateParams.OpenAIResponseInputToolMcp + >; +} + +export namespace ResponseCreateParams { + /** + * Corresponds to the various Message types in the Responses API. 
They are all + * under one type because the Responses API gives them all the same "type" value, + * and there is no way to tell them apart in certain scenarios. + */ + export interface OpenAIResponseMessage { + content: + | string + | Array< + | OpenAIResponseMessage.OpenAIResponseInputMessageContentText + | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile + > + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; + + role: 'system' | 'developer' | 'user' | 'assistant'; + + type: 'message'; + + id?: string; + + status?: string; + } + + export namespace OpenAIResponseMessage { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { + annotations: Array< + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath + >; + + text: string; + + type: 'output_text'; + } + + export namespace OpenAIResponseOutputMessageContentOutputText { + /** + * File citation annotation for referencing specific files in response content. + */ + export interface OpenAIResponseAnnotationFileCitation { + /** + * Unique identifier of the referenced file + */ + file_id: string; + + /** + * Name of the referenced file + */ + filename: string; + + /** + * Position index of the citation within the content + */ + index: number; + + /** + * Annotation type identifier, always "file_citation" + */ + type: 'file_citation'; + } + + /** + * URL citation annotation for referencing external web resources. 
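+ *
+ * A sketch of surfacing these citations, assuming `content` has already been
+ * narrowed to an `output_text` content part:
+ *
+ * @example
+ * ```ts
+ * for (const annotation of content.annotations) {
+ *   if (annotation.type === 'url_citation') {
+ *     console.log(`[${annotation.start_index}-${annotation.end_index}] ${annotation.title}: ${annotation.url}`);
+ *   }
+ * }
+ * ```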
+ */ + export interface OpenAIResponseAnnotationCitation { + /** + * End position of the citation span in the content + */ + end_index: number; + + /** + * Start position of the citation span in the content + */ + start_index: number; + + /** + * Title of the referenced web resource + */ + title: string; + + /** + * Annotation type identifier, always "url_citation" + */ + type: 'url_citation'; + + /** + * URL of the referenced web resource + */ + url: string; + } + + export interface OpenAIResponseAnnotationContainerFileCitation { + container_id: string; + + end_index: number; + + file_id: string; + + filename: string; + + start_index: number; - /** - * (Optional) if specified, the new response will be a continuation of the previous - * response. This can be used to easily fork-off new responses from existing - * responses. - */ - previous_response_id?: string; + type: 'container_file_citation'; + } - store?: boolean; + export interface OpenAIResponseAnnotationFilePath { + file_id: string; - stream?: boolean; + index: number; - temperature?: number; + type: 'file_path'; + } + } - /** - * Text response configuration for OpenAI responses. - */ - text?: ResponseCreateParams.Text; + /** + * Refusal content within a streamed response part. + */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; - tools?: Array< - | ResponseCreateParams.OpenAIResponseInputToolWebSearch - | ResponseCreateParams.OpenAIResponseInputToolFileSearch - | ResponseCreateParams.OpenAIResponseInputToolFunction - | ResponseCreateParams.OpenAIResponseInputToolMcp - >; -} + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } -export namespace ResponseCreateParams { /** * Web search tool call output message for OpenAI responses. */ @@ -2789,6 +5125,108 @@ export namespace ResponseCreateParams { status?: string; } + /** + * Model Context Protocol (MCP) call output message for OpenAI responses. + */ + export interface OpenAIResponseOutputMessageMcpCall { + /** + * Unique identifier for this MCP call + */ + id: string; + + /** + * JSON string containing the MCP call arguments + */ + arguments: string; + + /** + * Name of the MCP method being called + */ + name: string; + + /** + * Label identifying the MCP server handling the call + */ + server_label: string; + + /** + * Tool call type identifier, always "mcp_call" + */ + type: 'mcp_call'; + + /** + * (Optional) Error message if the MCP call failed + */ + error?: string; + + /** + * (Optional) Output result from the successful MCP call + */ + output?: string; + } + + /** + * MCP list tools output message containing available tools from an MCP server. + */ + export interface OpenAIResponseOutputMessageMcpListTools { + /** + * Unique identifier for this MCP list tools operation + */ + id: string; + + /** + * Label identifying the MCP server providing the tools + */ + server_label: string; + + /** + * List of available tools provided by the MCP server + */ + tools: Array; + + /** + * Tool call type identifier, always "mcp_list_tools" + */ + type: 'mcp_list_tools'; + } + + export namespace OpenAIResponseOutputMessageMcpListTools { + /** + * Tool definition returned by MCP list tools operation. 
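+ *
+ * A sketch of the shape, reflecting the move from `parameters` to
+ * `input_schema`; the tool itself is illustrative:
+ *
+ * @example
+ * ```ts
+ * const tool = {
+ *   name: 'get_weather',
+ *   description: 'Look up current weather for a city',
+ *   input_schema: {
+ *     type: 'object',
+ *     properties: { city: { type: 'string' } },
+ *     required: ['city'],
+ *   },
+ * };
+ * ```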
+ */ + export interface Tool { + /** + * JSON schema defining the tool's input parameters + */ + input_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * Name of the tool + */ + name: string; + + /** + * (Optional) Description of what the tool does + */ + description?: string; + } + } + + /** + * A request for human approval of a tool invocation. + */ + export interface OpenAIResponseMcpApprovalRequest { + id: string; + + arguments: string; + + name: string; + + server_label: string; + + type: 'mcp_approval_request'; + } + /** * This represents the output of a function call that gets passed back to the * model. @@ -2805,6 +5243,21 @@ export namespace ResponseCreateParams { status?: string; } + /** + * A response to an MCP approval request. + */ + export interface OpenAIResponseMcpApprovalResponse { + approval_request_id: string; + + approve: boolean; + + type: 'mcp_approval_response'; + + id?: string; + + reason?: string; + } + /** * Corresponds to the various Message types in the Responses API. They are all * under one type because the Responses API gives them all the same "type" value, @@ -2816,8 +5269,12 @@ export namespace ResponseCreateParams { | Array< | OpenAIResponseMessage.OpenAIResponseInputMessageContentText | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage + | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile > - | Array; + | Array< + | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText + | OpenAIResponseMessage.OpenAIResponseContentPartRefusal + >; role: 'system' | 'developer' | 'user' | 'assistant'; @@ -2858,18 +5315,53 @@ export namespace ResponseCreateParams { */ type: 'input_image'; + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + /** * (Optional) URL of the image content */ image_url?: string; } - export interface UnionMember2 { + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } + + export interface OpenAIResponseOutputMessageContentOutputText { annotations: Array< - | UnionMember2.OpenAIResponseAnnotationFileCitation - | UnionMember2.OpenAIResponseAnnotationCitation - | UnionMember2.OpenAIResponseAnnotationContainerFileCitation - | UnionMember2.OpenAIResponseAnnotationFilePath + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation + | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath >; text: string; @@ -2877,7 +5369,7 @@ export namespace ResponseCreateParams { type: 'output_text'; } - export namespace UnionMember2 { + export namespace OpenAIResponseOutputMessageContentOutputText { /** * File citation annotation for referencing specific files in response content. */ @@ -2955,6 +5447,120 @@ export namespace ResponseCreateParams { type: 'file_path'; } } + + /** + * Refusal content within a streamed response part. 
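+ *
+ * A sketch of detecting a refusal while walking message content parts,
+ * assuming `parts` is the array form of a message's `content`:
+ *
+ * @example
+ * ```ts
+ * for (const part of parts) {
+ *   if (part.type === 'refusal') {
+ *     console.warn(`model refused: ${part.refusal}`);
+ *   }
+ * }
+ * ```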
+ */ + export interface OpenAIResponseContentPartRefusal { + /** + * Refusal text supplied by the model + */ + refusal: string; + + /** + * Content part type identifier, always "refusal" + */ + type: 'refusal'; + } + } + + /** + * (Optional) Prompt object with ID, version, and variables. + */ + export interface Prompt { + /** + * Unique identifier of the prompt template + */ + id: string; + + /** + * Dictionary of variable names to OpenAIResponseInputMessageContent structure for + * template substitution. The substitution values can either be strings, or other + * Response input types like images or files. + */ + variables?: { + [key: string]: + | Prompt.OpenAIResponseInputMessageContentText + | Prompt.OpenAIResponseInputMessageContentImage + | Prompt.OpenAIResponseInputMessageContentFile; + }; + + /** + * Version number of the prompt to use (defaults to latest if not specified) + */ + version?: string; + } + + export namespace Prompt { + /** + * Text content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentText { + /** + * The text content of the input message + */ + text: string; + + /** + * Content type identifier, always "input_text" + */ + type: 'input_text'; + } + + /** + * Image content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentImage { + /** + * Level of detail for image processing, can be "low", "high", or "auto" + */ + detail: 'low' | 'high' | 'auto'; + + /** + * Content type identifier, always "input_image" + */ + type: 'input_image'; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * (Optional) URL of the image content + */ + image_url?: string; + } + + /** + * File content for input messages in OpenAI response format. + */ + export interface OpenAIResponseInputMessageContentFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The data of the file to be sent to the model. + */ + file_data?: string; + + /** + * (Optional) The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The URL of the file to be sent to the model. + */ + file_url?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; + } } /** diff --git a/src/resources/routes.ts b/src/resources/routes.ts index 98d5dfe..3d9e5a0 100644 --- a/src/resources/routes.ts +++ b/src/resources/routes.ts @@ -1,16 +1,28 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; import * as Core from '../core'; import * as InspectAPI from './inspect'; export class Routes extends APIResource { /** - * List all available API routes with their methods and implementing providers. + * List routes. List all available API routes with their methods and implementing + * providers. 
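+ *
+ * A sketch of filtering by API level with the new `api_filter` query
+ * parameter; `client` is assumed to be a configured client instance:
+ *
+ * @example
+ * ```ts
+ * const v1Routes = await client.routes.list(); // non-deprecated v1 routes only
+ * const alphaRoutes = await client.routes.list({ api_filter: 'v1alpha' });
+ * const deprecatedRoutes = await client.routes.list({ api_filter: 'deprecated' });
+ * ```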
*/ - list(options?: Core.RequestOptions): Core.APIPromise { + list(query?: RouteListParams, options?: Core.RequestOptions): Core.APIPromise; + list(options?: Core.RequestOptions): Core.APIPromise; + list( + query: RouteListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } return ( - this._client.get('/v1/inspect/routes', options) as Core.APIPromise<{ data: RouteListResponse }> + this._client.get('/v1/inspect/routes', { query, ...options }) as Core.APIPromise<{ + data: RouteListResponse; + }> )._thenUnwrap((obj) => obj.data); } } @@ -30,6 +42,20 @@ export interface ListRoutesResponse { */ export type RouteListResponse = Array; +export interface RouteListParams { + /** + * Optional filter to control which routes are returned. Can be an API level ('v1', + * 'v1alpha', 'v1beta') to show non-deprecated routes at that level, or + * 'deprecated' to show deprecated routes across all levels. If not specified, + * returns only non-deprecated v1 routes. + */ + api_filter?: 'v1' | 'v1alpha' | 'v1beta' | 'deprecated'; +} + export declare namespace Routes { - export { type ListRoutesResponse as ListRoutesResponse, type RouteListResponse as RouteListResponse }; + export { + type ListRoutesResponse as ListRoutesResponse, + type RouteListResponse as RouteListResponse, + type RouteListParams as RouteListParams, + }; } diff --git a/src/resources/safety.ts b/src/resources/safety.ts index d41b2c7..902aa14 100644 --- a/src/resources/safety.ts +++ b/src/resources/safety.ts @@ -6,7 +6,7 @@ import * as Shared from './shared'; export class Safety extends APIResource { /** - * Run a shield. + * Run shield. Run a shield. */ runShield(body: SafetyRunShieldParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/v1/safety/run-shield', { body, ...options }); @@ -27,7 +27,13 @@ export interface SafetyRunShieldParams { /** * The messages to run the shield on. */ - messages: Array; + messages: Array< + | SafetyRunShieldParams.OpenAIUserMessageParam + | SafetyRunShieldParams.OpenAISystemMessageParam + | SafetyRunShieldParams.OpenAIAssistantMessageParam + | SafetyRunShieldParams.OpenAIToolMessageParam + | SafetyRunShieldParams.OpenAIDeveloperMessageParam + >; /** * The parameters of the shield. @@ -40,6 +46,298 @@ export interface SafetyRunShieldParams { shield_id: string; } +export namespace SafetyRunShieldParams { + /** + * A message from the user in an OpenAI-compatible chat completion request. + */ + export interface OpenAIUserMessageParam { + /** + * The content of the message, which can include text and other media + */ + content: + | string + | Array< + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartTextParam + | OpenAIUserMessageParam.OpenAIChatCompletionContentPartImageParam + | OpenAIUserMessageParam.OpenAIFile + >; + + /** + * Must be "user" to identify this as a user message + */ + role: 'user'; + + /** + * (Optional) The name of the user message participant. + */ + name?: string; + } + + export namespace OpenAIUserMessageParam { + /** + * Text content part for OpenAI-compatible chat completion messages. + */ + export interface OpenAIChatCompletionContentPartTextParam { + /** + * The text content of the message + */ + text: string; + + /** + * Must be "text" to identify this as text content + */ + type: 'text'; + } + + /** + * Image content part for OpenAI-compatible chat completion messages. 
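+ *
+ * A sketch of one of these parts inside a user message passed to
+ * `client.safety.runShield(...)`; the URL is illustrative:
+ *
+ * @example
+ * ```ts
+ * const imagePart = {
+ *   type: 'image_url',
+ *   image_url: { url: 'https://example.com/photo.png', detail: 'low' },
+ * };
+ * ```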
+ */ + export interface OpenAIChatCompletionContentPartImageParam { + /** + * Image URL specification and processing details + */ + image_url: OpenAIChatCompletionContentPartImageParam.ImageURL; + + /** + * Must be "image_url" to identify this as image content + */ + type: 'image_url'; + } + + export namespace OpenAIChatCompletionContentPartImageParam { + /** + * Image URL specification and processing details + */ + export interface ImageURL { + /** + * URL of the image to include in the message + */ + url: string; + + /** + * (Optional) Level of detail for image processing. Can be "low", "high", or "auto" + */ + detail?: string; + } + } + + export interface OpenAIFile { + file: OpenAIFile.File; + + type: 'file'; + } + + export namespace OpenAIFile { + export interface File { + file_data?: string; + + file_id?: string; + + filename?: string; + } + } + } + + /** + * A system message providing instructions or context to the model. + */ + export interface OpenAISystemMessageParam { + /** + * The content of the "system prompt". If multiple system messages are provided, + * they are concatenated. The underlying Llama Stack code may also add other system + * messages (for example, for formatting tool definitions). + */ + content: string | Array; + + /** + * Must be "system" to identify this as a system message + */ + role: 'system'; + + /** + * (Optional) The name of the system message participant. + */ + name?: string; + } + + export namespace OpenAISystemMessageParam { + /** + * Text content part for OpenAI-compatible chat completion messages. + */ + export interface UnionMember1 { + /** + * The text content of the message + */ + text: string; + + /** + * Must be "text" to identify this as text content + */ + type: 'text'; + } + } + + /** + * A message containing the model's (assistant) response in an OpenAI-compatible + * chat completion request. + */ + export interface OpenAIAssistantMessageParam { + /** + * Must be "assistant" to identify this as the model's response + */ + role: 'assistant'; + + /** + * The content of the model's response + */ + content?: string | Array; + + /** + * (Optional) The name of the assistant message participant. + */ + name?: string; + + /** + * List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object. + */ + tool_calls?: Array; + } + + export namespace OpenAIAssistantMessageParam { + /** + * Text content part for OpenAI-compatible chat completion messages. + */ + export interface UnionMember1 { + /** + * The text content of the message + */ + text: string; + + /** + * Must be "text" to identify this as text content + */ + type: 'text'; + } + + /** + * Tool call specification for OpenAI-compatible chat completion responses. + */ + export interface ToolCall { + /** + * Must be "function" to identify this as a function call + */ + type: 'function'; + + /** + * (Optional) Unique identifier for the tool call + */ + id?: string; + + /** + * (Optional) Function call details + */ + function?: ToolCall.Function; + + /** + * (Optional) Index of the tool call in the list + */ + index?: number; + } + + export namespace ToolCall { + /** + * (Optional) Function call details + */ + export interface Function { + /** + * (Optional) Arguments to pass to the function as a JSON string + */ + arguments?: string; + + /** + * (Optional) Name of the function to call + */ + name?: string; + } + } + } + + /** + * A message representing the result of a tool invocation in an OpenAI-compatible + * chat completion request. 
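+   *
+   * An illustrative value (the ID and content are placeholders), e.g. as one
+   * entry in the `messages` array passed to `client.safety.runShield`:
+   *
+   * @example
+   * ```ts
+   * const toolMessage = {
+   *   role: 'tool' as const,
+   *   tool_call_id: 'call_abc123',
+   *   content: '{"temperature_f": 72}',
+   * };
+   * ```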
+ */ + export interface OpenAIToolMessageParam { + /** + * The response content from the tool + */ + content: string | Array; + + /** + * Must be "tool" to identify this as a tool response + */ + role: 'tool'; + + /** + * Unique identifier for the tool call this response is for + */ + tool_call_id: string; + } + + export namespace OpenAIToolMessageParam { + /** + * Text content part for OpenAI-compatible chat completion messages. + */ + export interface UnionMember1 { + /** + * The text content of the message + */ + text: string; + + /** + * Must be "text" to identify this as text content + */ + type: 'text'; + } + } + + /** + * A message from the developer in an OpenAI-compatible chat completion request. + */ + export interface OpenAIDeveloperMessageParam { + /** + * The content of the developer message + */ + content: string | Array; + + /** + * Must be "developer" to identify this as a developer message + */ + role: 'developer'; + + /** + * (Optional) The name of the developer message participant. + */ + name?: string; + } + + export namespace OpenAIDeveloperMessageParam { + /** + * Text content part for OpenAI-compatible chat completion messages. + */ + export interface UnionMember1 { + /** + * The text content of the message + */ + text: string; + + /** + * Must be "text" to identify this as text content + */ + type: 'text'; + } + } +} + export declare namespace Safety { export { type RunShieldResponse as RunShieldResponse, type SafetyRunShieldParams as SafetyRunShieldParams }; } diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 00c767f..6d18737 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,140 +1,5 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Shared from './shared'; -import * as InferenceAPI from './inference'; -import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; - -/** - * Configuration for an agent. - */ -export interface AgentConfig { - /** - * The system instructions for the agent - */ - instructions: string; - - /** - * The model identifier to use for the agent - */ - model: string; - - client_tools?: Array; - - /** - * Optional flag indicating whether session data has to be persisted - */ - enable_session_persistence?: boolean; - - input_shields?: Array; - - max_infer_iters?: number; - - /** - * Optional name for the agent, used in telemetry and identification - */ - name?: string; - - output_shields?: Array; - - /** - * Optional response format configuration - */ - response_format?: ResponseFormat; - - /** - * Sampling parameters. - */ - sampling_params?: SamplingParams; - - /** - * @deprecated Whether tool use is required or automatic. This is a hint to the - * model which may not be followed. It depends on the Instruction Following - * capabilities of the model. - */ - tool_choice?: 'auto' | 'required' | 'none'; - - /** - * Configuration for tool use. - */ - tool_config?: AgentConfig.ToolConfig; - - /** - * @deprecated Prompt format for calling custom / zero shot tools. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - - toolgroups?: Array; -} - -export namespace AgentConfig { - /** - * Configuration for tool use. - */ - export interface ToolConfig { - /** - * (Optional) Config for how to override the default system prompt. - - * `SystemMessageBehavior.append`: Appends the provided system message to the - * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default - * system prompt with the provided system message. 
The system message can include - * the string '{{function_definitions}}' to indicate where the function definitions - * should be inserted. - */ - system_message_behavior?: 'append' | 'replace'; - - /** - * (Optional) Whether tool use is automatic, required, or none. Can also specify a - * tool name to use a specific tool. Defaults to ToolChoice.auto. - */ - tool_choice?: 'auto' | 'required' | 'none' | (string & {}); - - /** - * (Optional) Instructs the model how to format tool calls. By default, Llama Stack - * will attempt to use a format that is best adapted to the model. - - * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - * tag. - `ToolPromptFormat.python_list`: The tool calls - * are output as Python syntax -- a list of function calls. - */ - tool_prompt_format?: 'json' | 'function_tag' | 'python_list'; - } - - export interface AgentToolGroupWithArgs { - args: { [key: string]: boolean | number | string | Array | unknown | null }; - - name: string; - } -} - -/** - * Response from a batch completion request. - */ -export interface BatchCompletion { - /** - * List of completion responses, one for each input in the batch - */ - batch: Array; -} - -/** - * Response from a chat completion request. - */ -export interface ChatCompletionResponse { - /** - * The complete response message - */ - completion_message: CompletionMessage; - - /** - * Optional log probabilities for generated tokens - */ - logprobs?: Array; - - /** - * (Optional) List of metrics associated with the API response - */ - metrics?: Array; -} - /** * A message containing the model's (assistant) response in a chat conversation. */ @@ -165,63 +30,6 @@ export interface CompletionMessage { tool_calls?: Array; } -/** - * A text content delta for streaming responses. - */ -export type ContentDelta = ContentDelta.TextDelta | ContentDelta.ImageDelta | ContentDelta.ToolCallDelta; - -export namespace ContentDelta { - /** - * A text content delta for streaming responses. - */ - export interface TextDelta { - /** - * The incremental text content - */ - text: string; - - /** - * Discriminator type of the delta. Always "text" - */ - type: 'text'; - } - - /** - * An image content delta for streaming responses. - */ - export interface ImageDelta { - /** - * The incremental image data as bytes - */ - image: string; - - /** - * Discriminator type of the delta. Always "image" - */ - type: 'image'; - } - - /** - * A tool call content delta for streaming responses. - */ - export interface ToolCallDelta { - /** - * Current parsing status of the tool call - */ - parse_status: 'started' | 'in_progress' | 'failed' | 'succeeded'; - - /** - * Either an in-progress tool call string or the final parsed tool call - */ - tool_call: string | Shared.ToolCall; - - /** - * Discriminator type of the delta. Always "tool_call" - */ - type: 'tool_call'; - } -} - /** * A document to be used for document ingestion in the RAG Tool. */ @@ -472,26 +280,6 @@ export namespace InterleavedContentItem { */ export type Message = UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage; -/** - * A metric value included in API responses. - */ -export interface Metric { - /** - * The name of the metric - */ - metric: string; - - /** - * The numeric value of the metric - */ - value: number; - - /** - * (Optional) The unit of measurement for the metric value - */ - unit?: string; -} - /** * Parameter type for string values. 
*/ @@ -634,7 +422,7 @@ export interface QueryConfig { /** * Configuration for the query generator. */ - query_generator_config: QueryGeneratorConfig; + query_generator_config: QueryConfig.DefaultRagQueryGeneratorConfig | QueryConfig.LlmragQueryGeneratorConfig; /** * Search mode for retrieval—either "vector", "keyword", or "hybrid". Default @@ -649,47 +437,6 @@ export interface QueryConfig { } export namespace QueryConfig { - /** - * Reciprocal Rank Fusion (RRF) ranker configuration. - */ - export interface RrfRanker { - /** - * The impact factor for RRF scoring. Higher values give more weight to - * higher-ranked results. Must be greater than 0 - */ - impact_factor: number; - - /** - * The type of ranker, always "rrf" - */ - type: 'rrf'; - } - - /** - * Weighted ranker configuration that combines vector and keyword scores. - */ - export interface WeightedRanker { - /** - * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use - * vector scores, values in between blend both scores. - */ - alpha: number; - - /** - * The type of ranker, always "weighted" - */ - type: 'weighted'; - } -} - -/** - * Configuration for the default RAG query generator. - */ -export type QueryGeneratorConfig = - | QueryGeneratorConfig.DefaultRagQueryGeneratorConfig - | QueryGeneratorConfig.LlmragQueryGeneratorConfig; - -export namespace QueryGeneratorConfig { /** * Configuration for the default RAG query generator. */ @@ -724,161 +471,74 @@ export namespace QueryGeneratorConfig { */ type: 'llm'; } -} -/** - * Result of a RAG query containing retrieved content and metadata. - */ -export interface QueryResult { - /** - * Additional metadata about the query result - */ - metadata: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) The retrieved content from the query - */ - content?: InterleavedContent; -} - -/** - * Configuration for JSON schema-guided response generation. - */ -export type ResponseFormat = ResponseFormat.JsonSchemaResponseFormat | ResponseFormat.GrammarResponseFormat; - -export namespace ResponseFormat { /** - * Configuration for JSON schema-guided response generation. + * Reciprocal Rank Fusion (RRF) ranker configuration. */ - export interface JsonSchemaResponseFormat { + export interface RrfRanker { /** - * The JSON schema the response should conform to. In a Python SDK, this is often a - * `pydantic` model. + * The impact factor for RRF scoring. Higher values give more weight to + * higher-ranked results. Must be greater than 0 */ - json_schema: { [key: string]: boolean | number | string | Array | unknown | null }; + impact_factor: number; /** - * Must be "json_schema" to identify this format type + * The type of ranker, always "rrf" */ - type: 'json_schema'; + type: 'rrf'; } /** - * Configuration for grammar-guided response generation. + * Weighted ranker configuration that combines vector and keyword scores. */ - export interface GrammarResponseFormat { + export interface WeightedRanker { /** - * The BNF grammar specification the response should conform to + * Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use + * vector scores, values in between blend both scores. */ - bnf: { [key: string]: boolean | number | string | Array | unknown | null }; + alpha: number; /** - * Must be "grammar" to identify this format type + * The type of ranker, always "weighted" */ - type: 'grammar'; + type: 'weighted'; } } /** - * Details of a safety violation detected by content moderation. 
+ * Result of a RAG query containing retrieved content and metadata. */ -export interface SafetyViolation { +export interface QueryResult { /** - * Additional metadata including specific violation codes for debugging and - * telemetry + * Additional metadata about the query result */ metadata: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Severity level of the violation - */ - violation_level: 'info' | 'warn' | 'error'; - - /** - * (Optional) Message to convey to the user about the violation + * (Optional) The retrieved content from the query */ - user_message?: string; + content?: InterleavedContent; } /** - * Sampling parameters. + * Details of a safety violation detected by content moderation. */ -export interface SamplingParams { - /** - * The sampling strategy. - */ - strategy: - | SamplingParams.GreedySamplingStrategy - | SamplingParams.TopPSamplingStrategy - | SamplingParams.TopKSamplingStrategy; - - /** - * The maximum number of tokens that can be generated in the completion. The token - * count of your prompt plus max_tokens cannot exceed the model's context length. - */ - max_tokens?: number; - - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on - * whether they appear in the text so far, increasing the model's likelihood to - * talk about new topics. - */ - repetition_penalty?: number; - - /** - * Up to 4 sequences where the API will stop generating further tokens. The - * returned text will not contain the stop sequence. - */ - stop?: Array; -} - -export namespace SamplingParams { +export interface SafetyViolation { /** - * Greedy sampling strategy that selects the highest probability token at each - * step. + * Additional metadata including specific violation codes for debugging and + * telemetry */ - export interface GreedySamplingStrategy { - /** - * Must be "greedy" to identify this sampling strategy - */ - type: 'greedy'; - } + metadata: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * Top-p (nucleus) sampling strategy that samples from the smallest set of tokens - * with cumulative probability >= p. + * Severity level of the violation */ - export interface TopPSamplingStrategy { - /** - * Must be "top_p" to identify this sampling strategy - */ - type: 'top_p'; - - /** - * Controls randomness in sampling. Higher values increase randomness - */ - temperature?: number; - - /** - * Cumulative probability threshold for nucleus sampling. Defaults to 0.95 - */ - top_p?: number; - } + violation_level: 'info' | 'warn' | 'error'; /** - * Top-k sampling strategy that restricts sampling to the k most likely tokens. + * (Optional) Message to convey to the user about the violation */ - export interface TopKSamplingStrategy { - /** - * Number of top tokens to consider for sampling. 
Must be at least 1 - */ - top_k: number; - - /** - * Must be "top_k" to identify this sampling strategy - */ - type: 'top_k'; - } + user_message?: string; } /** @@ -914,33 +574,11 @@ export interface SystemMessage { } export interface ToolCall { - arguments: - | string - | { - [key: string]: - | string - | number - | boolean - | Array - | { [key: string]: string | number | boolean | null } - | null; - }; + arguments: string; call_id: string; tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {}); - - arguments_json?: string; -} - -export interface ToolParamDefinition { - param_type: string; - - default?: boolean | number | string | Array | unknown | null; - - description?: string; - - required?: boolean; } /** diff --git a/src/resources/telemetry.ts b/src/resources/telemetry.ts deleted file mode 100644 index 8064a72..0000000 --- a/src/resources/telemetry.ts +++ /dev/null @@ -1,686 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../resource'; -import * as Core from '../core'; - -export class Telemetry extends APIResource { - /** - * Get a span by its ID. - */ - getSpan( - traceId: string, - spanId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.get(`/v1/telemetry/traces/${traceId}/spans/${spanId}`, options); - } - - /** - * Get a span tree by its ID. - */ - getSpanTree( - spanId: string, - body: TelemetryGetSpanTreeParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post(`/v1/telemetry/spans/${spanId}/tree`, { body, ...options }) as Core.APIPromise<{ - data: TelemetryGetSpanTreeResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Get a trace by its ID. - */ - getTrace(traceId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/telemetry/traces/${traceId}`, options); - } - - /** - * Log an event. - */ - logEvent(body: TelemetryLogEventParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/telemetry/events', { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } - - /** - * Query metrics. - */ - queryMetrics( - metricName: string, - body: TelemetryQueryMetricsParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post(`/v1/telemetry/metrics/${metricName}`, { body, ...options }) as Core.APIPromise<{ - data: TelemetryQueryMetricsResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Query spans. - */ - querySpans( - body: TelemetryQuerySpansParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post('/v1/telemetry/spans', { body, ...options }) as Core.APIPromise<{ - data: TelemetryQuerySpansResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Query traces. - */ - queryTraces( - body: TelemetryQueryTracesParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return ( - this._client.post('/v1/telemetry/traces', { body, ...options }) as Core.APIPromise<{ - data: TelemetryQueryTracesResponse; - }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Save spans to a dataset. 
- */ - saveSpansToDataset( - body: TelemetrySaveSpansToDatasetParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/telemetry/spans/export', { - body, - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } -} - -/** - * An unstructured log event containing a simple text message. - */ -export type Event = Event.UnstructuredLogEvent | Event.MetricEvent | Event.StructuredLogEvent; - -export namespace Event { - /** - * An unstructured log event containing a simple text message. - */ - export interface UnstructuredLogEvent { - /** - * The log message text - */ - message: string; - - /** - * The severity level of the log message - */ - severity: 'verbose' | 'debug' | 'info' | 'warn' | 'error' | 'critical'; - - /** - * Unique identifier for the span this event belongs to - */ - span_id: string; - - /** - * Timestamp when the event occurred - */ - timestamp: string; - - /** - * Unique identifier for the trace this event belongs to - */ - trace_id: string; - - /** - * Event type identifier set to UNSTRUCTURED_LOG - */ - type: 'unstructured_log'; - - /** - * (Optional) Key-value pairs containing additional metadata about the event - */ - attributes?: { [key: string]: string | number | boolean | null }; - } - - /** - * A metric event containing a measured value. - */ - export interface MetricEvent { - /** - * The name of the metric being measured - */ - metric: string; - - /** - * Unique identifier for the span this event belongs to - */ - span_id: string; - - /** - * Timestamp when the event occurred - */ - timestamp: string; - - /** - * Unique identifier for the trace this event belongs to - */ - trace_id: string; - - /** - * Event type identifier set to METRIC - */ - type: 'metric'; - - /** - * The unit of measurement for the metric value - */ - unit: string; - - /** - * The numeric value of the metric measurement - */ - value: number; - - /** - * (Optional) Key-value pairs containing additional metadata about the event - */ - attributes?: { [key: string]: string | number | boolean | null }; - } - - /** - * A structured log event containing typed payload data. - */ - export interface StructuredLogEvent { - /** - * The structured payload data for the log event - */ - payload: StructuredLogEvent.SpanStartPayload | StructuredLogEvent.SpanEndPayload; - - /** - * Unique identifier for the span this event belongs to - */ - span_id: string; - - /** - * Timestamp when the event occurred - */ - timestamp: string; - - /** - * Unique identifier for the trace this event belongs to - */ - trace_id: string; - - /** - * Event type identifier set to STRUCTURED_LOG - */ - type: 'structured_log'; - - /** - * (Optional) Key-value pairs containing additional metadata about the event - */ - attributes?: { [key: string]: string | number | boolean | null }; - } - - export namespace StructuredLogEvent { - /** - * Payload for a span start event. - */ - export interface SpanStartPayload { - /** - * Human-readable name describing the operation this span represents - */ - name: string; - - /** - * Payload type identifier set to SPAN_START - */ - type: 'span_start'; - - /** - * (Optional) Unique identifier for the parent span, if this is a child span - */ - parent_span_id?: string; - } - - /** - * Payload for a span end event. 
- */ - export interface SpanEndPayload { - /** - * The final status of the span indicating success or failure - */ - status: 'ok' | 'error'; - - /** - * Payload type identifier set to SPAN_END - */ - type: 'span_end'; - } - } -} - -/** - * A condition for filtering query results. - */ -export interface QueryCondition { - /** - * The attribute key to filter on - */ - key: string; - - /** - * The comparison operator to apply - */ - op: 'eq' | 'ne' | 'gt' | 'lt'; - - /** - * The value to compare against - */ - value: boolean | number | string | Array | unknown | null; -} - -/** - * Response containing a list of spans. - */ -export interface QuerySpansResponse { - /** - * List of spans matching the query criteria - */ - data: TelemetryQuerySpansResponse; -} - -/** - * A span that includes status information. - */ -export interface SpanWithStatus { - /** - * Human-readable name describing the operation this span represents - */ - name: string; - - /** - * Unique identifier for the span - */ - span_id: string; - - /** - * Timestamp when the operation began - */ - start_time: string; - - /** - * Unique identifier for the trace this span belongs to - */ - trace_id: string; - - /** - * (Optional) Key-value pairs containing additional metadata about the span - */ - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) Timestamp when the operation finished, if completed - */ - end_time?: string; - - /** - * (Optional) Unique identifier for the parent span, if this is a child span - */ - parent_span_id?: string; - - /** - * (Optional) The current status of the span - */ - status?: 'ok' | 'error'; -} - -/** - * A trace representing the complete execution path of a request across multiple - * operations. - */ -export interface Trace { - /** - * Unique identifier for the root span that started this trace - */ - root_span_id: string; - - /** - * Timestamp when the trace began - */ - start_time: string; - - /** - * Unique identifier for the trace - */ - trace_id: string; - - /** - * (Optional) Timestamp when the trace finished, if completed - */ - end_time?: string; -} - -/** - * A span representing a single operation within a trace. - */ -export interface TelemetryGetSpanResponse { - /** - * Human-readable name describing the operation this span represents - */ - name: string; - - /** - * Unique identifier for the span - */ - span_id: string; - - /** - * Timestamp when the operation began - */ - start_time: string; - - /** - * Unique identifier for the trace this span belongs to - */ - trace_id: string; - - /** - * (Optional) Key-value pairs containing additional metadata about the span - */ - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) Timestamp when the operation finished, if completed - */ - end_time?: string; - - /** - * (Optional) Unique identifier for the parent span, if this is a child span - */ - parent_span_id?: string; -} - -/** - * Dictionary mapping span IDs to spans with status information - */ -export type TelemetryGetSpanTreeResponse = { [key: string]: SpanWithStatus }; - -/** - * List of metric series matching the query criteria - */ -export type TelemetryQueryMetricsResponse = - Array; - -export namespace TelemetryQueryMetricsResponse { - /** - * A time series of metric data points. 
- */ - export interface TelemetryQueryMetricsResponseItem { - /** - * List of labels associated with this metric series - */ - labels: Array; - - /** - * The name of the metric - */ - metric: string; - - /** - * List of data points in chronological order - */ - values: Array; - } - - export namespace TelemetryQueryMetricsResponseItem { - /** - * A label associated with a metric. - */ - export interface Label { - /** - * The name of the label - */ - name: string; - - /** - * The value of the label - */ - value: string; - } - - /** - * A single data point in a metric time series. - */ - export interface Value { - /** - * Unix timestamp when the metric value was recorded - */ - timestamp: number; - - unit: string; - - /** - * The numeric value of the metric at this timestamp - */ - value: number; - } - } -} - -/** - * List of spans matching the query criteria - */ -export type TelemetryQuerySpansResponse = Array; - -export namespace TelemetryQuerySpansResponse { - /** - * A span representing a single operation within a trace. - */ - export interface TelemetryQuerySpansResponseItem { - /** - * Human-readable name describing the operation this span represents - */ - name: string; - - /** - * Unique identifier for the span - */ - span_id: string; - - /** - * Timestamp when the operation began - */ - start_time: string; - - /** - * Unique identifier for the trace this span belongs to - */ - trace_id: string; - - /** - * (Optional) Key-value pairs containing additional metadata about the span - */ - attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; - - /** - * (Optional) Timestamp when the operation finished, if completed - */ - end_time?: string; - - /** - * (Optional) Unique identifier for the parent span, if this is a child span - */ - parent_span_id?: string; - } -} - -/** - * List of traces matching the query criteria - */ -export type TelemetryQueryTracesResponse = Array; - -export interface TelemetryGetSpanTreeParams { - /** - * The attributes to return in the tree. - */ - attributes_to_return?: Array; - - /** - * The maximum depth of the tree. - */ - max_depth?: number; -} - -export interface TelemetryLogEventParams { - /** - * The event to log. - */ - event: Event; - - /** - * The time to live of the event. - */ - ttl_seconds: number; -} - -export interface TelemetryQueryMetricsParams { - /** - * The type of query to perform. - */ - query_type: 'range' | 'instant'; - - /** - * The start time of the metric to query. - */ - start_time: number; - - /** - * The end time of the metric to query. - */ - end_time?: number; - - /** - * The granularity of the metric to query. - */ - granularity?: string; - - /** - * The label matchers to apply to the metric. - */ - label_matchers?: Array; -} - -export namespace TelemetryQueryMetricsParams { - /** - * A matcher for filtering metrics by label values. - */ - export interface LabelMatcher { - /** - * The name of the label to match - */ - name: string; - - /** - * The comparison operator to use for matching - */ - operator: '=' | '!=' | '=~' | '!~'; - - /** - * The value to match against - */ - value: string; - } -} - -export interface TelemetryQuerySpansParams { - /** - * The attribute filters to apply to the spans. - */ - attribute_filters: Array; - - /** - * The attributes to return in the spans. - */ - attributes_to_return: Array; - - /** - * The maximum depth of the tree. - */ - max_depth?: number; -} - -export interface TelemetryQueryTracesParams { - /** - * The attribute filters to apply to the traces. 
- */ - attribute_filters?: Array; - - /** - * The limit of traces to return. - */ - limit?: number; - - /** - * The offset of the traces to return. - */ - offset?: number; - - /** - * The order by of the traces to return. - */ - order_by?: Array; -} - -export interface TelemetrySaveSpansToDatasetParams { - /** - * The attribute filters to apply to the spans. - */ - attribute_filters: Array; - - /** - * The attributes to save to the dataset. - */ - attributes_to_save: Array; - - /** - * The ID of the dataset to save the spans to. - */ - dataset_id: string; - - /** - * The maximum depth of the tree. - */ - max_depth?: number; -} - -export declare namespace Telemetry { - export { - type Event as Event, - type QueryCondition as QueryCondition, - type QuerySpansResponse as QuerySpansResponse, - type SpanWithStatus as SpanWithStatus, - type Trace as Trace, - type TelemetryGetSpanResponse as TelemetryGetSpanResponse, - type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse, - type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse, - type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse, - type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse, - type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams, - type TelemetryLogEventParams as TelemetryLogEventParams, - type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams, - type TelemetryQuerySpansParams as TelemetryQuerySpansParams, - type TelemetryQueryTracesParams as TelemetryQueryTracesParams, - type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams, - }; -} diff --git a/src/resources/tool-runtime/rag-tool.ts b/src/resources/tool-runtime/rag-tool.ts index b9f6669..af15c6c 100644 --- a/src/resources/tool-runtime/rag-tool.ts +++ b/src/resources/tool-runtime/rag-tool.ts @@ -38,7 +38,7 @@ export interface RagToolInsertParams { /** * ID of the vector database to store the document embeddings */ - vector_db_id: string; + vector_store_id: string; } export interface RagToolQueryParams { @@ -50,7 +50,7 @@ export interface RagToolQueryParams { /** * List of vector database IDs to search within */ - vector_db_ids: Array; + vector_store_ids: Array; /** * (Optional) Configuration parameters for the query operation diff --git a/src/resources/tool-runtime/tool-runtime.ts b/src/resources/tool-runtime/tool-runtime.ts index ca1a6c8..3324906 100644 --- a/src/resources/tool-runtime/tool-runtime.ts +++ b/src/resources/tool-runtime/tool-runtime.ts @@ -57,47 +57,25 @@ export interface ToolDef { */ description?: string; + /** + * (Optional) JSON Schema for tool inputs (MCP inputSchema) + */ + input_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; + /** * (Optional) Additional metadata about the tool */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * (Optional) List of parameters this tool accepts + * (Optional) JSON Schema for tool outputs (MCP outputSchema) */ - parameters?: Array; -} + output_schema?: { [key: string]: boolean | number | string | Array | unknown | null }; -export namespace ToolDef { /** - * Parameter definition for a tool. 
+ * (Optional) ID of the tool group this tool belongs to */ - export interface Parameter { - /** - * Human-readable description of what the parameter does - */ - description: string; - - /** - * Name of the parameter - */ - name: string; - - /** - * Type of the parameter (e.g., string, integer) - */ - parameter_type: string; - - /** - * Whether this parameter is required for tool invocation - */ - required: boolean; - - /** - * (Optional) Default value for the parameter if not provided - */ - default?: boolean | number | string | Array | unknown | null; - } + toolgroup_id?: string; } /** diff --git a/src/resources/tools.ts b/src/resources/tools.ts index ba35360..668d2ce 100644 --- a/src/resources/tools.ts +++ b/src/resources/tools.ts @@ -3,6 +3,7 @@ import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; import * as Core from '../core'; +import * as ToolRuntimeAPI from './tool-runtime/tool-runtime'; export class Tools extends APIResource { /** @@ -25,93 +26,15 @@ export class Tools extends APIResource { /** * Get a tool by its name. */ - get(toolName: string, options?: Core.RequestOptions): Core.APIPromise { + get(toolName: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/v1/tools/${toolName}`, options); } } /** - * Response containing a list of tools. + * List of tool definitions */ -export interface ListToolsResponse { - /** - * List of tools - */ - data: ToolListResponse; -} - -/** - * A tool that can be invoked by agents. - */ -export interface Tool { - /** - * Human-readable description of what the tool does - */ - description: string; - - identifier: string; - - /** - * List of parameters this tool accepts - */ - parameters: Array; - - provider_id: string; - - /** - * ID of the tool group this tool belongs to - */ - toolgroup_id: string; - - /** - * Type of resource, always 'tool' - */ - type: 'tool'; - - /** - * (Optional) Additional metadata about the tool - */ - metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; - - provider_resource_id?: string; -} - -export namespace Tool { - /** - * Parameter definition for a tool. - */ - export interface Parameter { - /** - * Human-readable description of what the parameter does - */ - description: string; - - /** - * Name of the parameter - */ - name: string; - - /** - * Type of the parameter (e.g., string, integer) - */ - parameter_type: string; - - /** - * Whether this parameter is required for tool invocation - */ - required: boolean; - - /** - * (Optional) Default value for the parameter if not provided - */ - default?: boolean | number | string | Array | unknown | null; - } -} - -/** - * List of tools - */ -export type ToolListResponse = Array; +export type ToolListResponse = Array; export interface ToolListParams { /** @@ -121,10 +44,5 @@ export interface ToolListParams { } export declare namespace Tools { - export { - type ListToolsResponse as ListToolsResponse, - type Tool as Tool, - type ToolListResponse as ToolListResponse, - type ToolListParams as ToolListParams, - }; + export { type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams }; } diff --git a/src/resources/vector-dbs.ts b/src/resources/vector-dbs.ts deleted file mode 100644 index 3004227..0000000 --- a/src/resources/vector-dbs.ts +++ /dev/null @@ -1,185 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import { APIResource } from '../resource'; -import * as Core from '../core'; - -export class VectorDBs extends APIResource { - /** - * Get a vector database by its identifier. - */ - retrieve(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/vector-dbs/${vectorDBId}`, options); - } - - /** - * List all vector databases. - */ - list(options?: Core.RequestOptions): Core.APIPromise { - return ( - this._client.get('/v1/vector-dbs', options) as Core.APIPromise<{ data: VectorDBListResponse }> - )._thenUnwrap((obj) => obj.data); - } - - /** - * Register a vector database. - */ - register( - body: VectorDBRegisterParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/v1/vector-dbs', { body, ...options }); - } - - /** - * Unregister a vector database. - */ - unregister(vectorDBId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/vector-dbs/${vectorDBId}`, { - ...options, - headers: { Accept: '*/*', ...options?.headers }, - }); - } -} - -/** - * Response from listing vector databases. - */ -export interface ListVectorDBsResponse { - /** - * List of vector databases - */ - data: VectorDBListResponse; -} - -/** - * Vector database resource for storing and querying vector embeddings. - */ -export interface VectorDBRetrieveResponse { - /** - * Dimension of the embedding vectors - */ - embedding_dimension: number; - - /** - * Name of the embedding model to use for vector generation - */ - embedding_model: string; - - identifier: string; - - provider_id: string; - - /** - * Type of resource, always 'vector_db' for vector databases - */ - type: 'vector_db'; - - provider_resource_id?: string; - - vector_db_name?: string; -} - -/** - * List of vector databases - */ -export type VectorDBListResponse = Array; - -export namespace VectorDBListResponse { - /** - * Vector database resource for storing and querying vector embeddings. - */ - export interface VectorDBListResponseItem { - /** - * Dimension of the embedding vectors - */ - embedding_dimension: number; - - /** - * Name of the embedding model to use for vector generation - */ - embedding_model: string; - - identifier: string; - - provider_id: string; - - /** - * Type of resource, always 'vector_db' for vector databases - */ - type: 'vector_db'; - - provider_resource_id?: string; - - vector_db_name?: string; - } -} - -/** - * Vector database resource for storing and querying vector embeddings. - */ -export interface VectorDBRegisterResponse { - /** - * Dimension of the embedding vectors - */ - embedding_dimension: number; - - /** - * Name of the embedding model to use for vector generation - */ - embedding_model: string; - - identifier: string; - - provider_id: string; - - /** - * Type of resource, always 'vector_db' for vector databases - */ - type: 'vector_db'; - - provider_resource_id?: string; - - vector_db_name?: string; -} - -export interface VectorDBRegisterParams { - /** - * The embedding model to use. - */ - embedding_model: string; - - /** - * The identifier of the vector database to register. - */ - vector_db_id: string; - - /** - * The dimension of the embedding model. - */ - embedding_dimension?: number; - - /** - * The identifier of the provider. - */ - provider_id?: string; - - /** - * The identifier of the vector database in the provider. - */ - provider_vector_db_id?: string; - - /** - * The name of the vector database. 
- */ - vector_db_name?: string; -} - -export declare namespace VectorDBs { - export { - type ListVectorDBsResponse as ListVectorDBsResponse, - type VectorDBRetrieveResponse as VectorDBRetrieveResponse, - type VectorDBListResponse as VectorDBListResponse, - type VectorDBRegisterResponse as VectorDBRegisterResponse, - type VectorDBRegisterParams as VectorDBRegisterParams, - }; -} diff --git a/src/resources/vector-io.ts b/src/resources/vector-io.ts index 6ff531a..09ed568 100644 --- a/src/resources/vector-io.ts +++ b/src/resources/vector-io.ts @@ -151,7 +151,7 @@ export interface VectorIoInsertParams { /** * The identifier of the vector database to insert the chunks into. */ - vector_db_id: string; + vector_store_id: string; /** * The time to live of the chunks. @@ -267,7 +267,7 @@ export interface VectorIoQueryParams { /** * The identifier of the vector database to query. */ - vector_db_id: string; + vector_store_id: string; /** * The parameters of the query. diff --git a/src/resources/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts new file mode 100644 index 0000000..75085eb --- /dev/null +++ b/src/resources/vector-stores/file-batches.ts @@ -0,0 +1,264 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as FilesAPI from './files'; +import { VectorStoreFilesOpenAICursorPage } from './files'; +import { type OpenAICursorPageParams } from '../../pagination'; + +export class FileBatches extends APIResource { + /** + * Create a vector store file batch. Generate an OpenAI-compatible vector store + * file batch for the given vector store. + */ + create( + vectorStoreId: string, + body: FileBatchCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches`, { body, ...options }); + } + + /** + * Retrieve a vector store file batch. + */ + retrieve( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}`, options); + } + + /** + * Cancels a vector store file batch. + */ + cancel( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, options); + } + + /** + * Returns a list of vector store files in a batch. + */ + listFiles( + vectorStoreId: string, + batchId: string, + query?: FileBatchListFilesParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + listFiles( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + listFiles( + vectorStoreId: string, + batchId: string, + query: FileBatchListFilesParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.listFiles(vectorStoreId, batchId, {}, query); + } + return this._client.getAPIList( + `/v1/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`, + VectorStoreFilesOpenAICursorPage, + { query, ...options }, + ); + } +} + +/** + * Response from listing files in a vector store file batch. 
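+ *
+ * Usually consumed through the paginated `listFiles` helper rather than built
+ * by hand. A usage sketch (the IDs are placeholders):
+ *
+ * @example
+ * ```ts
+ * for await (const file of client.vectorStores.fileBatches.listFiles('vs_123', 'vsfb_456')) {
+ *   console.log(file.id);
+ * }
+ * ```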
+ */ +export interface ListVectorStoreFilesInBatchResponse { + /** + * List of vector store file objects in the batch + */ + data: Array; + + /** + * Whether there are more files available beyond this page + */ + has_more: boolean; + + /** + * Object type identifier, always "list" + */ + object: string; + + /** + * (Optional) ID of the first file in the list for pagination + */ + first_id?: string; + + /** + * (Optional) ID of the last file in the list for pagination + */ + last_id?: string; +} + +/** + * OpenAI Vector Store File Batch object. + */ +export interface VectorStoreFileBatches { + /** + * Unique identifier for the file batch + */ + id: string; + + /** + * Timestamp when the file batch was created + */ + created_at: number; + + /** + * File processing status counts for the batch + */ + file_counts: VectorStoreFileBatches.FileCounts; + + /** + * Object type identifier, always "vector_store.file_batch" + */ + object: string; + + /** + * Current processing status of the file batch + */ + status: 'completed' | 'in_progress' | 'cancelled' | 'failed'; + + /** + * ID of the vector store containing the file batch + */ + vector_store_id: string; +} + +export namespace VectorStoreFileBatches { + /** + * File processing status counts for the batch + */ + export interface FileCounts { + /** + * Number of files that had their processing cancelled + */ + cancelled: number; + + /** + * Number of files that have been successfully processed + */ + completed: number; + + /** + * Number of files that failed to process + */ + failed: number; + + /** + * Number of files currently being processed + */ + in_progress: number; + + /** + * Total number of files in the vector store + */ + total: number; + } +} + +export interface FileBatchCreateParams { + /** + * A list of File IDs that the vector store should use + */ + file_ids: Array; + + /** + * (Optional) Key-value attributes to store with the files + */ + attributes?: { [key: string]: boolean | number | string | Array | unknown | null }; + + /** + * (Optional) The chunking strategy used to chunk the file(s). Defaults to auto + */ + chunking_strategy?: + | FileBatchCreateParams.VectorStoreChunkingStrategyAuto + | FileBatchCreateParams.VectorStoreChunkingStrategyStatic; +} + +export namespace FileBatchCreateParams { + /** + * Automatic chunking strategy for vector store files. + */ + export interface VectorStoreChunkingStrategyAuto { + /** + * Strategy type, always "auto" for automatic chunking + */ + type: 'auto'; + } + + /** + * Static chunking strategy with configurable parameters. + */ + export interface VectorStoreChunkingStrategyStatic { + /** + * Configuration parameters for the static chunking strategy + */ + static: VectorStoreChunkingStrategyStatic.Static; + + /** + * Strategy type, always "static" for static chunking + */ + type: 'static'; + } + + export namespace VectorStoreChunkingStrategyStatic { + /** + * Configuration parameters for the static chunking strategy + */ + export interface Static { + /** + * Number of tokens to overlap between adjacent chunks + */ + chunk_overlap_tokens: number; + + /** + * Maximum number of tokens per chunk, must be between 100 and 4096 + */ + max_chunk_size_tokens: number; + } + } +} + +export interface FileBatchListFilesParams extends OpenAICursorPageParams { + /** + * A cursor for use in pagination. `before` is an object ID that defines your place + * in the list. + */ + before?: string; + + /** + * Filter by file status. One of in_progress, completed, failed, cancelled. 
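+   *
+   * For example, to page through only the files that failed processing (a
+   * sketch; the IDs are placeholders):
+   *
+   * @example
+   * ```ts
+   * const page = await client.vectorStores.fileBatches.listFiles(
+   *   'vs_123',
+   *   'vsfb_456',
+   *   { filter: 'failed' },
+   * );
+   * ```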
+ */ + filter?: string; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: string; +} + +export declare namespace FileBatches { + export { + type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse, + type VectorStoreFileBatches as VectorStoreFileBatches, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; +} + +export { VectorStoreFilesOpenAICursorPage }; diff --git a/src/resources/vector-stores/files.ts b/src/resources/vector-stores/files.ts index bc950cc..9af2869 100644 --- a/src/resources/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -14,7 +14,7 @@ export class Files extends APIResource { body: FileCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files`, { body, ...options }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}/files`, { body, ...options }); } /** @@ -25,7 +25,7 @@ export class Files extends APIResource { fileId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); + return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); } /** @@ -37,10 +37,7 @@ export class Files extends APIResource { body: FileUpdateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, { - body, - ...options, - }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, { body, ...options }); } /** @@ -64,7 +61,7 @@ export class Files extends APIResource { return this.list(vectorStoreId, {}, query); } return this._client.getAPIList( - `/v1/openai/v1/vector_stores/${vectorStoreId}/files`, + `/v1/vector_stores/${vectorStoreId}/files`, VectorStoreFilesOpenAICursorPage, { query, ...options }, ); @@ -78,7 +75,7 @@ export class Files extends APIResource { fileId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); + return this._client.delete(`/v1/vector_stores/${vectorStoreId}/files/${fileId}`, options); } /** @@ -89,7 +86,7 @@ export class Files extends APIResource { fileId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); + return this._client.get(`/v1/vector_stores/${vectorStoreId}/files/${fileId}/content`, options); } } diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index 4b35bbb..0f53c8f 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -1,5 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { + FileBatches, + type ListVectorStoreFilesInBatchResponse, + type VectorStoreFileBatches, + type FileBatchCreateParams, + type FileBatchListFilesParams, +} from './file-batches'; export { VectorStoreFilesOpenAICursorPage, Files, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index e8994e2..85db692 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -3,6 +3,14 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; +import * as FileBatchesAPI from './file-batches'; +import { + FileBatchCreateParams, + FileBatchListFilesParams, + FileBatches, + ListVectorStoreFilesInBatchResponse, + VectorStoreFileBatches, +} from './file-batches'; import * as FilesAPI from './files'; import { FileContentResponse, @@ -18,19 +26,21 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../../pagination' export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); + fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client); /** - * Creates a vector store. + * Creates a vector store. Generate an OpenAI-compatible vector store with the + * given parameters. */ create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/v1/openai/v1/vector_stores', { body, ...options }); + return this._client.post('/v1/vector_stores', { body, ...options }); } /** * Retrieves a vector store. */ retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options); + return this._client.get(`/v1/vector_stores/${vectorStoreId}`, options); } /** @@ -41,7 +51,7 @@ export class VectorStores extends APIResource { body: VectorStoreUpdateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}`, { body, ...options }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}`, { body, ...options }); } /** @@ -59,17 +69,14 @@ export class VectorStores extends APIResource { if (isRequestOptions(query)) { return this.list({}, query); } - return this._client.getAPIList('/v1/openai/v1/vector_stores', VectorStoresOpenAICursorPage, { - query, - ...options, - }); + return this._client.getAPIList('/v1/vector_stores', VectorStoresOpenAICursorPage, { query, ...options }); } /** * Delete a vector store. */ delete(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.delete(`/v1/openai/v1/vector_stores/${vectorStoreId}`, options); + return this._client.delete(`/v1/vector_stores/${vectorStoreId}`, options); } /** @@ -81,7 +88,7 @@ export class VectorStores extends APIResource { body: VectorStoreSearchParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post(`/v1/openai/v1/vector_stores/${vectorStoreId}/search`, { body, ...options }); + return this._client.post(`/v1/vector_stores/${vectorStoreId}/search`, { body, ...options }); } } @@ -310,46 +317,29 @@ export namespace VectorStoreSearchResponse { export interface VectorStoreCreateParams { /** - * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. 
+ * (Optional) Strategy for splitting files into chunks */ chunking_strategy?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * The dimension of the embedding vectors (default: 384). - */ - embedding_dimension?: number; - - /** - * The embedding model to use for this vector store. - */ - embedding_model?: string; - - /** - * The expiration policy for a vector store. + * (Optional) Expiration policy for the vector store */ expires_after?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * A list of File IDs that the vector store should use. Useful for tools like - * `file_search` that can access files. + * List of file IDs to include in the vector store */ file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to an object. + * Set of key-value pairs that can be attached to the vector store */ metadata?: { [key: string]: boolean | number | string | Array | unknown | null }; /** - * A name for the vector store. + * (Optional) A name for the vector store */ name?: string; - - /** - * The ID of the provider to use for this vector store. - */ - provider_id?: string; } export interface VectorStoreUpdateParams { @@ -435,6 +425,7 @@ export namespace VectorStoreSearchParams { VectorStores.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage; VectorStores.Files = Files; VectorStores.VectorStoreFilesOpenAICursorPage = VectorStoreFilesOpenAICursorPage; +VectorStores.FileBatches = FileBatches; export declare namespace VectorStores { export { @@ -459,4 +450,12 @@ export declare namespace VectorStores { type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; + + export { + FileBatches as FileBatches, + type ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse, + type VectorStoreFileBatches as VectorStoreFileBatches, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } diff --git a/src/version.ts b/src/version.ts index 834272b..e21d398 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '0.2.23'; // x-release-please-version +export const VERSION = '0.3.1-alpha.1'; // x-release-please-version diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/agents/agents.test.ts deleted file mode 100644 index 2f22dff..0000000 --- a/tests/api-resources/agents/agents.test.ts +++ /dev/null @@ -1,123 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import LlamaStackClient from 'llama-stack-client'; -import { Response } from 'node-fetch'; - -const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/agents/agents.test.ts
deleted file mode 100644
index 2f22dff..0000000
--- a/tests/api-resources/agents/agents.test.ts
+++ /dev/null
@@ -1,123 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource agents', () => {
-  test('create: only required params', async () => {
-    const responsePromise = client.agents.create({
-      agent_config: { instructions: 'instructions', model: 'model' },
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('create: required and optional params', async () => {
-    const response = await client.agents.create({
-      agent_config: {
-        instructions: 'instructions',
-        model: 'model',
-        client_tools: [
-          {
-            name: 'name',
-            description: 'description',
-            metadata: { foo: true },
-            parameters: [
-              {
-                description: 'description',
-                name: 'name',
-                parameter_type: 'parameter_type',
-                required: true,
-                default: true,
-              },
-            ],
-          },
-        ],
-        enable_session_persistence: true,
-        input_shields: ['string'],
-        max_infer_iters: 0,
-        name: 'name',
-        output_shields: ['string'],
-        response_format: { json_schema: { foo: true }, type: 'json_schema' },
-        sampling_params: {
-          strategy: { type: 'greedy' },
-          max_tokens: 0,
-          repetition_penalty: 0,
-          stop: ['string'],
-        },
-        tool_choice: 'auto',
-        tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' },
-        tool_prompt_format: 'json',
-        toolgroups: ['string'],
-      },
-    });
-  });
-
-  test('retrieve', async () => {
-    const responsePromise = client.agents.retrieve('agent_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.agents.retrieve('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-
-  test('list', async () => {
-    const responsePromise = client.agents.list();
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('list: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.agents.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-
-  test('list: request options and params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.agents.list({ limit: 0, start_index: 0 }, { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('delete', async () => {
-    const responsePromise = client.agents.delete('agent_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('delete: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.agents.delete('agent_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-});
diff --git a/tests/api-resources/agents/steps.test.ts b/tests/api-resources/agents/steps.test.ts
deleted file mode 100644
index 0696783..0000000
--- a/tests/api-resources/agents/steps.test.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource steps', () => {
-  test('retrieve', async () => {
-    const responsePromise = client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.agents.steps.retrieve('agent_id', 'session_id', 'turn_id', 'step_id', {
-        path: '/_stainless_unknown_path',
-      }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-});
diff --git a/tests/api-resources/agents/turn.test.ts b/tests/api-resources/agents/turn.test.ts
deleted file mode 100644
index dd4e3de..0000000
--- a/tests/api-resources/agents/turn.test.ts
+++ /dev/null
@@ -1,71 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource turn', () => {
-  test('create: only required params', async () => {
-    const responsePromise = client.agents.turn.create('agent_id', 'session_id', {
-      messages: [{ content: 'string', role: 'user' }],
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('create: required and optional params', async () => {
-    const response = await client.agents.turn.create('agent_id', 'session_id', {
-      messages: [{ content: 'string', role: 'user', context: 'string' }],
-      documents: [{ content: 'string', mime_type: 'mime_type' }],
-      stream: false,
-      tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' },
-      toolgroups: ['string'],
-    });
-  });
-
-  test('retrieve', async () => {
-    const responsePromise = client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.agents.turn.retrieve('agent_id', 'session_id', 'turn_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('resume: only required params', async () => {
-    const responsePromise = client.agents.turn.resume('agent_id', 'session_id', 'turn_id', {
-      tool_responses: [{ call_id: 'call_id', content: 'string', tool_name: 'brave_search' }],
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('resume: required and optional params', async () => {
-    const response = await client.agents.turn.resume('agent_id', 'session_id', 'turn_id', {
-      tool_responses: [
-        { call_id: 'call_id', content: 'string', tool_name: 'brave_search', metadata: { foo: true } },
-      ],
-      stream: false,
-    });
-  });
-});
diff --git a/tests/api-resources/benchmarks.test.ts b/tests/api-resources/benchmarks.test.ts
deleted file mode 100644
index 45bc197..0000000
--- a/tests/api-resources/benchmarks.test.ts
+++ /dev/null
@@ -1,70 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource benchmarks', () => {
-  test('retrieve', async () => {
-    const responsePromise = client.benchmarks.retrieve('benchmark_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.benchmarks.retrieve('benchmark_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('list', async () => {
-    const responsePromise = client.benchmarks.list();
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('list: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.benchmarks.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-
-  test('register: only required params', async () => {
-    const responsePromise = client.benchmarks.register({
-      benchmark_id: 'benchmark_id',
-      dataset_id: 'dataset_id',
-      scoring_functions: ['string'],
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('register: required and optional params', async () => {
-    const response = await client.benchmarks.register({
-      benchmark_id: 'benchmark_id',
-      dataset_id: 'dataset_id',
-      scoring_functions: ['string'],
-      metadata: { foo: true },
-      provider_benchmark_id: 'provider_benchmark_id',
-      provider_id: 'provider_id',
-    });
-  });
-});
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
index 736d76a..9a0d2eb 100644
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -24,13 +24,11 @@ describe('resource completions', () => {
       best_of: 0,
       echo: true,
       frequency_penalty: 0,
-      guided_choice: ['string'],
       logit_bias: { foo: 0 },
       logprobs: true,
       max_tokens: 0,
       n: 0,
       presence_penalty: 0,
-      prompt_logprobs: 0,
       seed: 0,
       stop: 'string',
       stream: false,
diff --git a/tests/api-resources/vector-dbs.test.ts b/tests/api-resources/conversations/conversations.test.ts
similarity index 61%
rename from tests/api-resources/vector-dbs.test.ts
rename to tests/api-resources/conversations/conversations.test.ts
index 4af5adf..e13a9e4 100644
--- a/tests/api-resources/vector-dbs.test.ts
+++ b/tests/api-resources/conversations/conversations.test.ts
@@ -5,9 +5,9 @@ import { Response } from 'node-fetch';

 const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

-describe('resource vectorDBs', () => {
-  test('retrieve', async () => {
-    const responsePromise = client.vectorDBs.retrieve('vector_db_id');
+describe('resource conversations', () => {
+  test('create', async () => {
+    const responsePromise = client.conversations.create({});
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -17,15 +17,8 @@ describe('resource vectorDBs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.vectorDBs.retrieve('vector_db_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('list', async () => {
-    const responsePromise = client.vectorDBs.list();
+  test('retrieve', async () => {
+    const responsePromise = client.conversations.retrieve('conversation_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -35,18 +28,15 @@ describe('resource vectorDBs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('list: request options instead of params are passed correctly', async () => {
+  test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.vectorDBs.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
+    await expect(
+      client.conversations.retrieve('conversation_id', { path: '/_stainless_unknown_path' }),
+    ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });

-  test('register: only required params', async () => {
-    const responsePromise = client.vectorDBs.register({
-      embedding_model: 'embedding_model',
-      vector_db_id: 'vector_db_id',
-    });
+  test('update: only required params', async () => {
+    const responsePromise = client.conversations.update('conversation_id', { metadata: { foo: 'string' } });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -56,19 +46,12 @@ describe('resource vectorDBs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('register: required and optional params', async () => {
-    const response = await client.vectorDBs.register({
-      embedding_model: 'embedding_model',
-      vector_db_id: 'vector_db_id',
-      embedding_dimension: 0,
-      provider_id: 'provider_id',
-      provider_vector_db_id: 'provider_vector_db_id',
-      vector_db_name: 'vector_db_name',
-    });
+  test('update: required and optional params', async () => {
+    const response = await client.conversations.update('conversation_id', { metadata: { foo: 'string' } });
   });

-  test('unregister', async () => {
-    const responsePromise = client.vectorDBs.unregister('vector_db_id');
+  test('delete', async () => {
+    const responsePromise = client.conversations.delete('conversation_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -78,10 +61,10 @@ describe('resource vectorDBs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('unregister: request options instead of params are passed correctly', async () => {
+  test('delete: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.vectorDBs.unregister('vector_db_id', { path: '/_stainless_unknown_path' }),
+      client.conversations.delete('conversation_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });
 });
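The old vector-dbs tests are repurposed for the new `conversations` resource, which exposes a plain create/retrieve/update/delete lifecycle. A hedged sketch of that flow (the base URL is a placeholder, and it assumes the created conversation exposes an `id` field):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // placeholder URL

async function main() {
  // create -> update -> retrieve -> delete, mirroring the renamed tests above
  const conversation = await client.conversations.create({});
  await client.conversations.update(conversation.id, { metadata: { topic: 'support' } });
  const fetched = await client.conversations.retrieve(conversation.id);
  console.log(fetched);
  await client.conversations.delete(conversation.id);
}
main();
```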
diff --git a/tests/api-resources/eval/jobs.test.ts b/tests/api-resources/conversations/items.test.ts
similarity index 59%
rename from tests/api-resources/eval/jobs.test.ts
rename to tests/api-resources/conversations/items.test.ts
index cad4ebd..4177bfc 100644
--- a/tests/api-resources/eval/jobs.test.ts
+++ b/tests/api-resources/conversations/items.test.ts
@@ -5,9 +5,11 @@ import { Response } from 'node-fetch';

 const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

-describe('resource jobs', () => {
-  test('retrieve', async () => {
-    const responsePromise = client.eval.jobs.retrieve('benchmark_id', 'job_id');
+describe('resource items', () => {
+  test('create: only required params', async () => {
+    const responsePromise = client.conversations.items.create('conversation_id', {
+      items: [{ content: 'string', role: 'system', type: 'message' }],
+    });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -17,15 +19,14 @@ describe('resource jobs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.eval.jobs.retrieve('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
+  test('create: required and optional params', async () => {
+    const response = await client.conversations.items.create('conversation_id', {
+      items: [{ content: 'string', role: 'system', type: 'message', id: 'id', status: 'status' }],
+    });
   });

-  test('cancel', async () => {
-    const responsePromise = client.eval.jobs.cancel('benchmark_id', 'job_id');
+  test('list', async () => {
+    const responsePromise = client.conversations.items.list('conversation_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -35,15 +36,26 @@ describe('resource jobs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('cancel: request options instead of params are passed correctly', async () => {
+  test('list: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+    await expect(
+      client.conversations.items.list('conversation_id', { path: '/_stainless_unknown_path' }),
+    ).rejects.toThrow(LlamaStackClient.NotFoundError);
+  });
+
+  test('list: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.eval.jobs.cancel('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
+      client.conversations.items.list(
+        'conversation_id',
+        { after: 'after', include: ['web_search_call.action.sources'], limit: 0, order: 'asc' },
+        { path: '/_stainless_unknown_path' },
+      ),
     ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });

-  test('status', async () => {
-    const responsePromise = client.eval.jobs.status('benchmark_id', 'job_id');
+  test('get', async () => {
+    const responsePromise = client.conversations.items.get('conversation_id', 'item_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -53,10 +65,10 @@ describe('resource jobs', () => {
     expect(dataAndResponse.response).toBe(rawResponse);
   });

-  test('status: request options instead of params are passed correctly', async () => {
+  test('get: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.eval.jobs.status('benchmark_id', 'job_id', { path: '/_stainless_unknown_path' }),
+      client.conversations.items.get('conversation_id', 'item_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });
 });
diff --git a/tests/api-resources/datasets.test.ts b/tests/api-resources/datasets.test.ts
deleted file mode 100644
index e0db4c4..0000000
--- a/tests/api-resources/datasets.test.ts
+++ /dev/null
@@ -1,129 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource datasets', () => {
-  test('retrieve', async () => {
-    const responsePromise = client.datasets.retrieve('dataset_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('retrieve: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.datasets.retrieve('dataset_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('list', async () => {
-    const responsePromise = client.datasets.list();
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('list: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.datasets.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-
-  test('appendrows: only required params', async () => {
-    const responsePromise = client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('appendrows: required and optional params', async () => {
-    const response = await client.datasets.appendrows('dataset_id', { rows: [{ foo: true }] });
-  });
-
-  test('iterrows', async () => {
-    const responsePromise = client.datasets.iterrows('dataset_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('iterrows: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.datasets.iterrows('dataset_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('iterrows: request options and params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.datasets.iterrows(
-        'dataset_id',
-        { limit: 0, start_index: 0 },
-        { path: '/_stainless_unknown_path' },
-      ),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('register: only required params', async () => {
-    const responsePromise = client.datasets.register({
-      purpose: 'post-training/messages',
-      source: { type: 'uri', uri: 'uri' },
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('register: required and optional params', async () => {
-    const response = await client.datasets.register({
-      purpose: 'post-training/messages',
-      source: { type: 'uri', uri: 'uri' },
-      dataset_id: 'dataset_id',
-      metadata: { foo: true },
-    });
-  });
-
-  test('unregister', async () => {
-    const responsePromise = client.datasets.unregister('dataset_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('unregister: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.datasets.unregister('dataset_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-});
diff --git a/tests/api-resources/eval/eval.test.ts b/tests/api-resources/eval/eval.test.ts
deleted file mode 100644
index 9f3e461..0000000
--- a/tests/api-resources/eval/eval.test.ts
+++ /dev/null
@@ -1,220 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource eval', () => {
-  test('evaluateRows: only required params', async () => {
-    const responsePromise = client.eval.evaluateRows('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-          },
-        },
-      },
-      input_rows: [{ foo: true }],
-      scoring_functions: ['string'],
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('evaluateRows: required and optional params', async () => {
-    const response = await client.eval.evaluateRows('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: {
-          model: 'model',
-          sampling_params: {
-            strategy: { type: 'greedy' },
-            max_tokens: 0,
-            repetition_penalty: 0,
-            stop: ['string'],
-          },
-          type: 'model',
-          system_message: { content: 'string', role: 'system' },
-        },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-            prompt_template: 'prompt_template',
-          },
-        },
-        num_examples: 0,
-      },
-      input_rows: [{ foo: true }],
-      scoring_functions: ['string'],
-    });
-  });
-
-  test('evaluateRowsAlpha: only required params', async () => {
-    const responsePromise = client.eval.evaluateRowsAlpha('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-          },
-        },
-      },
-      input_rows: [{ foo: true }],
-      scoring_functions: ['string'],
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('evaluateRowsAlpha: required and optional params', async () => {
-    const response = await client.eval.evaluateRowsAlpha('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: {
-          model: 'model',
-          sampling_params: {
-            strategy: { type: 'greedy' },
-            max_tokens: 0,
-            repetition_penalty: 0,
-            stop: ['string'],
-          },
-          type: 'model',
-          system_message: { content: 'string', role: 'system' },
-        },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-            prompt_template: 'prompt_template',
-          },
-        },
-        num_examples: 0,
-      },
-      input_rows: [{ foo: true }],
-      scoring_functions: ['string'],
-    });
-  });
-
-  test('runEval: only required params', async () => {
-    const responsePromise = client.eval.runEval('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-          },
-        },
-      },
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('runEval: required and optional params', async () => {
-    const response = await client.eval.runEval('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: {
-          model: 'model',
-          sampling_params: {
-            strategy: { type: 'greedy' },
-            max_tokens: 0,
-            repetition_penalty: 0,
-            stop: ['string'],
-          },
-          type: 'model',
-          system_message: { content: 'string', role: 'system' },
-        },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-            prompt_template: 'prompt_template',
-          },
-        },
-        num_examples: 0,
-      },
-    });
-  });
-
-  test('runEvalAlpha: only required params', async () => {
-    const responsePromise = client.eval.runEvalAlpha('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: { model: 'model', sampling_params: { strategy: { type: 'greedy' } }, type: 'model' },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-          },
-        },
-      },
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('runEvalAlpha: required and optional params', async () => {
-    const response = await client.eval.runEvalAlpha('benchmark_id', {
-      benchmark_config: {
-        eval_candidate: {
-          model: 'model',
-          sampling_params: {
-            strategy: { type: 'greedy' },
-            max_tokens: 0,
-            repetition_penalty: 0,
-            stop: ['string'],
-          },
-          type: 'model',
-          system_message: { content: 'string', role: 'system' },
-        },
-        scoring_params: {
-          foo: {
-            aggregation_functions: ['average'],
-            judge_model: 'judge_model',
-            judge_score_regexes: ['string'],
-            type: 'llm_as_judge',
-            prompt_template: 'prompt_template',
-          },
-        },
-        num_examples: 0,
-      },
-    });
-  });
-});
diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts
index 6482b2e..e3eec3d 100644
--- a/tests/api-resources/files.test.ts
+++ b/tests/api-resources/files.test.ts
@@ -24,6 +24,7 @@ describe('resource files', () => {
     const response = await client.files.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       purpose: 'assistants',
+      expires_after: { anchor: 'created_at', seconds: 0 },
     });
   });
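`files.create` now takes an `expires_after` policy, as exercised in the hunk above. A small sketch of uploading a file that expires an hour after creation (the base URL is a placeholder):

```ts
import LlamaStackClient, { toFile } from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // placeholder URL

async function main() {
  const file = await client.files.create({
    file: await toFile(Buffer.from('# my file contents'), 'README.md'),
    purpose: 'assistants',
    // anchor the expiry to the upload time; 3600 seconds = 1 hour
    expires_after: { anchor: 'created_at', seconds: 3600 },
  });
  console.log(file.id);
}
main();
```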
diff --git a/tests/api-resources/inference.test.ts b/tests/api-resources/inference.test.ts
deleted file mode 100644
index e7d5df3..0000000
--- a/tests/api-resources/inference.test.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource inference', () => {
-  test('batchChatCompletion: only required params', async () => {
-    const responsePromise = client.inference.batchChatCompletion({
-      messages_batch: [[{ content: 'string', role: 'user' }]],
-      model_id: 'model_id',
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('batchChatCompletion: required and optional params', async () => {
-    const response = await client.inference.batchChatCompletion({
-      messages_batch: [[{ content: 'string', role: 'user', context: 'string' }]],
-      model_id: 'model_id',
-      logprobs: { top_k: 0 },
-      response_format: { json_schema: { foo: true }, type: 'json_schema' },
-      sampling_params: {
-        strategy: { type: 'greedy' },
-        max_tokens: 0,
-        repetition_penalty: 0,
-        stop: ['string'],
-      },
-      tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' },
-      tools: [
-        {
-          tool_name: 'brave_search',
-          description: 'description',
-          parameters: {
-            foo: { param_type: 'param_type', default: true, description: 'description', required: true },
-          },
-        },
-      ],
-    });
-  });
-
-  test('batchCompletion: only required params', async () => {
-    const responsePromise = client.inference.batchCompletion({
-      content_batch: ['string'],
-      model_id: 'model_id',
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('batchCompletion: required and optional params', async () => {
-    const response = await client.inference.batchCompletion({
-      content_batch: ['string'],
-      model_id: 'model_id',
-      logprobs: { top_k: 0 },
-      response_format: { json_schema: { foo: true }, type: 'json_schema' },
-      sampling_params: {
-        strategy: { type: 'greedy' },
-        max_tokens: 0,
-        repetition_penalty: 0,
-        stop: ['string'],
-      },
-    });
-  });
-
-  test('chatCompletion: only required params', async () => {
-    const responsePromise = client.inference.chatCompletion({
-      messages: [{ content: 'string', role: 'user' }],
-      model_id: 'model_id',
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('chatCompletion: required and optional params', async () => {
-    const response = await client.inference.chatCompletion({
-      messages: [{ content: 'string', role: 'user', context: 'string' }],
-      model_id: 'model_id',
-      logprobs: { top_k: 0 },
-      response_format: { json_schema: { foo: true }, type: 'json_schema' },
-      sampling_params: {
-        strategy: { type: 'greedy' },
-        max_tokens: 0,
-        repetition_penalty: 0,
-        stop: ['string'],
-      },
-      stream: false,
-      tool_choice: 'auto',
-      tool_config: { system_message_behavior: 'append', tool_choice: 'auto', tool_prompt_format: 'json' },
-      tool_prompt_format: 'json',
-      tools: [
-        {
-          tool_name: 'brave_search',
-          description: 'description',
-          parameters: {
-            foo: { param_type: 'param_type', default: true, description: 'description', required: true },
-          },
-        },
-      ],
-    });
-  });
-
-  test('completion: only required params', async () => {
-    const responsePromise = client.inference.completion({ content: 'string', model_id: 'model_id' });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('completion: required and optional params', async () => {
-    const response = await client.inference.completion({
-      content: 'string',
-      model_id: 'model_id',
-      logprobs: { top_k: 0 },
-      response_format: { json_schema: { foo: true }, type: 'json_schema' },
-      sampling_params: {
-        strategy: { type: 'greedy' },
-        max_tokens: 0,
-        repetition_penalty: 0,
-        stop: ['string'],
-      },
-      stream: false,
-    });
-  });
-
-  test('embeddings: only required params', async () => {
-    const responsePromise = client.inference.embeddings({ contents: ['string'], model_id: 'model_id' });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('embeddings: required and optional params', async () => {
-    const response = await client.inference.embeddings({
-      contents: ['string'],
-      model_id: 'model_id',
-      output_dimension: 0,
-      task_type: 'query',
-      text_truncation: 'none',
-    });
-  });
-
-  test('rerank: only required params', async () => {
-    const responsePromise = client.inference.rerank({ items: ['string'], model: 'model', query: 'string' });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('rerank: required and optional params', async () => {
-    const response = await client.inference.rerank({
-      items: ['string'],
-      model: 'model',
-      query: 'string',
-      max_num_results: 0,
-    });
-  });
-});
diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts
index 25a77cc..940afb2 100644
--- a/tests/api-resources/moderations.test.ts
+++ b/tests/api-resources/moderations.test.ts
@@ -7,7 +7,7 @@ const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL']

 describe('resource moderations', () => {
   test('create: only required params', async () => {
-    const responsePromise = client.moderations.create({ input: 'string', model: 'model' });
+    const responsePromise = client.moderations.create({ input: 'string' });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
diff --git a/tests/api-resources/post-training/job.test.ts b/tests/api-resources/post-training/job.test.ts
deleted file mode 100644
index 0cb1ebb..0000000
--- a/tests/api-resources/post-training/job.test.ts
+++ /dev/null
@@ -1,71 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource job', () => {
-  test('list', async () => {
-    const responsePromise = client.postTraining.job.list();
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('list: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.postTraining.job.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-
-  test('artifacts: only required params', async () => {
-    const responsePromise = client.postTraining.job.artifacts({ job_uuid: 'job_uuid' });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('artifacts: required and optional params', async () => {
-    const response = await client.postTraining.job.artifacts({ job_uuid: 'job_uuid' });
-  });
-
-  test('cancel: only required params', async () => {
-    const responsePromise = client.postTraining.job.cancel({ job_uuid: 'job_uuid' });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('cancel: required and optional params', async () => {
-    const response = await client.postTraining.job.cancel({ job_uuid: 'job_uuid' });
-  });
-
-  test('status: only required params', async () => {
-    const responsePromise = client.postTraining.job.status({ job_uuid: 'job_uuid' });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('status: required and optional params', async () => {
-    const response = await client.postTraining.job.status({ job_uuid: 'job_uuid' });
-  });
-});
diff --git a/tests/api-resources/post-training/post-training.test.ts b/tests/api-resources/post-training/post-training.test.ts
deleted file mode 100644
index ac7a53b..0000000
--- a/tests/api-resources/post-training/post-training.test.ts
+++ /dev/null
@@ -1,118 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource postTraining', () => {
-  test('preferenceOptimize: only required params', async () => {
-    const responsePromise = client.postTraining.preferenceOptimize({
-      algorithm_config: { beta: 0, loss_type: 'sigmoid' },
-      finetuned_model: 'finetuned_model',
-      hyperparam_search_config: { foo: true },
-      job_uuid: 'job_uuid',
-      logger_config: { foo: true },
-      training_config: { gradient_accumulation_steps: 0, max_steps_per_epoch: 0, n_epochs: 0 },
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('preferenceOptimize: required and optional params', async () => {
-    const response = await client.postTraining.preferenceOptimize({
-      algorithm_config: { beta: 0, loss_type: 'sigmoid' },
-      finetuned_model: 'finetuned_model',
-      hyperparam_search_config: { foo: true },
-      job_uuid: 'job_uuid',
-      logger_config: { foo: true },
-      training_config: {
-        gradient_accumulation_steps: 0,
-        max_steps_per_epoch: 0,
-        n_epochs: 0,
-        data_config: {
-          batch_size: 0,
-          data_format: 'instruct',
-          dataset_id: 'dataset_id',
-          shuffle: true,
-          packed: true,
-          train_on_input: true,
-          validation_dataset_id: 'validation_dataset_id',
-        },
-        dtype: 'dtype',
-        efficiency_config: {
-          enable_activation_checkpointing: true,
-          enable_activation_offloading: true,
-          fsdp_cpu_offload: true,
-          memory_efficient_fsdp_wrap: true,
-        },
-        max_validation_steps: 0,
-        optimizer_config: { lr: 0, num_warmup_steps: 0, optimizer_type: 'adam', weight_decay: 0 },
-      },
-    });
-  });
-
-  test('supervisedFineTune: only required params', async () => {
-    const responsePromise = client.postTraining.supervisedFineTune({
-      hyperparam_search_config: { foo: true },
-      job_uuid: 'job_uuid',
-      logger_config: { foo: true },
-      training_config: { gradient_accumulation_steps: 0, max_steps_per_epoch: 0, n_epochs: 0 },
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('supervisedFineTune: required and optional params', async () => {
-    const response = await client.postTraining.supervisedFineTune({
-      hyperparam_search_config: { foo: true },
-      job_uuid: 'job_uuid',
-      logger_config: { foo: true },
-      training_config: {
-        gradient_accumulation_steps: 0,
-        max_steps_per_epoch: 0,
-        n_epochs: 0,
-        data_config: {
-          batch_size: 0,
-          data_format: 'instruct',
-          dataset_id: 'dataset_id',
-          shuffle: true,
-          packed: true,
-          train_on_input: true,
-          validation_dataset_id: 'validation_dataset_id',
-        },
-        dtype: 'dtype',
-        efficiency_config: {
-          enable_activation_checkpointing: true,
-          enable_activation_offloading: true,
-          fsdp_cpu_offload: true,
-          memory_efficient_fsdp_wrap: true,
-        },
-        max_validation_steps: 0,
-        optimizer_config: { lr: 0, num_warmup_steps: 0, optimizer_type: 'adam', weight_decay: 0 },
-      },
-      algorithm_config: {
-        alpha: 0,
-        apply_lora_to_mlp: true,
-        apply_lora_to_output: true,
-        lora_attn_modules: ['string'],
-        rank: 0,
-        type: 'LoRA',
-        quantize_base: true,
-        use_dora: true,
-      },
-      checkpoint_dir: 'checkpoint_dir',
-      model: 'model',
-    });
-  });
-});
diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts
index f1142d8..f5c86d4 100644
--- a/tests/api-resources/responses/responses.test.ts
+++ b/tests/api-resources/responses/responses.test.ts
@@ -21,10 +21,12 @@ describe('resource responses', () => {
     const response = await client.responses.create({
       input: 'string',
       model: 'model',
+      conversation: 'conversation',
       include: ['string'],
       instructions: 'instructions',
       max_infer_iters: 0,
       previous_response_id: 'previous_response_id',
+      prompt: { id: 'id', variables: { foo: { text: 'text', type: 'input_text' } }, version: 'version' },
       store: true,
       stream: false,
       temperature: 0,
diff --git a/tests/api-resources/routes.test.ts b/tests/api-resources/routes.test.ts
index 4373ab6..8cde566 100644
--- a/tests/api-resources/routes.test.ts
+++ b/tests/api-resources/routes.test.ts
@@ -23,4 +23,11 @@ describe('resource routes', () => {
       LlamaStackClient.NotFoundError,
     );
   });
+
+  test('list: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+    await expect(
+      client.routes.list({ api_filter: 'v1' }, { path: '/_stainless_unknown_path' }),
+    ).rejects.toThrow(LlamaStackClient.NotFoundError);
+  });
 });
diff --git a/tests/api-resources/safety.test.ts b/tests/api-resources/safety.test.ts
index 4ca2ca6..6b43983 100644
--- a/tests/api-resources/safety.test.ts
+++ b/tests/api-resources/safety.test.ts
@@ -23,7 +23,7 @@ describe('resource safety', () => {

   test('runShield: required and optional params', async () => {
     const response = await client.safety.runShield({
-      messages: [{ content: 'string', role: 'user', context: 'string' }],
+      messages: [{ content: 'string', role: 'user', name: 'name' }],
       params: { foo: true },
       shield_id: 'shield_id',
     });
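`responses.create` gains `conversation` and `prompt` params, as the responses hunk above shows. A sketch combining them (all IDs and the model name are placeholders):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // placeholder URL

async function main() {
  const response = await client.responses.create({
    model: 'model', // placeholder model ID
    input: 'Summarize our discussion so far.',
    conversation: 'conv_123', // attach the response to an existing conversation
    // reference a stored prompt template with typed variables
    prompt: { id: 'prompt_123', version: '1', variables: { tone: { text: 'concise', type: 'input_text' } } },
  });
  console.log(response.id);
}
main();
```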
diff --git a/tests/api-resources/telemetry.test.ts b/tests/api-resources/telemetry.test.ts
deleted file mode 100644
index e042d08..0000000
--- a/tests/api-resources/telemetry.test.ts
+++ /dev/null
@@ -1,177 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import LlamaStackClient from 'llama-stack-client';
-import { Response } from 'node-fetch';
-
-const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
-
-describe('resource telemetry', () => {
-  test('getSpan', async () => {
-    const responsePromise = client.telemetry.getSpan('trace_id', 'span_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('getSpan: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.telemetry.getSpan('trace_id', 'span_id', { path: '/_stainless_unknown_path' }),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('getSpanTree', async () => {
-    const responsePromise = client.telemetry.getSpanTree('span_id', {});
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('getTrace', async () => {
-    const responsePromise = client.telemetry.getTrace('trace_id');
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('getTrace: request options instead of params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.telemetry.getTrace('trace_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
-      LlamaStackClient.NotFoundError,
-    );
-  });
-
-  test('logEvent: only required params', async () => {
-    const responsePromise = client.telemetry.logEvent({
-      event: {
-        message: 'message',
-        severity: 'verbose',
-        span_id: 'span_id',
-        timestamp: '2019-12-27T18:11:19.117Z',
-        trace_id: 'trace_id',
-        type: 'unstructured_log',
-      },
-      ttl_seconds: 0,
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('logEvent: required and optional params', async () => {
-    const response = await client.telemetry.logEvent({
-      event: {
-        message: 'message',
-        severity: 'verbose',
-        span_id: 'span_id',
-        timestamp: '2019-12-27T18:11:19.117Z',
-        trace_id: 'trace_id',
-        type: 'unstructured_log',
-        attributes: { foo: 'string' },
-      },
-      ttl_seconds: 0,
-    });
-  });
-
-  // unsupported query params in java / kotlin
-  test.skip('queryMetrics: only required params', async () => {
-    const responsePromise = client.telemetry.queryMetrics('metric_name', {
-      query_type: 'range',
-      start_time: 0,
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  // unsupported query params in java / kotlin
-  test.skip('queryMetrics: required and optional params', async () => {
-    const response = await client.telemetry.queryMetrics('metric_name', {
-      query_type: 'range',
-      start_time: 0,
-      end_time: 0,
-      granularity: 'granularity',
-      label_matchers: [{ name: 'name', operator: '=', value: 'value' }],
-    });
-  });
-
-  // unsupported query params in java / kotlin
-  test.skip('querySpans: only required params', async () => {
-    const responsePromise = client.telemetry.querySpans({
-      attribute_filters: [{ key: 'key', op: 'eq', value: true }],
-      attributes_to_return: ['string'],
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  // unsupported query params in java / kotlin
-  test.skip('querySpans: required and optional params', async () => {
-    const response = await client.telemetry.querySpans({
-      attribute_filters: [{ key: 'key', op: 'eq', value: true }],
-      attributes_to_return: ['string'],
-      max_depth: 0,
-    });
-  });
-
-  // unsupported query params in java / kotlin
-  test.skip('queryTraces', async () => {
-    const responsePromise = client.telemetry.queryTraces({});
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('saveSpansToDataset: only required params', async () => {
-    const responsePromise = client.telemetry.saveSpansToDataset({
-      attribute_filters: [{ key: 'key', op: 'eq', value: true }],
-      attributes_to_save: ['string'],
-      dataset_id: 'dataset_id',
-    });
-    const rawResponse = await responsePromise.asResponse();
-    expect(rawResponse).toBeInstanceOf(Response);
-    const response = await responsePromise;
-    expect(response).not.toBeInstanceOf(Response);
-    const dataAndResponse = await responsePromise.withResponse();
-    expect(dataAndResponse.data).toBe(response);
-    expect(dataAndResponse.response).toBe(rawResponse);
-  });
-
-  test('saveSpansToDataset: required and optional params', async () => {
-    const response = await client.telemetry.saveSpansToDataset({
-      attribute_filters: [{ key: 'key', op: 'eq', value: true }],
-      attributes_to_save: ['string'],
-      dataset_id: 'dataset_id',
-      max_depth: 0,
-    });
-  });
-});
diff --git a/tests/api-resources/tool-runtime/rag-tool.test.ts b/tests/api-resources/tool-runtime/rag-tool.test.ts
index 6780be4..eee98f5 100644
--- a/tests/api-resources/tool-runtime/rag-tool.test.ts
+++ b/tests/api-resources/tool-runtime/rag-tool.test.ts
@@ -10,7 +10,7 @@ describe('resource ragTool', () => {
     const responsePromise = client.toolRuntime.ragTool.insert({
       chunk_size_in_tokens: 0,
       documents: [{ content: 'string', document_id: 'document_id', metadata: { foo: true } }],
-      vector_db_id: 'vector_db_id',
+      vector_store_id: 'vector_store_id',
     });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
@@ -27,14 +27,14 @@ describe('resource ragTool', () => {
       documents: [
         { content: 'string', document_id: 'document_id', metadata: { foo: true }, mime_type: 'mime_type' },
       ],
-      vector_db_id: 'vector_db_id',
+      vector_store_id: 'vector_store_id',
     });
   });

   test('query: only required params', async () => {
     const responsePromise = client.toolRuntime.ragTool.query({
       content: 'string',
-      vector_db_ids: ['string'],
+      vector_store_ids: ['string'],
     });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
@@ -48,7 +48,7 @@ describe('resource ragTool', () => {
   test('query: required and optional params', async () => {
     const response = await client.toolRuntime.ragTool.query({
       content: 'string',
-      vector_db_ids: ['string'],
+      vector_store_ids: ['string'],
       query_config: {
         chunk_template: 'chunk_template',
         max_chunks: 0,
diff --git a/tests/api-resources/vector-io.test.ts b/tests/api-resources/vector-io.test.ts
index 51380d7..3b5ae37 100644
--- a/tests/api-resources/vector-io.test.ts
+++ b/tests/api-resources/vector-io.test.ts
@@ -9,7 +9,7 @@ describe('resource vectorIo', () => {
   test('insert: only required params', async () => {
     const responsePromise = client.vectorIo.insert({
       chunks: [{ content: 'string', metadata: { foo: true } }],
-      vector_db_id: 'vector_db_id',
+      vector_store_id: 'vector_store_id',
     });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
@@ -43,13 +43,13 @@ describe('resource vectorIo', () => {
           stored_chunk_id: 'stored_chunk_id',
         },
       ],
-      vector_db_id: 'vector_db_id',
+      vector_store_id: 'vector_store_id',
       ttl_seconds: 0,
     });
   });

   test('query: only required params', async () => {
-    const responsePromise = client.vectorIo.query({ query: 'string', vector_db_id: 'vector_db_id' });
+    const responsePromise = client.vectorIo.query({ query: 'string', vector_store_id: 'vector_store_id' });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -62,7 +62,7 @@ describe('resource vectorIo', () => {
   test('query: required and optional params', async () => {
     const response = await client.vectorIo.query({
       query: 'string',
-      vector_db_id: 'vector_db_id',
+      vector_store_id: 'vector_store_id',
       params: { foo: true },
     });
   });
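Both the RAG-tool and vector-io calls now key on `vector_store_id`/`vector_store_ids` instead of `vector_db_id`/`vector_db_ids`; callers migrate by renaming the field. A hypothetical before/after ('vs_123' is a placeholder store ID):

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // placeholder URL

async function main() {
  // 0.2.x: client.vectorIo.query({ query: 'llama', vector_db_id: 'vs_123' })
  // 0.3.x:
  const chunks = await client.vectorIo.query({ query: 'llama', vector_store_id: 'vs_123' });
  console.log(chunks);
}
main();
```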
diff --git a/tests/api-resources/agents/session.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts
similarity index 64%
rename from tests/api-resources/agents/session.test.ts
rename to tests/api-resources/vector-stores/file-batches.test.ts
index efcf0e7..98e8964 100644
--- a/tests/api-resources/agents/session.test.ts
+++ b/tests/api-resources/vector-stores/file-batches.test.ts
@@ -5,9 +5,11 @@ import { Response } from 'node-fetch';
 
 const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });
 
-describe('resource session', () => {
+describe('resource fileBatches', () => {
   test('create: only required params', async () => {
-    const responsePromise = client.agents.session.create('agent_id', { session_name: 'session_name' });
+    const responsePromise = client.vectorStores.fileBatches.create('vector_store_id', {
+      file_ids: ['string'],
+    });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -18,11 +20,15 @@
   });
 
   test('create: required and optional params', async () => {
-    const response = await client.agents.session.create('agent_id', { session_name: 'session_name' });
+    const response = await client.vectorStores.fileBatches.create('vector_store_id', {
+      file_ids: ['string'],
+      attributes: { foo: true },
+      chunking_strategy: { type: 'auto' },
+    });
   });
 
   test('retrieve', async () => {
-    const responsePromise = client.agents.session.retrieve('agent_id', 'session_id');
+    const responsePromise = client.vectorStores.fileBatches.retrieve('vector_store_id', 'batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -35,24 +41,14 @@
   test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.agents.session.retrieve('agent_id', 'session_id', { path: '/_stainless_unknown_path' }),
+      client.vectorStores.fileBatches.retrieve('vector_store_id', 'batch_id', {
+        path: '/_stainless_unknown_path',
+      }),
     ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });
 
-  test('retrieve: request options and params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.agents.session.retrieve(
-        'agent_id',
-        'session_id',
-        { turn_ids: ['string'] },
-        { path: '/_stainless_unknown_path' },
-      ),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('list', async () => {
-    const responsePromise = client.agents.session.list('agent_id');
+  test('cancel', async () => {
+    const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -62,26 +58,17 @@
   });
 
-  test('list: request options instead of params are passed correctly', async () => {
+  test('cancel: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.agents.session.list('agent_id', { path: '/_stainless_unknown_path' }),
+      client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', {
+        path: '/_stainless_unknown_path',
+      }),
    ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });
 
-  test('list: request options and params are passed correctly', async () => {
-    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(
-      client.agents.session.list(
-        'agent_id',
-        { limit: 0, start_index: 0 },
-        { path: '/_stainless_unknown_path' },
-      ),
-    ).rejects.toThrow(LlamaStackClient.NotFoundError);
-  });
-
-  test('delete', async () => {
-    const responsePromise = client.agents.session.delete('agent_id', 'session_id');
+  test('listFiles', async () => {
+    const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -91,10 +78,24 @@
   });
 
-  test('delete: request options instead of params are passed correctly', async () => {
+  test('listFiles: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+    await expect(
+      client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', {
+        path: '/_stainless_unknown_path',
+      }),
+    ).rejects.toThrow(LlamaStackClient.NotFoundError);
+  });
+
+  test('listFiles: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.agents.session.delete('agent_id', 'session_id', { path: '/_stainless_unknown_path' }),
+      client.vectorStores.fileBatches.listFiles(
+        'vector_store_id',
+        'batch_id',
+        { after: 'after', before: 'before', filter: 'filter', limit: 0, order: 'order' },
+        { path: '/_stainless_unknown_path' },
+      ),
     ).rejects.toThrow(LlamaStackClient.NotFoundError);
   });
 });
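The renamed test file above also documents the shape of the new `client.vectorStores.fileBatches` resource: `create(vectorStoreId, { file_ids, attributes?, chunking_strategy? })`, plus `retrieve`, `cancel`, and `listFiles`, each addressed by `(vectorStoreId, batchId)`. A minimal end-to-end sketch assembled only from the calls the tests exercise; the file IDs are placeholders, and the `id` field on the create response is an assumption:

```ts
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010' });

async function main() {
  // Ingest a set of already-uploaded files into a vector store as one batch.
  const batch = await client.vectorStores.fileBatches.create('vector_store_id', {
    file_ids: ['file-1', 'file-2'], // placeholder file IDs
    attributes: { foo: true },
    chunking_strategy: { type: 'auto' },
  });

  // Check on the batch and page through its files; `batch.id` is assumed to come back
  // from create, and listFiles optionally takes after/before/filter/limit/order.
  const current = await client.vectorStores.fileBatches.retrieve('vector_store_id', batch.id);
  const files = await client.vectorStores.fileBatches.listFiles('vector_store_id', batch.id);
  console.log(current, files);

  // A batch that is still in progress can be aborted.
  await client.vectorStores.fileBatches.cancel('vector_store_id', batch.id);
}

main().catch(console.error);
```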