diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 763462f..43fd5a7 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -9,9 +9,7 @@
"postCreateCommand": "yarn install",
"customizations": {
"vscode": {
- "extensions": [
- "esbenp.prettier-vscode"
- ]
+ "extensions": ["esbenp.prettier-vscode"]
}
}
}
diff --git a/.gitignore b/.gitignore
index d98d51a..2412bb7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,5 @@ dist
dist-deno
/*.tgz
.idea/
+.eslintcache
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ed9acd2..193b35f 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.23-alpha.1"
+ ".": "0.3.1-alpha.1"
}
diff --git a/.stats.yml b/.stats.yml
index fa9edfc..7196fab 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml
-openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3
-config_hash: 0358112cc0f3d880b4d55debdbe1cfa3
+configured_endpoints: 71
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-96255baaaf07826c5292cbb73073ab40aa7073c53996c3be49441a8ecf95c8ee.yml
+openapi_spec_hash: fae0303cbf75bd79be4ae084db015401
+config_hash: a3829dbdaa491194d01f399784d532cd
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e40a318..76eea73 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,69 @@
# Changelog
+## 0.3.1-alpha.1 (2025-10-29)
+
+Full Changelog: [v0.2.23-alpha.1...v0.3.1-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.23-alpha.1...v0.3.1-alpha.1)
+
+### ⚠ BREAKING CHANGES
+
+* **api:** /v1/inspect only lists v1 apis by default
+* **api:** use input_schema instead of parameters for tools
+* **api:** fixes to remove deprecated inference resources
+
+### Features
+
+* **api:** expires_after changes for /files ([a0b0fb7](https://github.com/llamastack/llama-stack-client-typescript/commit/a0b0fb7aa74668f3f6996c178f9654723b8b0f22))
+* **api:** fix file batches SDK to list_files ([25a0f10](https://github.com/llamastack/llama-stack-client-typescript/commit/25a0f10cffa7de7f1457d65c97259911bc70ab0a))
+* **api:** fixes to remove deprecated inference resources ([367d775](https://github.com/llamastack/llama-stack-client-typescript/commit/367d775c3d5a2fd85bf138d2b175e91b7c185913))
+* **api:** fixes to URLs ([e4f7840](https://github.com/llamastack/llama-stack-client-typescript/commit/e4f78407f74f3ba7597de355c314e1932dd94761))
+* **api:** manual updates ([7d2e375](https://github.com/llamastack/llama-stack-client-typescript/commit/7d2e375bde7bd04ae58cc49fcd5ab7b134b25640))
+* **api:** manual updates ([0302d54](https://github.com/llamastack/llama-stack-client-typescript/commit/0302d54398d87127ab0e9221a8a92760123d235b))
+* **api:** manual updates ([98a596f](https://github.com/llamastack/llama-stack-client-typescript/commit/98a596f677fe2790e4b4765362aa19b6cff8b97e))
+* **api:** manual updates ([c6fb0b6](https://github.com/llamastack/llama-stack-client-typescript/commit/c6fb0b67d8f2e641c13836a17400e51df0b029f1))
+* **api:** move datasets to beta, vector_db -> vector_store ([f32c0be](https://github.com/llamastack/llama-stack-client-typescript/commit/f32c0becb1ec0d66129b7fcaa06de3323ee703da))
+* **api:** move post_training and eval under alpha namespace ([aec1d5f](https://github.com/llamastack/llama-stack-client-typescript/commit/aec1d5ff198473ba736bf543ad00c6626cab9b81))
+* **api:** moving { rerank, agents } to `client.alpha.` ([793e069](https://github.com/llamastack/llama-stack-client-typescript/commit/793e0694d75c2af4535bf991d5858cd1f21300b4))
+* **api:** removing openai/v1 ([b5432de](https://github.com/llamastack/llama-stack-client-typescript/commit/b5432de2ad56ff0d2fd5a5b8e1755b5237616b60))
+* **api:** SDKs for vector store file batches ([b0676c8](https://github.com/llamastack/llama-stack-client-typescript/commit/b0676c837bbd835276fea3fe12f435afdbb75ef7))
+* **api:** SDKs for vector store file batches apis ([88731bf](https://github.com/llamastack/llama-stack-client-typescript/commit/88731bfecd6f548ae79cbe2a1125620e488c42a3))
+* **api:** several updates including Conversations, Responses changes, etc. ([e0728d5](https://github.com/llamastack/llama-stack-client-typescript/commit/e0728d5dd59be8723d9f967d6164351eb05528d1))
+* **api:** sync ([7d85013](https://github.com/llamastack/llama-stack-client-typescript/commit/7d850139d1327a215312a82c98b3428ebc7e5f68))
+* **api:** tool api (input_schema, etc.) changes ([06f2bca](https://github.com/llamastack/llama-stack-client-typescript/commit/06f2bcaf0df2e5d462cbe2d9ef3704ab0cfe9248))
+* **api:** updates to vector_store, etc. ([19535c2](https://github.com/llamastack/llama-stack-client-typescript/commit/19535c27147bf6f6861b807d9eeee471b5625148))
+* **api:** updating post /v1/files to have correct multipart/form-data ([f1cf9d6](https://github.com/llamastack/llama-stack-client-typescript/commit/f1cf9d68b6b2569dfb5ea3e2d2c33eff1a832e47))
+* **api:** use input_schema instead of parameters for tools ([8910a12](https://github.com/llamastack/llama-stack-client-typescript/commit/8910a121146aeddcb8f400101e6a2232245097e0))
+* **api:** vector_db_id -> vector_store_id ([079d89d](https://github.com/llamastack/llama-stack-client-typescript/commit/079d89d6522cb4f2eed5e5a09962d94ad800e883))
+
+
+### Bug Fixes
+
+* **api:** another fix to capture correct responses.create() params ([6acae91](https://github.com/llamastack/llama-stack-client-typescript/commit/6acae910db289080e8f52864f1bdf6d7951d1c3b))
+* **api:** fix the ToolDefParam updates ([5cee3d6](https://github.com/llamastack/llama-stack-client-typescript/commit/5cee3d69650a4c827e12fc046c1d2ec3b2fa9126))
+* **client:** incorrect offset pagination check ([257285f](https://github.com/llamastack/llama-stack-client-typescript/commit/257285f33bb989c9040580dd24251d05f9657bb0))
+* fix stream event model reference ([a71b421](https://github.com/llamastack/llama-stack-client-typescript/commit/a71b421152a609e49e76d01c6e4dd46eb3dbfae0))
+
+
+### Chores
+
+* **api:** /v1/inspect only lists v1 apis by default ([e30f51c](https://github.com/llamastack/llama-stack-client-typescript/commit/e30f51c704c39129092255c040bbf5ad90ed0b07))
+* extract some types in mcp docs ([dcc7bb8](https://github.com/llamastack/llama-stack-client-typescript/commit/dcc7bb8b4d940982c2e9c6d1a541636e99fdc5ff))
+* fix readme example ([402f930](https://github.com/llamastack/llama-stack-client-typescript/commit/402f9301d033bb230c9714104fbfa554f3f7cd8f))
+* fix readme examples ([4d5517c](https://github.com/llamastack/llama-stack-client-typescript/commit/4d5517c2b9af2eb6994f5e4b2c033c95d268fb5c))
+* **internal:** codegen related update ([252e0a2](https://github.com/llamastack/llama-stack-client-typescript/commit/252e0a2a38bd8aedab91b401c440a9b10c056cec))
+* **internal:** codegen related update ([34da720](https://github.com/llamastack/llama-stack-client-typescript/commit/34da720c34c35dafb38775243d28dfbdce2497db))
+* **internal:** fix incremental formatting in some cases ([c5c8292](https://github.com/llamastack/llama-stack-client-typescript/commit/c5c8292b631c678efff5498bbab9f5a43bee50b6))
+* **internal:** use npm pack for build uploads ([a246793](https://github.com/llamastack/llama-stack-client-typescript/commit/a24679300cff93fea8ad4bc85e549ecc88198d58))
+
+
+### Documentation
+
+* update examples ([17b9eb3](https://github.com/llamastack/llama-stack-client-typescript/commit/17b9eb3c40957b63d2a71f7fc21944abcc720d80))
+
+
+### Build System
+
+* Bump version to 0.2.23 ([16e05ed](https://github.com/llamastack/llama-stack-client-typescript/commit/16e05ed9798233375e19098992632d223c3f5d8d))
+
## 0.2.23-alpha.1 (2025-09-26)
Full Changelog: [v0.2.19-alpha.1...v0.2.23-alpha.1](https://github.com/llamastack/llama-stack-client-typescript/compare/v0.2.19-alpha.1...v0.2.23-alpha.1)
diff --git a/README.md b/README.md
index a27b8c1..c0f0665 100644
--- a/README.md
+++ b/README.md
@@ -41,13 +41,13 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-const stream = await client.inference.chatCompletion({
+const stream = await client.chat.completions.create({
messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
+ model: 'model',
stream: true,
});
-for await (const chatCompletionResponseStreamChunk of stream) {
- console.log(chatCompletionResponseStreamChunk.completion_message);
+for await (const chatCompletionChunk of stream) {
+ console.log(chatCompletionChunk);
}
```
@@ -64,11 +64,11 @@ import LlamaStackClient from 'llama-stack-client';
const client = new LlamaStackClient();
-const params: LlamaStackClient.InferenceChatCompletionParams = {
+const params: LlamaStackClient.Chat.CompletionCreateParams = {
messages: [{ content: 'string', role: 'user' }],
- model_id: 'model_id',
+ model: 'model',
};
-const chatCompletionResponse: LlamaStackClient.ChatCompletionResponse = await client.inference.chatCompletion(
+const completion: LlamaStackClient.Chat.CompletionCreateResponse = await client.chat.completions.create(
params,
);
```
@@ -113,8 +113,8 @@ a subclass of `APIError` will be thrown:
```ts
-const chatCompletionResponse = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const completion = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.catch(async (err) => {
if (err instanceof LlamaStackClient.APIError) {
console.log(err.status); // 400
@@ -155,7 +155,7 @@ const client = new LlamaStackClient({
});
// Or, configure per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
maxRetries: 5,
});
```
@@ -172,7 +172,7 @@ const client = new LlamaStackClient({
});
// Override per-request:
-await client.inference.chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' }, {
+await client.chat.completions.create({ messages: [{ content: 'string', role: 'user' }], model: 'model' }, {
timeout: 5 * 1000,
});
```
@@ -193,17 +193,17 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
```ts
const client = new LlamaStackClient();
-const response = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const response = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.asResponse();
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: chatCompletionResponse, response: raw } = await client.inference
- .chatCompletion({ messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' })
+const { data: completion, response: raw } = await client.chat.completions
+ .create({ messages: [{ content: 'string', role: 'user' }], model: 'model' })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
-console.log(chatCompletionResponse.completion_message);
+console.log(completion);
```
### Making custom/undocumented requests
@@ -307,8 +307,8 @@ const client = new LlamaStackClient({
});
// Override per-request:
-await client.inference.chatCompletion(
- { messages: [{ content: 'string', role: 'user' }], model_id: 'model_id' },
+await client.chat.completions.create(
+ { messages: [{ content: 'string', role: 'user' }], model: 'model' },
{
httpAgent: new http.Agent({ keepAlive: false }),
},
diff --git a/api.md b/api.md
index 01d88a5..f79d69d 100644
--- a/api.md
+++ b/api.md
@@ -2,27 +2,18 @@
Types:
-- AgentConfig
-- BatchCompletion
-- ChatCompletionResponse
- CompletionMessage
-- ContentDelta
- Document
- InterleavedContent
- InterleavedContentItem
- Message
-- Metric
- ParamType
- QueryConfig
-- QueryGeneratorConfig
- QueryResult
-- ResponseFormat
- SafetyViolation
-- SamplingParams
- ScoringResult
- SystemMessage
- ToolCall
-- ToolParamDefinition
- ToolResponseMessage
- UserMessage
@@ -45,14 +36,12 @@ Methods:
Types:
-- ListToolsResponse
-- Tool
- ToolListResponse
Methods:
- client.tools.list({ ...params }) -> ToolListResponse
-- client.tools.get(toolName) -> Tool
+- client.tools.get(toolName) -> ToolDef
# ToolRuntime
@@ -85,10 +74,10 @@ Types:
Methods:
-- client.responses.create({ ...params }) -> ResponseObject
-- client.responses.retrieve(responseId) -> ResponseObject
-- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
-- client.responses.delete(responseId) -> ResponseDeleteResponse
+- client.responses.create({ ...params }) -> ResponseObject
+- client.responses.retrieve(responseId) -> ResponseObject
+- client.responses.list({ ...params }) -> ResponseListResponsesOpenAICursorPage
+- client.responses.delete(responseId) -> ResponseDeleteResponse
## InputItems
@@ -98,110 +87,35 @@ Types:
Methods:
-- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
+- client.responses.inputItems.list(responseId, { ...params }) -> InputItemListResponse
-# Agents
+# Conversations
Types:
-- InferenceStep
-- MemoryRetrievalStep
-- ShieldCallStep
-- ToolExecutionStep
-- ToolResponse
-- AgentCreateResponse
-- AgentRetrieveResponse
-- AgentListResponse
+- ConversationObject
+- ConversationDeleteResponse
Methods:
-- client.agents.create({ ...params }) -> AgentCreateResponse
-- client.agents.retrieve(agentId) -> AgentRetrieveResponse
-- client.agents.list({ ...params }) -> AgentListResponse
-- client.agents.delete(agentId) -> void
+- client.conversations.create({ ...params }) -> ConversationObject
+- client.conversations.retrieve(conversationId) -> ConversationObject
+- client.conversations.update(conversationId, { ...params }) -> ConversationObject
+- client.conversations.delete(conversationId) -> ConversationDeleteResponse
-## Session
+## Items
Types:
-- Session
-- SessionCreateResponse
-- SessionListResponse
+- ItemCreateResponse
+- ItemListResponse
+- ItemGetResponse
Methods:
-- client.agents.session.create(agentId, { ...params }) -> SessionCreateResponse
-- client.agents.session.retrieve(agentId, sessionId, { ...params }) -> Session
-- client.agents.session.list(agentId, { ...params }) -> SessionListResponse
-- client.agents.session.delete(agentId, sessionId) -> void
-
-## Steps
-
-Types:
-
-- StepRetrieveResponse
-
-Methods:
-
-- client.agents.steps.retrieve(agentId, sessionId, turnId, stepId) -> StepRetrieveResponse
-
-## Turn
-
-Types:
-
-- AgentTurnResponseStreamChunk
-- Turn
-- TurnResponseEvent
-- TurnResponseEventPayload
-
-Methods:
-
-- client.agents.turn.create(agentId, sessionId, { ...params }) -> Turn
-- client.agents.turn.retrieve(agentId, sessionId, turnId) -> Turn
-- client.agents.turn.resume(agentId, sessionId, turnId, { ...params }) -> Turn
-
-# Datasets
-
-Types:
-
-- ListDatasetsResponse
-- DatasetRetrieveResponse
-- DatasetListResponse
-- DatasetIterrowsResponse
-- DatasetRegisterResponse
-
-Methods:
-
-- client.datasets.retrieve(datasetId) -> DatasetRetrieveResponse
-- client.datasets.list() -> DatasetListResponse
-- client.datasets.appendrows(datasetId, { ...params }) -> void
-- client.datasets.iterrows(datasetId, { ...params }) -> DatasetIterrowsResponse
-- client.datasets.register({ ...params }) -> DatasetRegisterResponse
-- client.datasets.unregister(datasetId) -> void
-
-# Eval
-
-Types:
-
-- BenchmarkConfig
-- EvalCandidate
-- EvaluateResponse
-- Job
-
-Methods:
-
-- client.eval.evaluateRows(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.evaluateRowsAlpha(benchmarkId, { ...params }) -> EvaluateResponse
-- client.eval.runEval(benchmarkId, { ...params }) -> Job
-- client.eval.runEvalAlpha(benchmarkId, { ...params }) -> Job
-
-## Jobs
-
-Methods:
-
-- client.eval.jobs.retrieve(benchmarkId, jobId) -> EvaluateResponse
-- client.eval.jobs.cancel(benchmarkId, jobId) -> void
-- client.eval.jobs.status(benchmarkId, jobId) -> Job
+- client.conversations.items.create(conversationId, { ...params }) -> ItemCreateResponse
+- client.conversations.items.list(conversationId, { ...params }) -> ItemListResponsesOpenAICursorPage
+- client.conversations.items.get(conversationId, itemId) -> ItemGetResponse
# Inspect
@@ -217,26 +131,6 @@ Methods:
- client.inspect.health() -> HealthInfo
- client.inspect.version() -> VersionInfo
-# Inference
-
-Types:
-
-- ChatCompletionResponseStreamChunk
-- CompletionResponse
-- EmbeddingsResponse
-- TokenLogProbs
-- InferenceBatchChatCompletionResponse
-- InferenceRerankResponse
-
-Methods:
-
-- client.inference.batchChatCompletion({ ...params }) -> InferenceBatchChatCompletionResponse
-- client.inference.batchCompletion({ ...params }) -> BatchCompletion
-- client.inference.chatCompletion({ ...params }) -> ChatCompletionResponse
-- client.inference.completion({ ...params }) -> CompletionResponse
-- client.inference.embeddings({ ...params }) -> EmbeddingsResponse
-- client.inference.rerank({ ...params }) -> InferenceRerankResponse
-
# Embeddings
Types:
@@ -245,7 +139,7 @@ Types:
Methods:
-- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
+- client.embeddings.create({ ...params }) -> CreateEmbeddingsResponse
# Chat
@@ -263,9 +157,9 @@ Types:
Methods:
-- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
-- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
-- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
+- client.chat.completions.create({ ...params }) -> CompletionCreateResponse
+- client.chat.completions.retrieve(completionId) -> CompletionRetrieveResponse
+- client.chat.completions.list({ ...params }) -> CompletionListResponsesOpenAICursorPage
# Completions
@@ -275,7 +169,7 @@ Types:
Methods:
-- client.completions.create({ ...params }) -> CompletionCreateResponse
+- client.completions.create({ ...params }) -> CompletionCreateResponse
# VectorIo
@@ -288,22 +182,6 @@ Methods:
- client.vectorIo.insert({ ...params }) -> void
- client.vectorIo.query({ ...params }) -> QueryChunksResponse
-# VectorDBs
-
-Types:
-
-- ListVectorDBsResponse
-- VectorDBRetrieveResponse
-- VectorDBListResponse
-- VectorDBRegisterResponse
-
-Methods:
-
-- client.vectorDBs.retrieve(vectorDBId) -> VectorDBRetrieveResponse
-- client.vectorDBs.list() -> VectorDBListResponse
-- client.vectorDBs.register({ ...params }) -> VectorDBRegisterResponse
-- client.vectorDBs.unregister(vectorDBId) -> void
-
# VectorStores
Types:
@@ -315,12 +193,12 @@ Types:
Methods:
-- client.vectorStores.create({ ...params }) -> VectorStore
-- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
-- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
-- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
-- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
-- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
+- client.vectorStores.create({ ...params }) -> VectorStore
+- client.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.vectorStores.list({ ...params }) -> VectorStoresOpenAICursorPage
+- client.vectorStores.delete(vectorStoreId) -> VectorStoreDeleteResponse
+- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponse
## Files
@@ -332,12 +210,26 @@ Types:
Methods:
-- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
-- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
-- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
-- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
-- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile
+- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesOpenAICursorPage
+- client.vectorStores.files.delete(vectorStoreId, fileId) -> FileDeleteResponse
+- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponse
+
+## FileBatches
+
+Types:
+
+- ListVectorStoreFilesInBatchResponse
+- VectorStoreFileBatches
+
+Methods:
+
+- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatches
+- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesOpenAICursorPage
# Models
@@ -356,41 +248,9 @@ Methods:
## OpenAI
-Types:
-
-- OpenAIListResponse
-
-Methods:
-
-- client.models.openai.list() -> OpenAIListResponse
-
-# PostTraining
-
-Types:
-
-- AlgorithmConfig
-- ListPostTrainingJobsResponse
-- PostTrainingJob
-
-Methods:
-
-- client.postTraining.preferenceOptimize({ ...params }) -> PostTrainingJob
-- client.postTraining.supervisedFineTune({ ...params }) -> PostTrainingJob
-
-## Job
-
-Types:
-
-- JobListResponse
-- JobArtifactsResponse
-- JobStatusResponse
-
Methods:
-- client.postTraining.job.list() -> Array<ListPostTrainingJobsResponse.Data>
-- client.postTraining.job.artifacts({ ...params }) -> JobArtifactsResponse
-- client.postTraining.job.cancel({ ...params }) -> void
-- client.postTraining.job.status({ ...params }) -> JobStatusResponse
+- client.models.openai.list() -> ModelListResponse
# Providers
@@ -413,7 +273,7 @@ Types:
Methods:
-- client.routes.list() -> RouteListResponse
+- client.routes.list({ ...params }) -> RouteListResponse
# Moderations
@@ -423,7 +283,7 @@ Types:
Methods:
-- client.moderations.create({ ...params }) -> CreateResponse
+- client.moderations.create({ ...params }) -> CreateResponse
# Safety
@@ -460,32 +320,6 @@ Methods:
- client.syntheticDataGeneration.generate({ ...params }) -> SyntheticDataGenerationResponse
-# Telemetry
-
-Types:
-
-- Event
-- QueryCondition
-- QuerySpansResponse
-- SpanWithStatus
-- Trace
-- TelemetryGetSpanResponse
-- TelemetryGetSpanTreeResponse
-- TelemetryQueryMetricsResponse
-- TelemetryQuerySpansResponse
-- TelemetryQueryTracesResponse
-
-Methods:
-
-- client.telemetry.getSpan(traceId, spanId) -> TelemetryGetSpanResponse
-- client.telemetry.getSpanTree(spanId, { ...params }) -> TelemetryGetSpanTreeResponse
-- client.telemetry.getTrace(traceId) -> Trace
-- client.telemetry.logEvent({ ...params }) -> void
-- client.telemetry.queryMetrics(metricName, { ...params }) -> TelemetryQueryMetricsResponse
-- client.telemetry.querySpans({ ...params }) -> TelemetryQuerySpansResponse
-- client.telemetry.queryTraces({ ...params }) -> TelemetryQueryTracesResponse
-- client.telemetry.saveSpansToDataset({ ...params }) -> void
-
# Scoring
Types:
@@ -513,20 +347,6 @@ Methods:
- client.scoringFunctions.list() -> ScoringFunctionListResponse
- client.scoringFunctions.register({ ...params }) -> void
-# Benchmarks
-
-Types:
-
-- Benchmark
-- ListBenchmarksResponse
-- BenchmarkListResponse
-
-Methods:
-
-- client.benchmarks.retrieve(benchmarkId) -> Benchmark
-- client.benchmarks.list() -> BenchmarkListResponse
-- client.benchmarks.register({ ...params }) -> void
-
# Files
Types:
@@ -538,8 +358,34 @@ Types:
Methods:
-- client.files.create({ ...params }) -> File
-- client.files.retrieve(fileId) -> File
-- client.files.list({ ...params }) -> FilesOpenAICursorPage
-- client.files.delete(fileId) -> DeleteFileResponse
-- client.files.content(fileId) -> unknown
+- client.files.create({ ...params }) -> File
+- client.files.retrieve(fileId) -> File
+- client.files.list({ ...params }) -> FilesOpenAICursorPage
+- client.files.delete(fileId) -> DeleteFileResponse
+- client.files.content(fileId) -> unknown
+
+# Alpha
+
+## Inference
+
+## PostTraining
+
+### Job
+
+## Benchmarks
+
+## Eval
+
+### Jobs
+
+## Agents
+
+### Session
+
+### Steps
+
+### Turn
+
+# Beta
+
+## Datasets
diff --git a/package.json b/package.json
index 6daf907..32dcc66 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "llama-stack-client",
- "version": "0.2.23",
+ "version": "0.3.1-alpha.1",
"description": "The official TypeScript library for the Llama Stack Client API",
"author": "Llama Stack Client ",
"types": "dist/index.d.ts",
diff --git a/release-please-config.json b/release-please-config.json
index 624ed99..1ebd0bd 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -60,8 +60,5 @@
}
],
"release-type": "node",
- "extra-files": [
- "src/version.ts",
- "README.md"
- ]
+ "extra-files": ["src/version.ts", "README.md"]
}
diff --git a/scripts/fast-format b/scripts/fast-format
index 03fb1a3..8a8e9d5 100755
--- a/scripts/fast-format
+++ b/scripts/fast-format
@@ -35,6 +35,6 @@ echo "==> Running prettier --write"
PRETTIER_FILES="$(grep '\.\(js\|json\)$' "$FILE_LIST" || true)"
if ! [ -z "$PRETTIER_FILES" ]; then
echo "$PRETTIER_FILES" | xargs ./node_modules/.bin/prettier \
- --write --cache --cache-strategy metadata \
+ --write --cache --cache-strategy metadata --no-error-on-unmatched-pattern \
'!**/dist' '!**/*.ts' '!**/*.mts' '!**/*.cts' '!**/*.js' '!**/*.mjs' '!**/*.cjs'
fi
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 211e0b9..831e716 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -12,9 +12,11 @@ if [[ "$SIGNED_URL" == "null" ]]; then
exit 1
fi
-UPLOAD_RESPONSE=$(tar "${BASE_PATH:+-C$BASE_PATH}" -cz "${ARTIFACT_PATH:-dist}" | curl -v -X PUT \
+TARBALL=$(cd dist && npm pack --silent)
+
+UPLOAD_RESPONSE=$(curl -v -X PUT \
-H "Content-Type: application/gzip" \
- --data-binary @- "$SIGNED_URL" 2>&1)
+ --data-binary "@dist/$TARBALL" "$SIGNED_URL" 2>&1)
if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
diff --git a/src/index.ts b/src/index.ts
index 68d219d..682a884 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -13,13 +13,6 @@ import {
} from './pagination';
import * as Uploads from './uploads';
import * as API from './resources/index';
-import {
- Benchmark,
- BenchmarkListResponse,
- BenchmarkRegisterParams,
- Benchmarks,
- ListBenchmarksResponse,
-} from './resources/benchmarks';
import {
CompletionCreateParams,
CompletionCreateParamsNonStreaming,
@@ -27,17 +20,6 @@ import {
CompletionCreateResponse,
Completions,
} from './resources/completions';
-import {
- DatasetAppendrowsParams,
- DatasetIterrowsParams,
- DatasetIterrowsResponse,
- DatasetListResponse,
- DatasetRegisterParams,
- DatasetRegisterResponse,
- DatasetRetrieveResponse,
- Datasets,
- ListDatasetsResponse,
-} from './resources/datasets';
import { CreateEmbeddingsResponse, EmbeddingCreateParams, Embeddings } from './resources/embeddings';
import {
DeleteFileResponse,
@@ -49,29 +31,10 @@ import {
FilesOpenAICursorPage,
ListFilesResponse,
} from './resources/files';
-import {
- ChatCompletionResponseStreamChunk,
- CompletionResponse,
- EmbeddingsResponse,
- Inference,
- InferenceBatchChatCompletionParams,
- InferenceBatchChatCompletionResponse,
- InferenceBatchCompletionParams,
- InferenceChatCompletionParams,
- InferenceChatCompletionParamsNonStreaming,
- InferenceChatCompletionParamsStreaming,
- InferenceCompletionParams,
- InferenceCompletionParamsNonStreaming,
- InferenceCompletionParamsStreaming,
- InferenceEmbeddingsParams,
- InferenceRerankParams,
- InferenceRerankResponse,
- TokenLogProbs,
-} from './resources/inference';
import { HealthInfo, Inspect, ProviderInfo, RouteInfo, VersionInfo } from './resources/inspect';
import { CreateResponse, ModerationCreateParams, Moderations } from './resources/moderations';
import { ListProvidersResponse, ProviderListResponse, Providers } from './resources/providers';
-import { ListRoutesResponse, RouteListResponse, Routes } from './resources/routes';
+import { ListRoutesResponse, RouteListParams, RouteListResponse, Routes } from './resources/routes';
import { RunShieldResponse, Safety, SafetyRunShieldParams } from './resources/safety';
import {
Scoring,
@@ -100,25 +63,6 @@ import {
SyntheticDataGenerationGenerateParams,
SyntheticDataGenerationResponse,
} from './resources/synthetic-data-generation';
-import {
- Event,
- QueryCondition,
- QuerySpansResponse,
- SpanWithStatus,
- Telemetry,
- TelemetryGetSpanResponse,
- TelemetryGetSpanTreeParams,
- TelemetryGetSpanTreeResponse,
- TelemetryLogEventParams,
- TelemetryQueryMetricsParams,
- TelemetryQueryMetricsResponse,
- TelemetryQuerySpansParams,
- TelemetryQuerySpansResponse,
- TelemetryQueryTracesParams,
- TelemetryQueryTracesResponse,
- TelemetrySaveSpansToDatasetParams,
- Trace,
-} from './resources/telemetry';
import {
ListToolGroupsResponse,
ToolGroup,
@@ -126,46 +70,23 @@ import {
ToolgroupRegisterParams,
Toolgroups,
} from './resources/toolgroups';
-import { ListToolsResponse, Tool, ToolListParams, ToolListResponse, Tools } from './resources/tools';
-import {
- ListVectorDBsResponse,
- VectorDBListResponse,
- VectorDBRegisterParams,
- VectorDBRegisterResponse,
- VectorDBRetrieveResponse,
- VectorDBs,
-} from './resources/vector-dbs';
+import { ToolListParams, ToolListResponse, Tools } from './resources/tools';
import {
QueryChunksResponse,
VectorIo,
VectorIoInsertParams,
VectorIoQueryParams,
} from './resources/vector-io';
-import {
- AgentCreateParams,
- AgentCreateResponse,
- AgentListParams,
- AgentListResponse,
- AgentRetrieveResponse,
- Agents,
- InferenceStep,
- MemoryRetrievalStep,
- ShieldCallStep,
- ToolExecutionStep,
- ToolResponse,
-} from './resources/agents/agents';
+import { Alpha } from './resources/alpha/alpha';
+import { Beta } from './resources/beta/beta';
import { Chat, ChatCompletionChunk } from './resources/chat/chat';
import {
- BenchmarkConfig,
- Eval,
- EvalCandidate,
- EvalEvaluateRowsAlphaParams,
- EvalEvaluateRowsParams,
- EvalRunEvalAlphaParams,
- EvalRunEvalParams,
- EvaluateResponse,
- Job,
-} from './resources/eval/eval';
+ ConversationCreateParams,
+ ConversationDeleteResponse,
+ ConversationObject,
+ ConversationUpdateParams,
+ Conversations,
+} from './resources/conversations/conversations';
import {
ListModelsResponse,
Model,
@@ -173,14 +94,6 @@ import {
ModelRegisterParams,
Models,
} from './resources/models/models';
-import {
- AlgorithmConfig,
- ListPostTrainingJobsResponse,
- PostTraining,
- PostTrainingJob,
- PostTrainingPreferenceOptimizeParams,
- PostTrainingSupervisedFineTuneParams,
-} from './resources/post-training/post-training';
import {
ResponseCreateParams,
ResponseCreateParamsNonStreaming,
@@ -328,30 +241,25 @@ export class LlamaStackClient extends Core.APIClient {
tools: API.Tools = new API.Tools(this);
toolRuntime: API.ToolRuntime = new API.ToolRuntime(this);
responses: API.Responses = new API.Responses(this);
- agents: API.Agents = new API.Agents(this);
- datasets: API.Datasets = new API.Datasets(this);
- eval: API.Eval = new API.Eval(this);
+ conversations: API.Conversations = new API.Conversations(this);
inspect: API.Inspect = new API.Inspect(this);
- inference: API.Inference = new API.Inference(this);
embeddings: API.Embeddings = new API.Embeddings(this);
chat: API.Chat = new API.Chat(this);
completions: API.Completions = new API.Completions(this);
vectorIo: API.VectorIo = new API.VectorIo(this);
- vectorDBs: API.VectorDBs = new API.VectorDBs(this);
vectorStores: API.VectorStores = new API.VectorStores(this);
models: API.Models = new API.Models(this);
- postTraining: API.PostTraining = new API.PostTraining(this);
providers: API.Providers = new API.Providers(this);
routes: API.Routes = new API.Routes(this);
moderations: API.Moderations = new API.Moderations(this);
safety: API.Safety = new API.Safety(this);
shields: API.Shields = new API.Shields(this);
syntheticDataGeneration: API.SyntheticDataGeneration = new API.SyntheticDataGeneration(this);
- telemetry: API.Telemetry = new API.Telemetry(this);
scoring: API.Scoring = new API.Scoring(this);
scoringFunctions: API.ScoringFunctions = new API.ScoringFunctions(this);
- benchmarks: API.Benchmarks = new API.Benchmarks(this);
files: API.Files = new API.Files(this);
+ alpha: API.Alpha = new API.Alpha(this);
+ beta: API.Beta = new API.Beta(this);
/**
* Check whether the base URL is set to its default.
@@ -408,32 +316,27 @@ LlamaStackClient.Tools = Tools;
LlamaStackClient.ToolRuntime = ToolRuntime;
LlamaStackClient.Responses = Responses;
LlamaStackClient.ResponseListResponsesOpenAICursorPage = ResponseListResponsesOpenAICursorPage;
-LlamaStackClient.Agents = Agents;
-LlamaStackClient.Datasets = Datasets;
-LlamaStackClient.Eval = Eval;
+LlamaStackClient.Conversations = Conversations;
LlamaStackClient.Inspect = Inspect;
-LlamaStackClient.Inference = Inference;
LlamaStackClient.Embeddings = Embeddings;
LlamaStackClient.Chat = Chat;
LlamaStackClient.Completions = Completions;
LlamaStackClient.VectorIo = VectorIo;
-LlamaStackClient.VectorDBs = VectorDBs;
LlamaStackClient.VectorStores = VectorStores;
LlamaStackClient.VectorStoresOpenAICursorPage = VectorStoresOpenAICursorPage;
LlamaStackClient.Models = Models;
-LlamaStackClient.PostTraining = PostTraining;
LlamaStackClient.Providers = Providers;
LlamaStackClient.Routes = Routes;
LlamaStackClient.Moderations = Moderations;
LlamaStackClient.Safety = Safety;
LlamaStackClient.Shields = Shields;
LlamaStackClient.SyntheticDataGeneration = SyntheticDataGeneration;
-LlamaStackClient.Telemetry = Telemetry;
LlamaStackClient.Scoring = Scoring;
LlamaStackClient.ScoringFunctions = ScoringFunctions;
-LlamaStackClient.Benchmarks = Benchmarks;
LlamaStackClient.Files = Files;
LlamaStackClient.FilesOpenAICursorPage = FilesOpenAICursorPage;
+LlamaStackClient.Alpha = Alpha;
+LlamaStackClient.Beta = Beta;
export declare namespace LlamaStackClient {
export type RequestOptions = Core.RequestOptions;
@@ -458,13 +361,7 @@ export declare namespace LlamaStackClient {
type ToolgroupRegisterParams as ToolgroupRegisterParams,
};
- export {
- Tools as Tools,
- type ListToolsResponse as ListToolsResponse,
- type Tool as Tool,
- type ToolListResponse as ToolListResponse,
- type ToolListParams as ToolListParams,
- };
+ export { Tools as Tools, type ToolListResponse as ToolListResponse, type ToolListParams as ToolListParams };
export {
ToolRuntime as ToolRuntime,
@@ -489,41 +386,11 @@ export declare namespace LlamaStackClient {
};
export {
- Agents as Agents,
- type InferenceStep as InferenceStep,
- type MemoryRetrievalStep as MemoryRetrievalStep,
- type ShieldCallStep as ShieldCallStep,
- type ToolExecutionStep as ToolExecutionStep,
- type ToolResponse as ToolResponse,
- type AgentCreateResponse as AgentCreateResponse,
- type AgentRetrieveResponse as AgentRetrieveResponse,
- type AgentListResponse as AgentListResponse,
- type AgentCreateParams as AgentCreateParams,
- type AgentListParams as AgentListParams,
- };
-
- export {
- Datasets as Datasets,
- type ListDatasetsResponse as ListDatasetsResponse,
- type DatasetRetrieveResponse as DatasetRetrieveResponse,
- type DatasetListResponse as DatasetListResponse,
- type DatasetIterrowsResponse as DatasetIterrowsResponse,
- type DatasetRegisterResponse as DatasetRegisterResponse,
- type DatasetAppendrowsParams as DatasetAppendrowsParams,
- type DatasetIterrowsParams as DatasetIterrowsParams,
- type DatasetRegisterParams as DatasetRegisterParams,
- };
-
- export {
- Eval as Eval,
- type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
- type EvaluateResponse as EvaluateResponse,
- type Job as Job,
- type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams as EvalRunEvalParams,
- type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
+ Conversations as Conversations,
+ type ConversationObject as ConversationObject,
+ type ConversationDeleteResponse as ConversationDeleteResponse,
+ type ConversationCreateParams as ConversationCreateParams,
+ type ConversationUpdateParams as ConversationUpdateParams,
};
export {
@@ -534,26 +401,6 @@ export declare namespace LlamaStackClient {
type VersionInfo as VersionInfo,
};
- export {
- Inference as Inference,
- type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
- type CompletionResponse as CompletionResponse,
- type EmbeddingsResponse as EmbeddingsResponse,
- type TokenLogProbs as TokenLogProbs,
- type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse as InferenceRerankResponse,
- type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
- type InferenceChatCompletionParams as InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams as InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
- type InferenceRerankParams as InferenceRerankParams,
- };
-
export {
Embeddings as Embeddings,
type CreateEmbeddingsResponse as CreateEmbeddingsResponse,
@@ -577,15 +424,6 @@ export declare namespace LlamaStackClient {
type VectorIoQueryParams as VectorIoQueryParams,
};
- export {
- VectorDBs as VectorDBs,
- type ListVectorDBsResponse as ListVectorDBsResponse,
- type VectorDBRetrieveResponse as VectorDBRetrieveResponse,
- type VectorDBListResponse as VectorDBListResponse,
- type VectorDBRegisterResponse as VectorDBRegisterResponse,
- type VectorDBRegisterParams as VectorDBRegisterParams,
- };
-
export {
VectorStores as VectorStores,
type ListVectorStoresResponse as ListVectorStoresResponse,
@@ -607,15 +445,6 @@ export declare namespace LlamaStackClient {
type ModelRegisterParams as ModelRegisterParams,
};
- export {
- PostTraining as PostTraining,
- type AlgorithmConfig as AlgorithmConfig,
- type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
- type PostTrainingJob as PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
- };
-
export {
Providers as Providers,
type ListProvidersResponse as ListProvidersResponse,
@@ -626,6 +455,7 @@ export declare namespace LlamaStackClient {
Routes as Routes,
type ListRoutesResponse as ListRoutesResponse,
type RouteListResponse as RouteListResponse,
+ type RouteListParams as RouteListParams,
};
export {
@@ -654,26 +484,6 @@ export declare namespace LlamaStackClient {
type SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams,
};
- export {
- Telemetry as Telemetry,
- type Event as Event,
- type QueryCondition as QueryCondition,
- type QuerySpansResponse as QuerySpansResponse,
- type SpanWithStatus as SpanWithStatus,
- type Trace as Trace,
- type TelemetryGetSpanResponse as TelemetryGetSpanResponse,
- type TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse,
- type TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse,
- type TelemetryQuerySpansResponse as TelemetryQuerySpansResponse,
- type TelemetryQueryTracesResponse as TelemetryQueryTracesResponse,
- type TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams as TelemetryLogEventParams,
- type TelemetryQueryMetricsParams as TelemetryQueryMetricsParams,
- type TelemetryQuerySpansParams as TelemetryQuerySpansParams,
- type TelemetryQueryTracesParams as TelemetryQueryTracesParams,
- type TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams,
- };
-
export {
Scoring as Scoring,
type ScoringScoreResponse as ScoringScoreResponse,
@@ -691,14 +501,6 @@ export declare namespace LlamaStackClient {
type ScoringFunctionRegisterParams as ScoringFunctionRegisterParams,
};
- export {
- Benchmarks as Benchmarks,
- type Benchmark as Benchmark,
- type ListBenchmarksResponse as ListBenchmarksResponse,
- type BenchmarkListResponse as BenchmarkListResponse,
- type BenchmarkRegisterParams as BenchmarkRegisterParams,
- };
-
export {
Files as Files,
type DeleteFileResponse as DeleteFileResponse,
@@ -710,27 +512,22 @@ export declare namespace LlamaStackClient {
type FileListParams as FileListParams,
};
- export type AgentConfig = API.AgentConfig;
- export type BatchCompletion = API.BatchCompletion;
- export type ChatCompletionResponse = API.ChatCompletionResponse;
+ export { Alpha as Alpha };
+
+ export { Beta as Beta };
+
export type CompletionMessage = API.CompletionMessage;
- export type ContentDelta = API.ContentDelta;
export type Document = API.Document;
export type InterleavedContent = API.InterleavedContent;
export type InterleavedContentItem = API.InterleavedContentItem;
export type Message = API.Message;
- export type Metric = API.Metric;
export type ParamType = API.ParamType;
export type QueryConfig = API.QueryConfig;
- export type QueryGeneratorConfig = API.QueryGeneratorConfig;
export type QueryResult = API.QueryResult;
- export type ResponseFormat = API.ResponseFormat;
export type SafetyViolation = API.SafetyViolation;
- export type SamplingParams = API.SamplingParams;
export type ScoringResult = API.ScoringResult;
export type SystemMessage = API.SystemMessage;
export type ToolCall = API.ToolCall;
- export type ToolParamDefinition = API.ToolParamDefinition;
export type ToolResponseMessage = API.ToolResponseMessage;
export type UserMessage = API.UserMessage;
}
diff --git a/src/pagination.ts b/src/pagination.ts
index 79ab3eb..61184a7 100644
--- a/src/pagination.ts
+++ b/src/pagination.ts
@@ -48,11 +48,7 @@ export class DatasetsIterrows- extends AbstractPage
- implements Datase
}
nextPageInfo(): PageInfo | null {
- const offset = this.next_index;
- if (!offset) {
- return null;
- }
-
+ const offset = this.next_index ?? 0;
const length = this.getPaginatedItems().length;
const currentCount = offset + length;
diff --git a/src/resources/agents/agents.ts b/src/resources/agents/agents.ts
deleted file mode 100644
index 35a4d62..0000000
--- a/src/resources/agents/agents.ts
+++ /dev/null
@@ -1,366 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
-import * as Shared from '../shared';
-import * as SessionAPI from './session';
-import {
- Session,
- SessionCreateParams,
- SessionCreateResponse,
- SessionListParams,
- SessionListResponse,
- SessionResource,
- SessionRetrieveParams,
-} from './session';
-import * as StepsAPI from './steps';
-import { StepRetrieveResponse, Steps } from './steps';
-import * as TurnAPI from './turn';
-import {
- AgentTurnResponseStreamChunk,
- Turn,
- TurnCreateParams,
- TurnCreateParamsNonStreaming,
- TurnCreateParamsStreaming,
- TurnResource,
- TurnResponseEvent,
- TurnResponseEventPayload,
- TurnResumeParams,
- TurnResumeParamsNonStreaming,
- TurnResumeParamsStreaming,
-} from './turn';
-
-export class Agents extends APIResource {
- session: SessionAPI.SessionResource = new SessionAPI.SessionResource(this._client);
- steps: StepsAPI.Steps = new StepsAPI.Steps(this._client);
- turn: TurnAPI.TurnResource = new TurnAPI.TurnResource(this._client);
-
- /**
- * Create an agent with the given configuration.
- */
- create(body: AgentCreateParams, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post('/v1/agents', { body, ...options });
- }
-
- /**
- * Describe an agent by its ID.
- */
- retrieve(agentId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/agents/${agentId}`, options);
- }
-
- /**
- * List all agents.
- */
- list(query?: AgentListParams, options?: Core.RequestOptions): Core.APIPromise;
- list(options?: Core.RequestOptions): Core.APIPromise;
- list(
- query: AgentListParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.list({}, query);
- }
- return this._client.get('/v1/agents', { query, ...options });
- }
-
- /**
- * Delete an agent by its ID and its associated sessions and turns.
- */
- delete(agentId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/agents/${agentId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * An inference step in an agent turn.
- */
-export interface InferenceStep {
- /**
- * The response from the LLM.
- */
- model_response: Shared.CompletionMessage;
-
- /**
- * The ID of the step.
- */
- step_id: string;
-
- /**
- * Type of the step in an agent turn.
- */
- step_type: 'inference';
-
- /**
- * The ID of the turn.
- */
- turn_id: string;
-
- /**
- * The time the step completed.
- */
- completed_at?: string;
-
- /**
- * The time the step started.
- */
- started_at?: string;
-}
-
-/**
- * A memory retrieval step in an agent turn.
- */
-export interface MemoryRetrievalStep {
- /**
- * The context retrieved from the vector databases.
- */
- inserted_context: Shared.InterleavedContent;
-
- /**
- * The ID of the step.
- */
- step_id: string;
-
- /**
- * Type of the step in an agent turn.
- */
- step_type: 'memory_retrieval';
-
- /**
- * The ID of the turn.
- */
- turn_id: string;
-
- /**
- * The IDs of the vector databases to retrieve context from.
- */
- vector_db_ids: string;
-
- /**
- * The time the step completed.
- */
- completed_at?: string;
-
- /**
- * The time the step started.
- */
- started_at?: string;
-}
-
-/**
- * A shield call step in an agent turn.
- */
-export interface ShieldCallStep {
- /**
- * The ID of the step.
- */
- step_id: string;
-
- /**
- * Type of the step in an agent turn.
- */
- step_type: 'shield_call';
-
- /**
- * The ID of the turn.
- */
- turn_id: string;
-
- /**
- * The time the step completed.
- */
- completed_at?: string;
-
- /**
- * The time the step started.
- */
- started_at?: string;
-
- /**
- * The violation from the shield call.
- */
- violation?: Shared.SafetyViolation;
-}
-
-/**
- * A tool execution step in an agent turn.
- */
-export interface ToolExecutionStep {
- /**
- * The ID of the step.
- */
- step_id: string;
-
- /**
- * Type of the step in an agent turn.
- */
- step_type: 'tool_execution';
-
- /**
- * The tool calls to execute.
- */
- tool_calls: Array;
-
- /**
- * The tool responses from the tool calls.
- */
- tool_responses: Array;
-
- /**
- * The ID of the turn.
- */
- turn_id: string;
-
- /**
- * The time the step completed.
- */
- completed_at?: string;
-
- /**
- * The time the step started.
- */
- started_at?: string;
-}
-
-/**
- * Response from a tool invocation.
- */
-export interface ToolResponse {
- /**
- * Unique identifier for the tool call this response is for
- */
- call_id: string;
-
- /**
- * The response content from the tool
- */
- content: Shared.InterleavedContent;
-
- /**
- * Name of the tool that was invoked
- */
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- /**
- * (Optional) Additional metadata about the tool response
- */
- metadata?: { [key: string]: boolean | number | string | Array | unknown | null };
-}
-
-/**
- * Response returned when creating a new agent.
- */
-export interface AgentCreateResponse {
- /**
- * Unique identifier for the created agent
- */
- agent_id: string;
-}
-
-/**
- * An agent instance with configuration and metadata.
- */
-export interface AgentRetrieveResponse {
- /**
- * Configuration settings for the agent
- */
- agent_config: Shared.AgentConfig;
-
- /**
- * Unique identifier for the agent
- */
- agent_id: string;
-
- /**
- * Timestamp when the agent was created
- */
- created_at: string;
-}
-
-/**
- * A generic paginated response that follows a simple format.
- */
-export interface AgentListResponse {
- /**
- * The list of items for the current page
- */
- data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
-
- /**
- * Whether there are more items available after this set
- */
- has_more: boolean;
-
- /**
- * The URL for accessing this list
- */
- url?: string;
-}
-
-export interface AgentCreateParams {
- /**
- * The configuration for the agent.
- */
- agent_config: Shared.AgentConfig;
-}
-
-export interface AgentListParams {
- /**
- * The number of agents to return.
- */
- limit?: number;
-
- /**
- * The index to start the pagination from.
- */
- start_index?: number;
-}
-
-Agents.SessionResource = SessionResource;
-Agents.Steps = Steps;
-Agents.TurnResource = TurnResource;
-
-export declare namespace Agents {
- export {
- type InferenceStep as InferenceStep,
- type MemoryRetrievalStep as MemoryRetrievalStep,
- type ShieldCallStep as ShieldCallStep,
- type ToolExecutionStep as ToolExecutionStep,
- type ToolResponse as ToolResponse,
- type AgentCreateResponse as AgentCreateResponse,
- type AgentRetrieveResponse as AgentRetrieveResponse,
- type AgentListResponse as AgentListResponse,
- type AgentCreateParams as AgentCreateParams,
- type AgentListParams as AgentListParams,
- };
-
- export {
- SessionResource as SessionResource,
- type Session as Session,
- type SessionCreateResponse as SessionCreateResponse,
- type SessionListResponse as SessionListResponse,
- type SessionCreateParams as SessionCreateParams,
- type SessionRetrieveParams as SessionRetrieveParams,
- type SessionListParams as SessionListParams,
- };
-
- export { Steps as Steps, type StepRetrieveResponse as StepRetrieveResponse };
-
- export {
- TurnResource as TurnResource,
- type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
- type Turn as Turn,
- type TurnResponseEvent as TurnResponseEvent,
- type TurnResponseEventPayload as TurnResponseEventPayload,
- type TurnCreateParams as TurnCreateParams,
- type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
- type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
- type TurnResumeParams as TurnResumeParams,
- type TurnResumeParamsNonStreaming as TurnResumeParamsNonStreaming,
- type TurnResumeParamsStreaming as TurnResumeParamsStreaming,
- };
-}
diff --git a/src/resources/agents/index.ts b/src/resources/agents/index.ts
deleted file mode 100644
index 88a44bf..0000000
--- a/src/resources/agents/index.ts
+++ /dev/null
@@ -1,38 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Agents,
- type InferenceStep,
- type MemoryRetrievalStep,
- type ShieldCallStep,
- type ToolExecutionStep,
- type ToolResponse,
- type AgentCreateResponse,
- type AgentRetrieveResponse,
- type AgentListResponse,
- type AgentCreateParams,
- type AgentListParams,
-} from './agents';
-export {
- SessionResource,
- type Session,
- type SessionCreateResponse,
- type SessionListResponse,
- type SessionCreateParams,
- type SessionRetrieveParams,
- type SessionListParams,
-} from './session';
-export { Steps, type StepRetrieveResponse } from './steps';
-export {
- TurnResource,
- type AgentTurnResponseStreamChunk,
- type Turn,
- type TurnResponseEvent,
- type TurnResponseEventPayload,
- type TurnCreateParams,
- type TurnCreateParamsNonStreaming,
- type TurnCreateParamsStreaming,
- type TurnResumeParams,
- type TurnResumeParamsNonStreaming,
- type TurnResumeParamsStreaming,
-} from './turn';
diff --git a/src/resources/agents/session.ts b/src/resources/agents/session.ts
deleted file mode 100644
index 35c8511..0000000
--- a/src/resources/agents/session.ts
+++ /dev/null
@@ -1,163 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import { isRequestOptions } from '../../core';
-import * as Core from '../../core';
-import * as TurnAPI from './turn';
-
-export class SessionResource extends APIResource {
- /**
- * Create a new session for an agent.
- */
- create(
- agentId: string,
- body: SessionCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(`/v1/agents/${agentId}/session`, { body, ...options });
- }
-
- /**
- * Retrieve an agent session by its ID.
- */
- retrieve(
- agentId: string,
- sessionId: string,
- query?: SessionRetrieveParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- retrieve(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise;
- retrieve(
- agentId: string,
- sessionId: string,
- query: SessionRetrieveParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.retrieve(agentId, sessionId, {}, query);
- }
- return this._client.get(`/v1/agents/${agentId}/session/${sessionId}`, { query, ...options });
- }
-
- /**
- * List all session(s) of a given agent.
- */
- list(
- agentId: string,
- query?: SessionListParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- list(agentId: string, options?: Core.RequestOptions): Core.APIPromise;
- list(
- agentId: string,
- query: SessionListParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.list(agentId, {}, query);
- }
- return this._client.get(`/v1/agents/${agentId}/sessions`, { query, ...options });
- }
-
- /**
- * Delete an agent session by its ID and its associated turns.
- */
- delete(agentId: string, sessionId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/agents/${agentId}/session/${sessionId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * A single session of an interaction with an Agentic System.
- */
-export interface Session {
- /**
- * Unique identifier for the conversation session
- */
- session_id: string;
-
- /**
- * Human-readable name for the session
- */
- session_name: string;
-
- /**
- * Timestamp when the session was created
- */
- started_at: string;
-
- /**
- * List of all turns that have occurred in this session
- */
- turns: Array;
-}
-
-/**
- * Response returned when creating a new agent session.
- */
-export interface SessionCreateResponse {
- /**
- * Unique identifier for the created session
- */
- session_id: string;
-}
-
-/**
- * A generic paginated response that follows a simple format.
- */
-export interface SessionListResponse {
- /**
- * The list of items for the current page
- */
- data: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
-
- /**
- * Whether there are more items available after this set
- */
- has_more: boolean;
-
- /**
- * The URL for accessing this list
- */
- url?: string;
-}
-
-export interface SessionCreateParams {
- /**
- * The name of the session to create.
- */
- session_name: string;
-}
-
-export interface SessionRetrieveParams {
- /**
- * (Optional) List of turn IDs to filter the session by.
- */
- turn_ids?: Array;
-}
-
-export interface SessionListParams {
- /**
- * The number of sessions to return.
- */
- limit?: number;
-
- /**
- * The index to start the pagination from.
- */
- start_index?: number;
-}
-
-export declare namespace SessionResource {
- export {
- type Session as Session,
- type SessionCreateResponse as SessionCreateResponse,
- type SessionListResponse as SessionListResponse,
- type SessionCreateParams as SessionCreateParams,
- type SessionRetrieveParams as SessionRetrieveParams,
- type SessionListParams as SessionListParams,
- };
-}
diff --git a/src/resources/agents/steps.ts b/src/resources/agents/steps.ts
deleted file mode 100644
index 8d2d821..0000000
--- a/src/resources/agents/steps.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as AgentsAPI from './agents';
-
-export class Steps extends APIResource {
- /**
- * Retrieve an agent step by its ID.
- */
- retrieve(
- agentId: string,
- sessionId: string,
- turnId: string,
- stepId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(
- `/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/step/${stepId}`,
- options,
- );
- }
-}
-
-/**
- * Response containing details of a specific agent step.
- */
-export interface StepRetrieveResponse {
- /**
- * The complete step data and execution details
- */
- step:
- | AgentsAPI.InferenceStep
- | AgentsAPI.ToolExecutionStep
- | AgentsAPI.ShieldCallStep
- | AgentsAPI.MemoryRetrievalStep;
-}
-
-export declare namespace Steps {
- export { type StepRetrieveResponse as StepRetrieveResponse };
-}
diff --git a/src/resources/agents/turn.ts b/src/resources/agents/turn.ts
deleted file mode 100644
index 0273625..0000000
--- a/src/resources/agents/turn.ts
+++ /dev/null
@@ -1,632 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import { APIPromise } from '../../core';
-import * as Core from '../../core';
-import * as TurnAPI from './turn';
-import * as Shared from '../shared';
-import * as AgentsAPI from './agents';
-import { Stream } from '../../streaming';
-
-export class TurnResource extends APIResource {
- /**
- * Create a new turn for an agent.
- */
- create(
- agentId: string,
- sessionId: string,
- body: TurnCreateParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise;
- create(
- agentId: string,
- sessionId: string,
- body: TurnCreateParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise>;
- create(
- agentId: string,
- sessionId: string,
- body: TurnCreateParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise | Turn>;
- create(
- agentId: string,
- sessionId: string,
- body: TurnCreateParams,
- options?: Core.RequestOptions,
- ): APIPromise | APIPromise> {
- return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn`, {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
- }
-
- /**
- * Retrieve an agent turn by its ID.
- */
- retrieve(
- agentId: string,
- sessionId: string,
- turnId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}`, options);
- }
-
- /**
- * Resume an agent turn with executed tool call responses. When a Turn has the
- * status `awaiting_input` due to pending input from client side tool calls, this
- * endpoint can be used to submit the outputs from the tool calls once they are
- * ready.
- */
- resume(
- agentId: string,
- sessionId: string,
- turnId: string,
- body: TurnResumeParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise;
- resume(
- agentId: string,
- sessionId: string,
- turnId: string,
- body: TurnResumeParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise>;
- resume(
- agentId: string,
- sessionId: string,
- turnId: string,
- body: TurnResumeParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise | Turn>;
- resume(
- agentId: string,
- sessionId: string,
- turnId: string,
- body: TurnResumeParams,
- options?: Core.RequestOptions,
- ): APIPromise | APIPromise> {
- return this._client.post(`/v1/agents/${agentId}/session/${sessionId}/turn/${turnId}/resume`, {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
- }
-}
-
-/**
- * Streamed agent turn completion response.
- */
-export interface AgentTurnResponseStreamChunk {
- /**
- * Individual event in the agent turn response stream
- */
- event: TurnResponseEvent;
-}
-
-/**
- * A single turn in an interaction with an Agentic System.
- */
-export interface Turn {
- /**
- * List of messages that initiated this turn
- */
- input_messages: Array;
-
- /**
- * The model's generated response containing content and metadata
- */
- output_message: Shared.CompletionMessage;
-
- /**
- * Unique identifier for the conversation session
- */
- session_id: string;
-
- /**
- * Timestamp when the turn began
- */
- started_at: string;
-
- /**
- * Ordered list of processing steps executed during this turn
- */
- steps: Array<
- | AgentsAPI.InferenceStep
- | AgentsAPI.ToolExecutionStep
- | AgentsAPI.ShieldCallStep
- | AgentsAPI.MemoryRetrievalStep
- >;
-
- /**
- * Unique identifier for the turn within a session
- */
- turn_id: string;
-
- /**
- * (Optional) Timestamp when the turn finished, if completed
- */
- completed_at?: string;
-
- /**
- * (Optional) Files or media attached to the agent's response
- */
- output_attachments?: Array;
-}
-
-export namespace Turn {
- /**
- * An attachment to an agent turn.
- */
- export interface OutputAttachment {
- /**
- * The content of the attachment.
- */
- content:
- | string
- | OutputAttachment.ImageContentItem
- | OutputAttachment.TextContentItem
- | Array
- | OutputAttachment.URL;
-
- /**
- * The MIME type of the attachment.
- */
- mime_type: string;
- }
-
- export namespace OutputAttachment {
- /**
- * A image content item
- */
- export interface ImageContentItem {
- /**
- * Image as a base64 encoded string or an URL
- */
- image: ImageContentItem.Image;
-
- /**
- * Discriminator type of the content item. Always "image"
- */
- type: 'image';
- }
-
- export namespace ImageContentItem {
- /**
- * Image as a base64 encoded string or an URL
- */
- export interface Image {
- /**
- * base64 encoded image data as string
- */
- data?: string;
-
- /**
- * A URL of the image or data URL in the format of data:image/{type};base64,{data}.
- * Note that URL could have length limits.
- */
- url?: Image.URL;
- }
-
- export namespace Image {
- /**
- * A URL of the image or data URL in the format of data:image/{type};base64,{data}.
- * Note that URL could have length limits.
- */
- export interface URL {
- /**
- * The URL string pointing to the resource
- */
- uri: string;
- }
- }
- }
-
- /**
- * A text content item
- */
- export interface TextContentItem {
- /**
- * Text content
- */
- text: string;
-
- /**
- * Discriminator type of the content item. Always "text"
- */
- type: 'text';
- }
-
- /**
- * A URL reference to external content.
- */
- export interface URL {
- /**
- * The URL string pointing to the resource
- */
- uri: string;
- }
- }
-}
-
-/**
- * An event in an agent turn response stream.
- */
-export interface TurnResponseEvent {
- /**
- * Event-specific payload containing event data
- */
- payload: TurnResponseEventPayload;
-}
-
-/**
- * Payload for step start events in agent turn responses.
- */
-export type TurnResponseEventPayload =
- | TurnResponseEventPayload.AgentTurnResponseStepStartPayload
- | TurnResponseEventPayload.AgentTurnResponseStepProgressPayload
- | TurnResponseEventPayload.AgentTurnResponseStepCompletePayload
- | TurnResponseEventPayload.AgentTurnResponseTurnStartPayload
- | TurnResponseEventPayload.AgentTurnResponseTurnCompletePayload
- | TurnResponseEventPayload.AgentTurnResponseTurnAwaitingInputPayload;
-
-export namespace TurnResponseEventPayload {
- /**
- * Payload for step start events in agent turn responses.
- */
- export interface AgentTurnResponseStepStartPayload {
- /**
- * Type of event being reported
- */
- event_type: 'step_start';
-
- /**
- * Unique identifier for the step within a turn
- */
- step_id: string;
-
- /**
- * Type of step being executed
- */
- step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
-
- /**
- * (Optional) Additional metadata for the step
- */
- metadata?: { [key: string]: boolean | number | string | Array | unknown | null };
- }
-
- /**
- * Payload for step progress events in agent turn responses.
- */
- export interface AgentTurnResponseStepProgressPayload {
- /**
- * Incremental content changes during step execution
- */
- delta: Shared.ContentDelta;
-
- /**
- * Type of event being reported
- */
- event_type: 'step_progress';
-
- /**
- * Unique identifier for the step within a turn
- */
- step_id: string;
-
- /**
- * Type of step being executed
- */
- step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
- }
-
- /**
- * Payload for step completion events in agent turn responses.
- */
- export interface AgentTurnResponseStepCompletePayload {
- /**
- * Type of event being reported
- */
- event_type: 'step_complete';
-
- /**
- * Complete details of the executed step
- */
- step_details:
- | AgentsAPI.InferenceStep
- | AgentsAPI.ToolExecutionStep
- | AgentsAPI.ShieldCallStep
- | AgentsAPI.MemoryRetrievalStep;
-
- /**
- * Unique identifier for the step within a turn
- */
- step_id: string;
-
- /**
- * Type of step being executed
- */
- step_type: 'inference' | 'tool_execution' | 'shield_call' | 'memory_retrieval';
- }
-
- /**
- * Payload for turn start events in agent turn responses.
- */
- export interface AgentTurnResponseTurnStartPayload {
- /**
- * Type of event being reported
- */
- event_type: 'turn_start';
-
- /**
- * Unique identifier for the turn within a session
- */
- turn_id: string;
- }
-
- /**
- * Payload for turn completion events in agent turn responses.
- */
- export interface AgentTurnResponseTurnCompletePayload {
- /**
- * Type of event being reported
- */
- event_type: 'turn_complete';
-
- /**
- * Complete turn data including all steps and results
- */
- turn: TurnAPI.Turn;
- }
-
- /**
- * Payload for turn awaiting input events in agent turn responses.
- */
- export interface AgentTurnResponseTurnAwaitingInputPayload {
- /**
- * Type of event being reported
- */
- event_type: 'turn_awaiting_input';
-
- /**
- * Turn data when waiting for external tool responses
- */
- turn: TurnAPI.Turn;
- }
-}
-
-export type TurnCreateParams = TurnCreateParamsNonStreaming | TurnCreateParamsStreaming;
-
-export interface TurnCreateParamsBase {
- /**
- * List of messages to start the turn with.
- */
- messages: Array;
-
- /**
- * (Optional) List of documents to create the turn with.
- */
- documents?: Array;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-
- /**
- * (Optional) The tool configuration to create the turn with, will be used to
- * override the agent's tool_config.
- */
- tool_config?: TurnCreateParams.ToolConfig;
-
- /**
- * (Optional) List of toolgroups to create the turn with, will be used in addition
- * to the agent's config toolgroups for the request.
- */
- toolgroups?: Array;
-}
-
-export namespace TurnCreateParams {
- /**
- * A document to be used by an agent.
- */
- export interface Document {
- /**
- * The content of the document.
- */
- content:
- | string
- | Document.ImageContentItem
- | Document.TextContentItem
- | Array
- | Document.URL;
-
- /**
- * The MIME type of the document.
- */
- mime_type: string;
- }
-
- export namespace Document {
- /**
- * A image content item
- */
- export interface ImageContentItem {
- /**
- * Image as a base64 encoded string or an URL
- */
- image: ImageContentItem.Image;
-
- /**
- * Discriminator type of the content item. Always "image"
- */
- type: 'image';
- }
-
- export namespace ImageContentItem {
- /**
- * Image as a base64 encoded string or an URL
- */
- export interface Image {
- /**
- * base64 encoded image data as string
- */
- data?: string;
-
- /**
- * A URL of the image or data URL in the format of data:image/{type};base64,{data}.
- * Note that URL could have length limits.
- */
- url?: Image.URL;
- }
-
- export namespace Image {
- /**
- * A URL of the image or data URL in the format of data:image/{type};base64,{data}.
- * Note that URL could have length limits.
- */
- export interface URL {
- /**
- * The URL string pointing to the resource
- */
- uri: string;
- }
- }
- }
-
- /**
- * A text content item
- */
- export interface TextContentItem {
- /**
- * Text content
- */
- text: string;
-
- /**
- * Discriminator type of the content item. Always "text"
- */
- type: 'text';
- }
-
- /**
- * A URL reference to external content.
- */
- export interface URL {
- /**
- * The URL string pointing to the resource
- */
- uri: string;
- }
- }
-
- /**
- * (Optional) The tool configuration to create the turn with, will be used to
- * override the agent's tool_config.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface AgentToolGroupWithArgs {
- args: { [key: string]: boolean | number | string | Array | unknown | null };
-
- name: string;
- }
-
- export type TurnCreateParamsNonStreaming = TurnAPI.TurnCreateParamsNonStreaming;
- export type TurnCreateParamsStreaming = TurnAPI.TurnCreateParamsStreaming;
-}
-
-export interface TurnCreateParamsNonStreaming extends TurnCreateParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface TurnCreateParamsStreaming extends TurnCreateParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export type TurnResumeParams = TurnResumeParamsNonStreaming | TurnResumeParamsStreaming;
-
-export interface TurnResumeParamsBase {
- /**
- * The tool call responses to resume the turn with.
- */
- tool_responses: Array;
-
- /**
- * Whether to stream the response.
- */
- stream?: boolean;
-}
-
-export namespace TurnResumeParams {
- export type TurnResumeParamsNonStreaming = TurnAPI.TurnResumeParamsNonStreaming;
- export type TurnResumeParamsStreaming = TurnAPI.TurnResumeParamsStreaming;
-}
-
-export interface TurnResumeParamsNonStreaming extends TurnResumeParamsBase {
- /**
- * Whether to stream the response.
- */
- stream?: false;
-}
-
-export interface TurnResumeParamsStreaming extends TurnResumeParamsBase {
- /**
- * Whether to stream the response.
- */
- stream: true;
-}
-
-export declare namespace TurnResource {
- export {
- type AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk,
- type Turn as Turn,
- type TurnResponseEvent as TurnResponseEvent,
- type TurnResponseEventPayload as TurnResponseEventPayload,
- type TurnCreateParams as TurnCreateParams,
- type TurnCreateParamsNonStreaming as TurnCreateParamsNonStreaming,
- type TurnCreateParamsStreaming as TurnCreateParamsStreaming,
- type TurnResumeParams as TurnResumeParams,
- type TurnResumeParamsNonStreaming as TurnResumeParamsNonStreaming,
- type TurnResumeParamsStreaming as TurnResumeParamsStreaming,
- };
-}
diff --git a/src/resources/alpha.ts b/src/resources/alpha.ts
new file mode 100644
index 0000000..446b643
--- /dev/null
+++ b/src/resources/alpha.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './alpha/index';
diff --git a/src/resources/agents.ts b/src/resources/alpha/agents.ts
similarity index 100%
rename from src/resources/agents.ts
rename to src/resources/alpha/agents.ts
diff --git a/src/resources/alpha/agents/agents.ts b/src/resources/alpha/agents/agents.ts
new file mode 100644
index 0000000..4732cd5
--- /dev/null
+++ b/src/resources/alpha/agents/agents.ts
@@ -0,0 +1,27 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import * as SessionAPI from './session';
+import { Session } from './session';
+import * as StepsAPI from './steps';
+import { Steps } from './steps';
+import * as TurnAPI from './turn';
+import { Turn } from './turn';
+
+export class Agents extends APIResource {
+ session: SessionAPI.Session = new SessionAPI.Session(this._client);
+ steps: StepsAPI.Steps = new StepsAPI.Steps(this._client);
+ turn: TurnAPI.Turn = new TurnAPI.Turn(this._client);
+}
+
+Agents.Session = Session;
+Agents.Steps = Steps;
+Agents.Turn = Turn;
+
+export declare namespace Agents {
+ export { Session as Session };
+
+ export { Steps as Steps };
+
+ export { Turn as Turn };
+}
diff --git a/src/resources/alpha/agents/index.ts b/src/resources/alpha/agents/index.ts
new file mode 100644
index 0000000..d9edac6
--- /dev/null
+++ b/src/resources/alpha/agents/index.ts
@@ -0,0 +1,6 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Agents } from './agents';
+export { Session } from './session';
+export { Steps } from './steps';
+export { Turn } from './turn';
diff --git a/src/resources/alpha/agents/session.ts b/src/resources/alpha/agents/session.ts
new file mode 100644
index 0000000..9e1bdde
--- /dev/null
+++ b/src/resources/alpha/agents/session.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+
+export class Session extends APIResource {}
diff --git a/src/resources/alpha/agents/steps.ts b/src/resources/alpha/agents/steps.ts
new file mode 100644
index 0000000..295f1f7
--- /dev/null
+++ b/src/resources/alpha/agents/steps.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+
+export class Steps extends APIResource {}
diff --git a/src/resources/alpha/agents/turn.ts b/src/resources/alpha/agents/turn.ts
new file mode 100644
index 0000000..3528209
--- /dev/null
+++ b/src/resources/alpha/agents/turn.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+
+export class Turn extends APIResource {}
diff --git a/src/resources/alpha/alpha.ts b/src/resources/alpha/alpha.ts
new file mode 100644
index 0000000..6bf1d0c
--- /dev/null
+++ b/src/resources/alpha/alpha.ts
@@ -0,0 +1,39 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as BenchmarksAPI from './benchmarks';
+import { Benchmarks } from './benchmarks';
+import * as InferenceAPI from './inference';
+import { Inference } from './inference';
+import * as AgentsAPI from './agents/agents';
+import { Agents } from './agents/agents';
+import * as EvalAPI from './eval/eval';
+import { Eval } from './eval/eval';
+import * as PostTrainingAPI from './post-training/post-training';
+import { PostTraining } from './post-training/post-training';
+
+export class Alpha extends APIResource {
+ inference: InferenceAPI.Inference = new InferenceAPI.Inference(this._client);
+ postTraining: PostTrainingAPI.PostTraining = new PostTrainingAPI.PostTraining(this._client);
+ benchmarks: BenchmarksAPI.Benchmarks = new BenchmarksAPI.Benchmarks(this._client);
+ eval: EvalAPI.Eval = new EvalAPI.Eval(this._client);
+ agents: AgentsAPI.Agents = new AgentsAPI.Agents(this._client);
+}
+
+Alpha.Inference = Inference;
+Alpha.PostTraining = PostTraining;
+Alpha.Benchmarks = Benchmarks;
+Alpha.Eval = Eval;
+Alpha.Agents = Agents;
+
+export declare namespace Alpha {
+ export { Inference as Inference };
+
+ export { PostTraining as PostTraining };
+
+ export { Benchmarks as Benchmarks };
+
+ export { Eval as Eval };
+
+ export { Agents as Agents };
+}
diff --git a/src/resources/alpha/benchmarks.ts b/src/resources/alpha/benchmarks.ts
new file mode 100644
index 0000000..b194605
--- /dev/null
+++ b/src/resources/alpha/benchmarks.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+
+export class Benchmarks extends APIResource {}
diff --git a/src/resources/eval.ts b/src/resources/alpha/eval.ts
similarity index 100%
rename from src/resources/eval.ts
rename to src/resources/alpha/eval.ts
diff --git a/src/resources/alpha/eval/eval.ts b/src/resources/alpha/eval/eval.ts
new file mode 100644
index 0000000..d60adf6
--- /dev/null
+++ b/src/resources/alpha/eval/eval.ts
@@ -0,0 +1,15 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import * as JobsAPI from './jobs';
+import { Jobs } from './jobs';
+
+export class Eval extends APIResource {
+ jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client);
+}
+
+Eval.Jobs = Jobs;
+
+export declare namespace Eval {
+ export { Jobs as Jobs };
+}
diff --git a/src/resources/alpha/eval/index.ts b/src/resources/alpha/eval/index.ts
new file mode 100644
index 0000000..0297aa2
--- /dev/null
+++ b/src/resources/alpha/eval/index.ts
@@ -0,0 +1,4 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Eval } from './eval';
+export { Jobs } from './jobs';
diff --git a/src/resources/alpha/eval/jobs.ts b/src/resources/alpha/eval/jobs.ts
new file mode 100644
index 0000000..c1b3622
--- /dev/null
+++ b/src/resources/alpha/eval/jobs.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+
+export class Jobs extends APIResource {}
diff --git a/src/resources/alpha/index.ts b/src/resources/alpha/index.ts
new file mode 100644
index 0000000..005e72a
--- /dev/null
+++ b/src/resources/alpha/index.ts
@@ -0,0 +1,8 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Agents } from './agents/index';
+export { Alpha } from './alpha';
+export { Benchmarks } from './benchmarks';
+export { Eval } from './eval/index';
+export { Inference } from './inference';
+export { PostTraining } from './post-training/index';
diff --git a/src/resources/alpha/inference.ts b/src/resources/alpha/inference.ts
new file mode 100644
index 0000000..ecdd4e7
--- /dev/null
+++ b/src/resources/alpha/inference.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+
+export class Inference extends APIResource {}
diff --git a/src/resources/post-training.ts b/src/resources/alpha/post-training.ts
similarity index 100%
rename from src/resources/post-training.ts
rename to src/resources/alpha/post-training.ts
diff --git a/src/resources/alpha/post-training/index.ts b/src/resources/alpha/post-training/index.ts
new file mode 100644
index 0000000..55c125a
--- /dev/null
+++ b/src/resources/alpha/post-training/index.ts
@@ -0,0 +1,4 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Job } from './job';
+export { PostTraining } from './post-training';
diff --git a/src/resources/alpha/post-training/job.ts b/src/resources/alpha/post-training/job.ts
new file mode 100644
index 0000000..0a019ac
--- /dev/null
+++ b/src/resources/alpha/post-training/job.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+
+export class Job extends APIResource {}
diff --git a/src/resources/alpha/post-training/post-training.ts b/src/resources/alpha/post-training/post-training.ts
new file mode 100644
index 0000000..813ae6a
--- /dev/null
+++ b/src/resources/alpha/post-training/post-training.ts
@@ -0,0 +1,15 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import * as JobAPI from './job';
+import { Job } from './job';
+
+export class PostTraining extends APIResource {
+ job: JobAPI.Job = new JobAPI.Job(this._client);
+}
+
+PostTraining.Job = Job;
+
+export declare namespace PostTraining {
+ export { Job as Job };
+}
diff --git a/src/resources/benchmarks.ts b/src/resources/benchmarks.ts
deleted file mode 100644
index b6b8363..0000000
--- a/src/resources/benchmarks.ts
+++ /dev/null
@@ -1,111 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import * as Core from '../core';
-
-export class Benchmarks extends APIResource {
- /**
- * Get a benchmark by its ID.
- */
- retrieve(benchmarkId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}`, options);
- }
-
- /**
- * List all benchmarks.
- */
- list(options?: Core.RequestOptions): Core.APIPromise {
- return (
- this._client.get('/v1/eval/benchmarks', options) as Core.APIPromise<{ data: BenchmarkListResponse }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Register a benchmark.
- */
- register(body: BenchmarkRegisterParams, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post('/v1/eval/benchmarks', {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * A benchmark resource for evaluating model performance.
- */
-export interface Benchmark {
- /**
- * Identifier of the dataset to use for the benchmark evaluation
- */
- dataset_id: string;
-
- identifier: string;
-
- /**
- * Metadata for this evaluation task
- */
- metadata: { [key: string]: boolean | number | string | Array | unknown | null };
-
- provider_id: string;
-
- /**
- * List of scoring function identifiers to apply during evaluation
- */
- scoring_functions: Array;
-
- /**
- * The resource type, always benchmark
- */
- type: 'benchmark';
-
- provider_resource_id?: string;
-}
-
-export interface ListBenchmarksResponse {
- data: BenchmarkListResponse;
-}
-
-export type BenchmarkListResponse = Array;
-
-export interface BenchmarkRegisterParams {
- /**
- * The ID of the benchmark to register.
- */
- benchmark_id: string;
-
- /**
- * The ID of the dataset to use for the benchmark.
- */
- dataset_id: string;
-
- /**
- * The scoring functions to use for the benchmark.
- */
- scoring_functions: Array;
-
- /**
- * The metadata to use for the benchmark.
- */
- metadata?: { [key: string]: boolean | number | string | Array | unknown | null };
-
- /**
- * The ID of the provider benchmark to use for the benchmark.
- */
- provider_benchmark_id?: string;
-
- /**
- * The ID of the provider to use for the benchmark.
- */
- provider_id?: string;
-}
-
-export declare namespace Benchmarks {
- export {
- type Benchmark as Benchmark,
- type ListBenchmarksResponse as ListBenchmarksResponse,
- type BenchmarkListResponse as BenchmarkListResponse,
- type BenchmarkRegisterParams as BenchmarkRegisterParams,
- };
-}
diff --git a/src/resources/beta.ts b/src/resources/beta.ts
new file mode 100644
index 0000000..1542e94
--- /dev/null
+++ b/src/resources/beta.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './beta/index';
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
new file mode 100644
index 0000000..c579b07
--- /dev/null
+++ b/src/resources/beta/beta.ts
@@ -0,0 +1,15 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as DatasetsAPI from './datasets';
+import { Datasets } from './datasets';
+
+export class Beta extends APIResource {
+ datasets: DatasetsAPI.Datasets = new DatasetsAPI.Datasets(this._client);
+}
+
+Beta.Datasets = Datasets;
+
+export declare namespace Beta {
+ export { Datasets as Datasets };
+}
diff --git a/src/resources/beta/datasets.ts b/src/resources/beta/datasets.ts
new file mode 100644
index 0000000..06f918c
--- /dev/null
+++ b/src/resources/beta/datasets.ts
@@ -0,0 +1,5 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+
+export class Datasets extends APIResource {}
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
new file mode 100644
index 0000000..e2acaeb
--- /dev/null
+++ b/src/resources/beta/index.ts
@@ -0,0 +1,4 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Beta } from './beta';
+export { Datasets } from './datasets';
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index b43e6d3..cc0e0eb 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -46,6 +46,11 @@ export interface ChatCompletionChunk {
* The object type, which will be "chat.completion.chunk"
*/
object: 'chat.completion.chunk';
+
+ /**
+ * Token usage information (typically included in final chunk with stream_options)
+ */
+ usage?: ChatCompletionChunk.Usage;
}
export namespace ChatCompletionChunk {
@@ -84,6 +89,11 @@ export namespace ChatCompletionChunk {
*/
content?: string;
+ /**
+ * (Optional) The reasoning content from the model (non-standard, for o1/o3 models)
+ */
+ reasoning_content?: string;
+
/**
* (Optional) The refusal of the delta
*/
@@ -217,6 +227,58 @@ export namespace ChatCompletionChunk {
}
}
}
+
+ /**
+ * Token usage information (typically included in final chunk with stream_options)
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
Chat.Completions = Completions;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index c7ed5e8..7c8f133 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -11,8 +11,8 @@ import { Stream } from '../../streaming';
export class Completions extends APIResource {
/**
- * Generate an OpenAI-compatible chat completion for the given messages using the
- * specified model.
+ * Create chat completions. Generate an OpenAI-compatible chat completion for the
+ * given messages using the specified model.
*/
create(
body: CompletionCreateParamsNonStreaming,
@@ -30,22 +30,20 @@ export class Completions extends APIResource {
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise | APIPromise> {
- return this._client.post('/v1/openai/v1/chat/completions', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
+ return this._client.post('/v1/chat/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise
+ | APIPromise>;
}
/**
- * Describe a chat completion by its ID.
+ * Get chat completion. Describe a chat completion by its ID.
*/
retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/openai/v1/chat/completions/${completionId}`, options);
+ return this._client.get(`/v1/chat/completions/${completionId}`, options);
}
/**
- * List all chat completions.
+ * List chat completions.
*/
list(
query?: CompletionListParams,
@@ -61,11 +59,10 @@ export class Completions extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList(
- '/v1/openai/v1/chat/completions',
- CompletionListResponsesOpenAICursorPage,
- { query, ...options },
- );
+ return this._client.getAPIList('/v1/chat/completions', CompletionListResponsesOpenAICursorPage, {
+ query,
+ ...options,
+ });
}
}
@@ -107,6 +104,11 @@ export namespace CompletionCreateResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: OpenAIChatCompletion.Usage;
}
export namespace OpenAIChatCompletion {
@@ -504,6 +506,58 @@ export namespace CompletionCreateResponse {
}
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
}
@@ -540,6 +594,11 @@ export interface CompletionRetrieveResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: CompletionRetrieveResponse.Usage;
}
export namespace CompletionRetrieveResponse {
@@ -1227,6 +1286,58 @@ export namespace CompletionRetrieveResponse {
type: 'text';
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
export interface CompletionListResponse {
@@ -1262,6 +1373,11 @@ export interface CompletionListResponse {
* The object type, which will be "chat.completion"
*/
object: 'chat.completion';
+
+ /**
+ * Token usage information for the completion
+ */
+ usage?: CompletionListResponse.Usage;
}
export namespace CompletionListResponse {
@@ -1949,6 +2065,58 @@ export namespace CompletionListResponse {
type: 'text';
}
}
+
+ /**
+ * Token usage information for the completion
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the completion
+ */
+ completion_tokens: number;
+
+ /**
+ * Number of tokens in the prompt
+ */
+ prompt_tokens: number;
+
+ /**
+ * Total tokens used (prompt + completion)
+ */
+ total_tokens: number;
+
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ prompt_tokens_details?: Usage.PromptTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Token details for output tokens in OpenAI chat completion usage.
+ */
+ export interface CompletionTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+
+ /**
+ * Token details for prompt tokens in OpenAI chat completion usage.
+ */
+ export interface PromptTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+ }
}
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 0ade7ab..fe49a25 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -8,8 +8,8 @@ import { Stream } from '../streaming';
export class Completions extends APIResource {
/**
- * Generate an OpenAI-compatible completion for the given prompt using the
- * specified model.
+ * Create completion. Generate an OpenAI-compatible completion for the given prompt
+ * using the specified model.
*/
create(
body: CompletionCreateParamsNonStreaming,
@@ -27,11 +27,9 @@ export class Completions extends APIResource {
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise | APIPromise> {
- return this._client.post('/v1/openai/v1/completions', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
+ return this._client.post('/v1/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise
+ | APIPromise>;
}
}
@@ -174,8 +172,6 @@ export interface CompletionCreateParamsBase {
*/
frequency_penalty?: number;
- guided_choice?: Array;
-
/**
* (Optional) The logit bias to use.
*/
@@ -201,8 +197,6 @@ export interface CompletionCreateParamsBase {
*/
presence_penalty?: number;
- prompt_logprobs?: number;
-
/**
* (Optional) The seed to use.
*/
diff --git a/src/resources/conversations.ts b/src/resources/conversations.ts
new file mode 100644
index 0000000..6b50950
--- /dev/null
+++ b/src/resources/conversations.ts
@@ -0,0 +1,3 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export * from './conversations/index';
diff --git a/src/resources/conversations/conversations.ts b/src/resources/conversations/conversations.ts
new file mode 100644
index 0000000..0465dec
--- /dev/null
+++ b/src/resources/conversations/conversations.ts
@@ -0,0 +1,585 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import * as Core from '../../core';
+import * as ItemsAPI from './items';
+import {
+ ItemCreateParams,
+ ItemCreateResponse,
+ ItemGetResponse,
+ ItemListParams,
+ ItemListResponse,
+ ItemListResponsesOpenAICursorPage,
+ Items,
+} from './items';
+
+export class Conversations extends APIResource {
+ items: ItemsAPI.Items = new ItemsAPI.Items(this._client);
+
+ /**
+ * Create a conversation. Create a conversation.
+ */
+ create(body: ConversationCreateParams, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.post('/v1/conversations', { body, ...options });
+ }
+
+ /**
+ * Retrieve a conversation. Get a conversation with the given ID.
+ */
+ retrieve(conversationId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.get(`/v1/conversations/${conversationId}`, options);
+ }
+
+ /**
+ * Update a conversation. Update a conversation's metadata with the given ID.
+ */
+ update(
+ conversationId: string,
+ body: ConversationUpdateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.post(`/v1/conversations/${conversationId}`, { body, ...options });
+ }
+
+ /**
+ * Delete a conversation. Delete a conversation with the given ID.
+ */
+ delete(conversationId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.delete(`/v1/conversations/${conversationId}`, options);
+ }
+}
+
+/**
+ * OpenAI-compatible conversation object.
+ */
+export interface ConversationObject {
+ id: string;
+
+ created_at: number;
+
+ object: 'conversation';
+
+ items?: Array;
+
+ metadata?: { [key: string]: string };
+}
+
+/**
+ * Response for deleted conversation.
+ */
+export interface ConversationDeleteResponse {
+ id: string;
+
+ deleted: boolean;
+
+ object: string;
+}
+
+export interface ConversationCreateParams {
+ /**
+ * Initial items to include in the conversation context.
+ */
+ items?: Array<
+ | ConversationCreateParams.OpenAIResponseMessage
+ | ConversationCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ConversationCreateParams.OpenAIResponseInputFunctionToolCallOutput
+ | ConversationCreateParams.OpenAIResponseMcpApprovalRequest
+ | ConversationCreateParams.OpenAIResponseMcpApprovalResponse
+ | ConversationCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ConversationCreateParams.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ /**
+ * Set of key-value pairs that can be attached to an object.
+ */
+ metadata?: { [key: string]: string };
+}
+
+export namespace ConversationCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ConversationUpdateParams {
+ /**
+ * Set of key-value pairs that can be attached to an object.
+ */
+ metadata: { [key: string]: string };
+}
+
+Conversations.Items = Items;
+Conversations.ItemListResponsesOpenAICursorPage = ItemListResponsesOpenAICursorPage;
+
+export declare namespace Conversations {
+ export {
+ type ConversationObject as ConversationObject,
+ type ConversationDeleteResponse as ConversationDeleteResponse,
+ type ConversationCreateParams as ConversationCreateParams,
+ type ConversationUpdateParams as ConversationUpdateParams,
+ };
+
+ export {
+ Items as Items,
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ ItemListResponsesOpenAICursorPage as ItemListResponsesOpenAICursorPage,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
diff --git a/src/resources/conversations/index.ts b/src/resources/conversations/index.ts
new file mode 100644
index 0000000..de33b78
--- /dev/null
+++ b/src/resources/conversations/index.ts
@@ -0,0 +1,18 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations';
+export {
+ ItemListResponsesOpenAICursorPage,
+ Items,
+ type ItemCreateResponse,
+ type ItemListResponse,
+ type ItemGetResponse,
+ type ItemCreateParams,
+ type ItemListParams,
+} from './items';
diff --git a/src/resources/conversations/items.ts b/src/resources/conversations/items.ts
new file mode 100644
index 0000000..6c2ae87
--- /dev/null
+++ b/src/resources/conversations/items.ts
@@ -0,0 +1,1998 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import { OpenAICursorPage, type OpenAICursorPageParams } from '../../pagination';
+
+export class Items extends APIResource {
+ /**
+ * Create items. Create items in the conversation.
+ */
+ create(
+ conversationId: string,
+ body: ItemCreateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.post(`/v1/conversations/${conversationId}/items`, { body, ...options });
+ }
+
+ /**
+ * List items. List items in the conversation.
+ */
+ list(
+ conversationId: string,
+ query?: ItemListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(
+ conversationId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(
+ conversationId: string,
+ query: ItemListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise {
+ if (isRequestOptions(query)) {
+ return this.list(conversationId, {}, query);
+ }
+ return this._client.getAPIList(
+ `/v1/conversations/${conversationId}/items`,
+ ItemListResponsesOpenAICursorPage,
+ { query, ...options },
+ );
+ }
+
+ /**
+ * Retrieve an item. Retrieve a conversation item.
+ */
+ get(
+ conversationId: string,
+ itemId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise {
+ return this._client.get(`/v1/conversations/${conversationId}/items/${itemId}`, options);
+ }
+}
+
+export class ItemListResponsesOpenAICursorPage extends OpenAICursorPage {}
+
+/**
+ * List of conversation items with pagination.
+ */
+export interface ItemCreateResponse {
+ data: Array<
+ | ItemCreateResponse.OpenAIResponseMessage
+ | ItemCreateResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemCreateResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ItemCreateResponse.OpenAIResponseMcpApprovalRequest
+ | ItemCreateResponse.OpenAIResponseMcpApprovalResponse
+ | ItemCreateResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemCreateResponse.OpenAIResponseOutputMessageMcpListTools
+ >;
+
+ has_more: boolean;
+
+ object: string;
+
+ first_id?: string;
+
+ last_id?: string;
+}
+
+export namespace ItemCreateResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+/**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+export type ItemListResponse =
+ | ItemListResponse.OpenAIResponseMessage
+ | ItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ItemListResponse.OpenAIResponseMcpApprovalRequest
+ | ItemListResponse.OpenAIResponseMcpApprovalResponse
+ | ItemListResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemListResponse.OpenAIResponseOutputMessageMcpListTools;
+
+export namespace ItemListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+/**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+export type ItemGetResponse =
+ | ItemGetResponse.OpenAIResponseMessage
+ | ItemGetResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemGetResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemGetResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ItemGetResponse.OpenAIResponseMcpApprovalRequest
+ | ItemGetResponse.OpenAIResponseMcpApprovalResponse
+ | ItemGetResponse.OpenAIResponseOutputMessageMcpCall
+ | ItemGetResponse.OpenAIResponseOutputMessageMcpListTools;
+
+export namespace ItemGetResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ItemCreateParams {
+ /**
+ * Items to include in the conversation context.
+ */
+ items: Array<
+ | ItemCreateParams.OpenAIResponseMessage
+ | ItemCreateParams.OpenAIResponseOutputMessageWebSearchToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageFileSearchToolCall
+ | ItemCreateParams.OpenAIResponseOutputMessageFunctionToolCall
+ | ItemCreateParams.OpenAIResponseInputFunctionToolCallOutput
+ | ItemCreateParams.OpenAIResponseMcpApprovalRequest
+ | ItemCreateParams.OpenAIResponseMcpApprovalResponse
+ | ItemCreateParams.OpenAIResponseOutputMessageMcpCall
+ | ItemCreateParams.OpenAIResponseOutputMessageMcpListTools
+ >;
+}
+
+export namespace ItemCreateParams {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+}
+
+export interface ItemListParams extends OpenAICursorPageParams {
+ /**
+ * Specify additional output data to include in the response.
+ */
+ include?: Array<
+ | 'web_search_call.action.sources'
+ | 'code_interpreter_call.outputs'
+ | 'computer_call_output.output.image_url'
+ | 'file_search_call.results'
+ | 'message.input_image.image_url'
+ | 'message.output_text.logprobs'
+ | 'reasoning.encrypted_content'
+ >;
+
+ /**
+ * The order to return items in (asc or desc, default desc).
+ */
+ order?: 'asc' | 'desc';
+}
+
+Items.ItemListResponsesOpenAICursorPage = ItemListResponsesOpenAICursorPage;
+
+export declare namespace Items {
+ export {
+ type ItemCreateResponse as ItemCreateResponse,
+ type ItemListResponse as ItemListResponse,
+ type ItemGetResponse as ItemGetResponse,
+ ItemListResponsesOpenAICursorPage as ItemListResponsesOpenAICursorPage,
+ type ItemCreateParams as ItemCreateParams,
+ type ItemListParams as ItemListParams,
+ };
+}
diff --git a/src/resources/datasets.ts b/src/resources/datasets.ts
deleted file mode 100644
index 5ed6661..0000000
--- a/src/resources/datasets.ts
+++ /dev/null
@@ -1,407 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import { isRequestOptions } from '../core';
-import * as Core from '../core';
-
-export class Datasets extends APIResource {
- /**
- * Get a dataset by its ID.
- */
- retrieve(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<DatasetRetrieveResponse> {
- return this._client.get(`/v1/datasets/${datasetId}`, options);
- }
-
- /**
- * List all datasets.
- */
- list(options?: Core.RequestOptions): Core.APIPromise<DatasetListResponse> {
- return (
- this._client.get('/v1/datasets', options) as Core.APIPromise<{ data: DatasetListResponse }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Append rows to a dataset.
- */
- appendrows(
- datasetId: string,
- body: DatasetAppendrowsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<void> {
- return this._client.post(`/v1/datasetio/append-rows/${datasetId}`, {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-
- /**
- * Get a paginated list of rows from a dataset. Uses offset-based pagination where:
- *
- * - start_index: The starting index (0-based). If None, starts from beginning.
- * - limit: Number of items to return. If None or -1, returns all items.
- *
- * The response includes:
- *
- * - data: List of items for the current page.
- * - has_more: Whether there are more items available after this set.
- */
- iterrows(
- datasetId: string,
- query?: DatasetIterrowsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<DatasetIterrowsResponse>;
- iterrows(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<DatasetIterrowsResponse>;
- iterrows(
- datasetId: string,
- query: DatasetIterrowsParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise<DatasetIterrowsResponse> {
- if (isRequestOptions(query)) {
- return this.iterrows(datasetId, {}, query);
- }
- return this._client.get(`/v1/datasetio/iterrows/${datasetId}`, { query, ...options });
- }
-
- /**
- * Register a new dataset.
- */
- register(
- body: DatasetRegisterParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<DatasetRegisterResponse> {
- return this._client.post('/v1/datasets', { body, ...options });
- }
-
- /**
- * Unregister a dataset by its ID.
- */
- unregister(datasetId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.delete(`/v1/datasets/${datasetId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-}
-
-/**
- * Response from listing datasets.
- */
-export interface ListDatasetsResponse {
- /**
- * List of datasets
- */
- data: DatasetListResponse;
-}
-
-/**
- * Dataset resource for storing and accessing training or evaluation data.
- */
-export interface DatasetRetrieveResponse {
- identifier: string;
-
- /**
- * Additional metadata for the dataset
- */
- metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- provider_id: string;
-
- /**
- * Purpose of the dataset indicating its intended use
- */
- purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
-
- /**
- * Data source configuration for the dataset
- */
- source: DatasetRetrieveResponse.UriDataSource | DatasetRetrieveResponse.RowsDataSource;
-
- /**
- * Type of resource, always 'dataset' for datasets
- */
- type: 'dataset';
-
- provider_resource_id?: string;
-}
-
-export namespace DatasetRetrieveResponse {
- /**
- * A dataset that can be obtained from a URI.
- */
- export interface UriDataSource {
- type: 'uri';
-
- /**
- * The dataset can be obtained from a URI. E.g. -
- * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- * "data:csv;base64,{base64_content}"
- */
- uri: string;
- }
-
- /**
- * A dataset stored in rows.
- */
- export interface RowsDataSource {
- /**
- * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user",
- * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
- * ]
- */
- rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- type: 'rows';
- }
-}
-
-/**
- * List of datasets
- */
- export type DatasetListResponse = Array<DatasetListResponse.DatasetListResponseItem>;
-
-export namespace DatasetListResponse {
- /**
- * Dataset resource for storing and accessing training or evaluation data.
- */
- export interface DatasetListResponseItem {
- identifier: string;
-
- /**
- * Additional metadata for the dataset
- */
- metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- provider_id: string;
-
- /**
- * Purpose of the dataset indicating its intended use
- */
- purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
-
- /**
- * Data source configuration for the dataset
- */
- source: DatasetListResponseItem.UriDataSource | DatasetListResponseItem.RowsDataSource;
-
- /**
- * Type of resource, always 'dataset' for datasets
- */
- type: 'dataset';
-
- provider_resource_id?: string;
- }
-
- export namespace DatasetListResponseItem {
- /**
- * A dataset that can be obtained from a URI.
- */
- export interface UriDataSource {
- type: 'uri';
-
- /**
- * The dataset can be obtained from a URI. E.g. -
- * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- * "data:csv;base64,{base64_content}"
- */
- uri: string;
- }
-
- /**
- * A dataset stored in rows.
- */
- export interface RowsDataSource {
- /**
- * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user",
- * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
- * ]
- */
- rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- type: 'rows';
- }
- }
-}
-
-/**
- * A generic paginated response that follows a simple format.
- */
-export interface DatasetIterrowsResponse {
- /**
- * The list of items for the current page
- */
- data: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- /**
- * Whether there are more items available after this set
- */
- has_more: boolean;
-
- /**
- * The URL for accessing this list
- */
- url?: string;
-}
-
-/**
- * Dataset resource for storing and accessing training or evaluation data.
- */
-export interface DatasetRegisterResponse {
- identifier: string;
-
- /**
- * Additional metadata for the dataset
- */
- metadata: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- provider_id: string;
-
- /**
- * Purpose of the dataset indicating its intended use
- */
- purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
-
- /**
- * Data source configuration for the dataset
- */
- source: DatasetRegisterResponse.UriDataSource | DatasetRegisterResponse.RowsDataSource;
-
- /**
- * Type of resource, always 'dataset' for datasets
- */
- type: 'dataset';
-
- provider_resource_id?: string;
-}
-
-export namespace DatasetRegisterResponse {
- /**
- * A dataset that can be obtained from a URI.
- */
- export interface UriDataSource {
- type: 'uri';
-
- /**
- * The dataset can be obtained from a URI. E.g. -
- * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- * "data:csv;base64,{base64_content}"
- */
- uri: string;
- }
-
- /**
- * A dataset stored in rows.
- */
- export interface RowsDataSource {
- /**
- * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user",
- * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
- * ]
- */
- rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- type: 'rows';
- }
-}
-
-export interface DatasetAppendrowsParams {
- /**
- * The rows to append to the dataset.
- */
- rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-}
-
-export interface DatasetIterrowsParams {
- /**
- * The number of rows to get.
- */
- limit?: number;
-
- /**
- * Index into dataset for the first row to get. Get all rows if None.
- */
- start_index?: number;
-}
-
-export interface DatasetRegisterParams {
- /**
- * The purpose of the dataset. One of: - "post-training/messages": The dataset
- * contains a messages column with list of messages for post-training. {
- * "messages": [ {"role": "user", "content": "Hello, world!"}, {"role":
- * "assistant", "content": "Hello, world!"}, ] } - "eval/question-answer": The
- * dataset contains a question column and an answer column for evaluation. {
- * "question": "What is the capital of France?", "answer": "Paris" } -
- * "eval/messages-answer": The dataset contains a messages column with list of
- * messages and an answer column for evaluation. { "messages": [ {"role": "user",
- * "content": "Hello, my name is John Doe."}, {"role": "assistant", "content":
- * "Hello, John Doe. How can I help you today?"}, {"role": "user", "content":
- * "What's my name?"}, ], "answer": "John Doe" }
- */
- purpose: 'post-training/messages' | 'eval/question-answer' | 'eval/messages-answer';
-
- /**
- * The data source of the dataset. Ensure that the data source schema is compatible
- * with the purpose of the dataset. Examples: - { "type": "uri", "uri":
- * "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri":
- * "lsfs://mydata.jsonl" } - { "type": "uri", "uri":
- * "data:csv;base64,{base64_content}" } - { "type": "uri", "uri":
- * "huggingface://llamastack/simpleqa?split=train" } - { "type": "rows", "rows": [
- * { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role":
- * "assistant", "content": "Hello, world!"}, ] } ] }
- */
- source: DatasetRegisterParams.UriDataSource | DatasetRegisterParams.RowsDataSource;
-
- /**
- * The ID of the dataset. If not provided, an ID will be generated.
- */
- dataset_id?: string;
-
- /**
- * The metadata for the dataset. - E.g. {"description": "My dataset"}.
- */
- metadata?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-}
-
-export namespace DatasetRegisterParams {
- /**
- * A dataset that can be obtained from a URI.
- */
- export interface UriDataSource {
- type: 'uri';
-
- /**
- * The dataset can be obtained from a URI. E.g. -
- * "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- * "data:csv;base64,{base64_content}"
- */
- uri: string;
- }
-
- /**
- * A dataset stored in rows.
- */
- export interface RowsDataSource {
- /**
- * The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user",
- * "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}]}
- * ]
- */
- rows: Array<{ [key: string]: boolean | number | string | Array<unknown> | unknown | null }>;
-
- type: 'rows';
- }
-}
-
-export declare namespace Datasets {
- export {
- type ListDatasetsResponse as ListDatasetsResponse,
- type DatasetRetrieveResponse as DatasetRetrieveResponse,
- type DatasetListResponse as DatasetListResponse,
- type DatasetIterrowsResponse as DatasetIterrowsResponse,
- type DatasetRegisterResponse as DatasetRegisterResponse,
- type DatasetAppendrowsParams as DatasetAppendrowsParams,
- type DatasetIterrowsParams as DatasetIterrowsParams,
- type DatasetRegisterParams as DatasetRegisterParams,
- };
-}
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index 89758af..f07ff14 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -5,14 +5,14 @@ import * as Core from '../core';
export class Embeddings extends APIResource {
/**
- * Generate OpenAI-compatible embeddings for the given input using the specified
- * model.
+ * Create embeddings. Generate OpenAI-compatible embeddings for the given input
+ * using the specified model.
*/
create(
body: EmbeddingCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise {
- return this._client.post('/v1/openai/v1/embeddings', { body, ...options });
+ return this._client.post('/v1/embeddings', { body, ...options });
}
}
diff --git a/src/resources/eval/eval.ts b/src/resources/eval/eval.ts
deleted file mode 100644
index 961b24e..0000000
--- a/src/resources/eval/eval.ts
+++ /dev/null
@@ -1,210 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as ScoringFunctionsAPI from '../scoring-functions';
-import * as Shared from '../shared';
-import * as JobsAPI from './jobs';
-import { Jobs } from './jobs';
-
-export class Eval extends APIResource {
- jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client);
-
- /**
- * Evaluate a list of rows on a benchmark.
- */
- evaluateRows(
- benchmarkId: string,
- body: EvalEvaluateRowsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
- }
-
- /**
- * Evaluate a list of rows on a benchmark.
- */
- evaluateRowsAlpha(
- benchmarkId: string,
- body: EvalEvaluateRowsAlphaParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/evaluations`, { body, ...options });
- }
-
- /**
- * Run an evaluation on a benchmark.
- */
- runEval(benchmarkId: string, body: EvalRunEvalParams, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
- }
-
- /**
- * Run an evaluation on a benchmark.
- */
- runEvalAlpha(
- benchmarkId: string,
- body: EvalRunEvalAlphaParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(`/v1/eval/benchmarks/${benchmarkId}/jobs`, { body, ...options });
- }
-}
-
-/**
- * A benchmark configuration for evaluation.
- */
-export interface BenchmarkConfig {
- /**
- * The candidate to evaluate.
- */
- eval_candidate: EvalCandidate;
-
- /**
- * Map between scoring function id and parameters for each scoring function you
- * want to run
- */
- scoring_params: { [key: string]: ScoringFunctionsAPI.ScoringFnParams };
-
- /**
- * (Optional) The number of examples to evaluate. If not provided, all examples in
- * the dataset will be evaluated
- */
- num_examples?: number;
-}
-
-/**
- * A model candidate for evaluation.
- */
-export type EvalCandidate = EvalCandidate.ModelCandidate | EvalCandidate.AgentCandidate;
-
-export namespace EvalCandidate {
- /**
- * A model candidate for evaluation.
- */
- export interface ModelCandidate {
- /**
- * The model ID to evaluate.
- */
- model: string;
-
- /**
- * The sampling parameters for the model.
- */
- sampling_params: Shared.SamplingParams;
-
- type: 'model';
-
- /**
- * (Optional) The system message providing instructions or context to the model.
- */
- system_message?: Shared.SystemMessage;
- }
-
- /**
- * An agent candidate for evaluation.
- */
- export interface AgentCandidate {
- /**
- * The configuration for the agent candidate.
- */
- config: Shared.AgentConfig;
-
- type: 'agent';
- }
-}
-
-/**
- * The response from an evaluation.
- */
-export interface EvaluateResponse {
- /**
- * The generations from the evaluation.
- */
- generations: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
-
- /**
- * The scores from the evaluation.
- */
- scores: { [key: string]: Shared.ScoringResult };
-}
-
-/**
- * A job execution instance with status tracking.
- */
-export interface Job {
- /**
- * Unique identifier for the job
- */
- job_id: string;
-
- /**
- * Current execution status of the job
- */
- status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled';
-}
-
-export interface EvalEvaluateRowsParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-
- /**
- * The rows to evaluate.
- */
- input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
-
- /**
- * The scoring functions to use for the evaluation.
- */
- scoring_functions: Array;
-}
-
-export interface EvalEvaluateRowsAlphaParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-
- /**
- * The rows to evaluate.
- */
- input_rows: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
-
- /**
- * The scoring functions to use for the evaluation.
- */
- scoring_functions: Array;
-}
-
-export interface EvalRunEvalParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-}
-
-export interface EvalRunEvalAlphaParams {
- /**
- * The configuration for the benchmark.
- */
- benchmark_config: BenchmarkConfig;
-}
-
-Eval.Jobs = Jobs;
-
-export declare namespace Eval {
- export {
- type BenchmarkConfig as BenchmarkConfig,
- type EvalCandidate as EvalCandidate,
- type EvaluateResponse as EvaluateResponse,
- type Job as Job,
- type EvalEvaluateRowsParams as EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams as EvalRunEvalParams,
- type EvalRunEvalAlphaParams as EvalRunEvalAlphaParams,
- };
-
- export { Jobs as Jobs };
-}
diff --git a/src/resources/eval/index.ts b/src/resources/eval/index.ts
deleted file mode 100644
index e8c35f3..0000000
--- a/src/resources/eval/index.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Eval,
- type BenchmarkConfig,
- type EvalCandidate,
- type EvaluateResponse,
- type Job,
- type EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams,
- type EvalRunEvalAlphaParams,
-} from './eval';
-export { Jobs } from './jobs';
diff --git a/src/resources/eval/jobs.ts b/src/resources/eval/jobs.ts
deleted file mode 100644
index 13d4a4d..0000000
--- a/src/resources/eval/jobs.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as EvalAPI from './eval';
-
-export class Jobs extends APIResource {
- /**
- * Get the result of a job.
- */
- retrieve(
- benchmarkId: string,
- jobId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}/result`, options);
- }
-
- /**
- * Cancel a job.
- */
- cancel(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, {
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-
- /**
- * Get the status of a job.
- */
- status(benchmarkId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/eval/benchmarks/${benchmarkId}/jobs/${jobId}`, options);
- }
-}
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 4dc5223..e59026e 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -7,25 +7,27 @@ import { OpenAICursorPage, type OpenAICursorPageParams } from '../pagination';
export class Files extends APIResource {
/**
- * Upload a file that can be used across various endpoints. The file upload should
- * be a multipart form request with:
+ * Upload file. Upload a file that can be used across various endpoints.
+ *
+ * The file upload should be a multipart form request with:
*
* - file: The File object (not file name) to be uploaded.
* - purpose: The intended purpose of the uploaded file.
+ * - expires_after: Optional form values describing expiration for the file.
*/
create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post('/v1/openai/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
+ return this._client.post('/v1/files', Core.multipartFormRequestOptions({ body, ...options }));
}
/**
- * Returns information about a specific file.
+ * Retrieve file. Returns information about a specific file.
*/
retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/openai/v1/files/${fileId}`, options);
+ return this._client.get(`/v1/files/${fileId}`, options);
}
/**
- * Returns a list of files that belong to the user's organization.
+ * List files. Returns a list of files that belong to the user's organization.
*/
list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise;
list(options?: Core.RequestOptions): Core.PagePromise;
@@ -36,21 +38,21 @@ export class Files extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/files', FilesOpenAICursorPage, { query, ...options });
+ return this._client.getAPIList('/v1/files', FilesOpenAICursorPage, { query, ...options });
}
/**
- * Delete a file.
+ * Delete file.
*/
delete(fileId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/openai/v1/files/${fileId}`, options);
+ return this._client.delete(`/v1/files/${fileId}`, options);
}
/**
- * Returns the contents of the specified file.
+ * Retrieve file content. Returns the contents of the specified file.
*/
content(fileId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/openai/v1/files/${fileId}/content`, options);
+ return this._client.get(`/v1/files/${fileId}/content`, options);
}
}
@@ -155,6 +157,28 @@ export interface FileCreateParams {
* Valid purpose values for OpenAI Files API.
*/
purpose: 'assistants' | 'batch';
+
+ /**
+ * Control expiration of uploaded files. Params:
+ *
+ * - anchor, must be "created_at"
+ * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ */
+ expires_after?: FileCreateParams.ExpiresAfter;
+}
+
+export namespace FileCreateParams {
+ /**
+ * Control expiration of uploaded files. Params:
+ *
+ * - anchor, must be "created_at"
+ * - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ */
+ export interface ExpiresAfter {
+ anchor: 'created_at';
+
+ seconds: number;
+ }
}
export interface FileListParams extends OpenAICursorPageParams {
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 58ad928..b255e39 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,26 +1,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export * from './shared';
-export {
- Agents,
- type InferenceStep,
- type MemoryRetrievalStep,
- type ShieldCallStep,
- type ToolExecutionStep,
- type ToolResponse,
- type AgentCreateResponse,
- type AgentRetrieveResponse,
- type AgentListResponse,
- type AgentCreateParams,
- type AgentListParams,
-} from './agents/agents';
-export {
- Benchmarks,
- type Benchmark,
- type ListBenchmarksResponse,
- type BenchmarkListResponse,
- type BenchmarkRegisterParams,
-} from './benchmarks';
+export { Alpha } from './alpha/alpha';
+export { Beta } from './beta/beta';
export { Chat, type ChatCompletionChunk } from './chat/chat';
export {
Completions,
@@ -30,28 +12,13 @@ export {
type CompletionCreateParamsStreaming,
} from './completions';
export {
- Datasets,
- type ListDatasetsResponse,
- type DatasetRetrieveResponse,
- type DatasetListResponse,
- type DatasetIterrowsResponse,
- type DatasetRegisterResponse,
- type DatasetAppendrowsParams,
- type DatasetIterrowsParams,
- type DatasetRegisterParams,
-} from './datasets';
+ Conversations,
+ type ConversationObject,
+ type ConversationDeleteResponse,
+ type ConversationCreateParams,
+ type ConversationUpdateParams,
+} from './conversations/conversations';
export { Embeddings, type CreateEmbeddingsResponse, type EmbeddingCreateParams } from './embeddings';
-export {
- Eval,
- type BenchmarkConfig,
- type EvalCandidate,
- type EvaluateResponse,
- type Job,
- type EvalEvaluateRowsParams,
- type EvalEvaluateRowsAlphaParams,
- type EvalRunEvalParams,
- type EvalRunEvalAlphaParams,
-} from './eval/eval';
export {
FilesOpenAICursorPage,
Files,
@@ -62,25 +29,6 @@ export {
type FileCreateParams,
type FileListParams,
} from './files';
-export {
- Inference,
- type ChatCompletionResponseStreamChunk,
- type CompletionResponse,
- type EmbeddingsResponse,
- type TokenLogProbs,
- type InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse,
- type InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams,
- type InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams,
- type InferenceRerankParams,
-} from './inference';
export { Inspect, type HealthInfo, type ProviderInfo, type RouteInfo, type VersionInfo } from './inspect';
export {
Models,
@@ -90,14 +38,6 @@ export {
type ModelRegisterParams,
} from './models/models';
export { Moderations, type CreateResponse, type ModerationCreateParams } from './moderations';
-export {
- PostTraining,
- type AlgorithmConfig,
- type ListPostTrainingJobsResponse,
- type PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams,
-} from './post-training/post-training';
export { Providers, type ListProvidersResponse, type ProviderListResponse } from './providers';
export {
ResponseListResponsesOpenAICursorPage,
@@ -111,7 +51,7 @@ export {
type ResponseCreateParamsStreaming,
type ResponseListParams,
} from './responses/responses';
-export { Routes, type ListRoutesResponse, type RouteListResponse } from './routes';
+export { Routes, type ListRoutesResponse, type RouteListResponse, type RouteListParams } from './routes';
export { Safety, type RunShieldResponse, type SafetyRunShieldParams } from './safety';
export {
Scoring,
@@ -140,25 +80,6 @@ export {
type SyntheticDataGenerationResponse,
type SyntheticDataGenerationGenerateParams,
} from './synthetic-data-generation';
-export {
- Telemetry,
- type Event,
- type QueryCondition,
- type QuerySpansResponse,
- type SpanWithStatus,
- type Trace,
- type TelemetryGetSpanResponse,
- type TelemetryGetSpanTreeResponse,
- type TelemetryQueryMetricsResponse,
- type TelemetryQuerySpansResponse,
- type TelemetryQueryTracesResponse,
- type TelemetryGetSpanTreeParams,
- type TelemetryLogEventParams,
- type TelemetryQueryMetricsParams,
- type TelemetryQuerySpansParams,
- type TelemetryQueryTracesParams,
- type TelemetrySaveSpansToDatasetParams,
-} from './telemetry';
export {
ToolRuntime,
type ToolDef,
@@ -174,21 +95,7 @@ export {
type ToolgroupListResponse,
type ToolgroupRegisterParams,
} from './toolgroups';
-export {
- Tools,
- type ListToolsResponse,
- type Tool,
- type ToolListResponse,
- type ToolListParams,
-} from './tools';
-export {
- VectorDBs,
- type ListVectorDBsResponse,
- type VectorDBRetrieveResponse,
- type VectorDBListResponse,
- type VectorDBRegisterResponse,
- type VectorDBRegisterParams,
-} from './vector-dbs';
+export { Tools, type ToolListResponse, type ToolListParams } from './tools';
export {
VectorIo,
type QueryChunksResponse,
diff --git a/src/resources/inference.ts b/src/resources/inference.ts
deleted file mode 100644
index a6f3e1e..0000000
--- a/src/resources/inference.ts
+++ /dev/null
@@ -1,762 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../resource';
-import { APIPromise } from '../core';
-import * as Core from '../core';
-import * as InferenceAPI from './inference';
-import * as Shared from './shared';
-import { Stream } from '../streaming';
-
-export class Inference extends APIResource {
- /**
- * Generate chat completions for a batch of messages using the specified model.
- */
- batchChatCompletion(
- body: InferenceBatchChatCompletionParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post('/v1/inference/batch-chat-completion', { body, ...options });
- }
-
- /**
- * Generate completions for a batch of content using the specified model.
- */
- batchCompletion(
- body: InferenceBatchCompletionParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post('/v1/inference/batch-completion', { body, ...options });
- }
-
- /**
- * Generate a chat completion for the given messages using the specified model.
- *
- * @deprecated /v1/inference/chat-completion is deprecated. Please use /v1/openai/v1/chat/completions.
- */
- chatCompletion(
- body: InferenceChatCompletionParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise;
- chatCompletion(
- body: InferenceChatCompletionParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise>;
- chatCompletion(
- body: InferenceChatCompletionParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise | Shared.ChatCompletionResponse>;
- chatCompletion(
- body: InferenceChatCompletionParams,
- options?: Core.RequestOptions,
- ): APIPromise | APIPromise> {
- return this._client.post('/v1/inference/chat-completion', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
- }
-
- /**
- * Generate a completion for the given content using the specified model.
- *
- * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.
- */
- completion(
- body: InferenceCompletionParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise;
- completion(
- body: InferenceCompletionParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise>;
- completion(
- body: InferenceCompletionParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise | CompletionResponse>;
- completion(
- body: InferenceCompletionParams,
- options?: Core.RequestOptions,
- ): APIPromise | APIPromise> {
- return this._client.post('/v1/inference/completion', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
- }
-
- /**
- * Generate embeddings for content pieces using the specified model.
- *
- * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.
- */
- embeddings(
- body: InferenceEmbeddingsParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post('/v1/inference/embeddings', { body, ...options });
- }
-
- /**
- * Rerank a list of documents based on their relevance to a query.
- */
- rerank(
- body: InferenceRerankParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return (
- this._client.post('/v1/inference/rerank', { body, ...options }) as Core.APIPromise<{
- data: InferenceRerankResponse;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-}
-
-/**
- * A chunk of a streamed chat completion response.
- */
-export interface ChatCompletionResponseStreamChunk {
- /**
- * The event containing the new content
- */
- event: ChatCompletionResponseStreamChunk.Event;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array;
-}
-
-export namespace ChatCompletionResponseStreamChunk {
- /**
- * The event containing the new content
- */
- export interface Event {
- /**
- * Content generated since last event. This can be one or more tokens, or a tool
- * call.
- */
- delta: Shared.ContentDelta;
-
- /**
- * Type of the event
- */
- event_type: 'start' | 'complete' | 'progress';
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array;
-
- /**
- * Optional reason why generation stopped, if complete
- */
- stop_reason?: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
- }
-}
-
-/**
- * Response from a completion request.
- */
-export interface CompletionResponse {
- /**
- * The generated completion text
- */
- content: string;
-
- /**
- * Reason why generation stopped
- */
- stop_reason: 'end_of_turn' | 'end_of_message' | 'out_of_tokens';
-
- /**
- * Optional log probabilities for generated tokens
- */
- logprobs?: Array;
-
- /**
- * (Optional) List of metrics associated with the API response
- */
- metrics?: Array;
-}
-
-/**
- * Response containing generated embeddings.
- */
-export interface EmbeddingsResponse {
- /**
- * List of embedding vectors, one per input content. Each embedding is a list of
- * floats. The dimensionality of the embedding is model-specific; you can check
- * model metadata using /models/{model_id}
- */
- embeddings: Array>;
-}
-
-/**
- * Log probabilities for generated tokens.
- */
-export interface TokenLogProbs {
- /**
- * Dictionary mapping tokens to their log probabilities
- */
- logprobs_by_token: { [key: string]: number };
-}
-
-/**
- * Response from a batch chat completion request.
- */
-export interface InferenceBatchChatCompletionResponse {
- /**
- * List of chat completion responses, one for each conversation in the batch
- */
- batch: Array;
-}
-
-/**
- * List of rerank result objects, sorted by relevance score (descending)
- */
-export type InferenceRerankResponse = Array;
-
-export namespace InferenceRerankResponse {
- /**
- * A single rerank result from a reranking response.
- */
- export interface InferenceRerankResponseItem {
- /**
- * The original index of the document in the input list
- */
- index: number;
-
- /**
- * The relevance score from the model output. Values are inverted when applicable
- * so that higher scores indicate greater relevance.
- */
- relevance_score: number;
- }
-}
-
-export interface InferenceBatchChatCompletionParams {
- /**
- * The messages to generate completions for.
- */
- messages_batch: Array>;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceBatchChatCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) Configuration for tool use.
- */
- tool_config?: InferenceBatchChatCompletionParams.ToolConfig;
-
- /**
- * (Optional) List of tool definitions available to the model.
- */
- tools?: Array;
-}
-
-export namespace InferenceBatchChatCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- /**
- * (Optional) Configuration for tool use.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface Tool {
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- description?: string;
-
- parameters?: { [key: string]: Shared.ToolParamDefinition };
- }
-}
-
-export interface InferenceBatchCompletionParams {
- /**
- * The content to generate completions for.
- */
- content_batch: Array;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceBatchCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-}
-
-export namespace InferenceBatchCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-}
-
-export type InferenceChatCompletionParams =
- | InferenceChatCompletionParamsNonStreaming
- | InferenceChatCompletionParamsStreaming;
-
-export interface InferenceChatCompletionParamsBase {
- /**
- * List of messages in the conversation.
- */
- messages: Array;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceChatCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding. There are two
- * options: - `ResponseFormat.json_schema`: The grammar is a JSON schema. Most
- * providers support this format. - `ResponseFormat.grammar`: The grammar is a BNF
- * grammar. This format is more flexible, but not all providers support it.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-
- /**
- * (Optional) Whether tool use is required or automatic. Defaults to
- * ToolChoice.auto. .. deprecated:: Use tool_config instead.
- */
- tool_choice?: 'auto' | 'required' | 'none';
-
- /**
- * (Optional) Configuration for tool use.
- */
- tool_config?: InferenceChatCompletionParams.ToolConfig;
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls. .. deprecated:: Use
- * tool_config instead.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
-
- /**
- * (Optional) List of tool definitions available to the model.
- */
- tools?: Array;
-}
-
-export namespace InferenceChatCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- /**
- * (Optional) Configuration for tool use.
- */
- export interface ToolConfig {
- /**
- * (Optional) Config for how to override the default system prompt. -
- * `SystemMessageBehavior.append`: Appends the provided system message to the
- * default system prompt. - `SystemMessageBehavior.replace`: Replaces the default
- * system prompt with the provided system message. The system message can include
- * the string '{{function_definitions}}' to indicate where the function definitions
- * should be inserted.
- */
- system_message_behavior?: 'append' | 'replace';
-
- /**
- * (Optional) Whether tool use is automatic, required, or none. Can also specify a
- * tool name to use a specific tool. Defaults to ToolChoice.auto.
- */
- tool_choice?: 'auto' | 'required' | 'none' | (string & {});
-
- /**
- * (Optional) Instructs the model how to format tool calls. By default, Llama Stack
- * will attempt to use a format that is best adapted to the model. -
- * `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. -
- * `ToolPromptFormat.function_tag`: The tool calls are enclosed in a
- * tag. - `ToolPromptFormat.python_list`: The tool calls
- * are output as Python syntax -- a list of function calls.
- */
- tool_prompt_format?: 'json' | 'function_tag' | 'python_list';
- }
-
- export interface Tool {
- tool_name: 'brave_search' | 'wolfram_alpha' | 'photogen' | 'code_interpreter' | (string & {});
-
- description?: string;
-
- parameters?: { [key: string]: Shared.ToolParamDefinition };
- }
-
- export type InferenceChatCompletionParamsNonStreaming =
- InferenceAPI.InferenceChatCompletionParamsNonStreaming;
- export type InferenceChatCompletionParamsStreaming = InferenceAPI.InferenceChatCompletionParamsStreaming;
-}
-
-export interface InferenceChatCompletionParamsNonStreaming extends InferenceChatCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface InferenceChatCompletionParamsStreaming extends InferenceChatCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export type InferenceCompletionParams =
- | InferenceCompletionParamsNonStreaming
- | InferenceCompletionParamsStreaming;
-
-export interface InferenceCompletionParamsBase {
- /**
- * The content to generate a completion for.
- */
- content: Shared.InterleavedContent;
-
- /**
- * The identifier of the model to use. The model must be registered with Llama
- * Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- logprobs?: InferenceCompletionParams.Logprobs;
-
- /**
- * (Optional) Grammar specification for guided (structured) decoding.
- */
- response_format?: Shared.ResponseFormat;
-
- /**
- * (Optional) Parameters to control the sampling strategy.
- */
- sampling_params?: Shared.SamplingParams;
-
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: boolean;
-}
-
-export namespace InferenceCompletionParams {
- /**
- * (Optional) If specified, log probabilities for each token position will be
- * returned.
- */
- export interface Logprobs {
- /**
- * How many tokens (for each position) to return log probabilities for.
- */
- top_k?: number;
- }
-
- export type InferenceCompletionParamsNonStreaming = InferenceAPI.InferenceCompletionParamsNonStreaming;
- export type InferenceCompletionParamsStreaming = InferenceAPI.InferenceCompletionParamsStreaming;
-}
-
-export interface InferenceCompletionParamsNonStreaming extends InferenceCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream?: false;
-}
-
-export interface InferenceCompletionParamsStreaming extends InferenceCompletionParamsBase {
- /**
- * (Optional) If True, generate an SSE event stream of the response. Defaults to
- * False.
- */
- stream: true;
-}
-
-export interface InferenceEmbeddingsParams {
- /**
- * List of contents to generate embeddings for. Each content can be a string or an
- * InterleavedContentItem (and hence can be multimodal). The behavior depends on
- * the model and provider. Some models may only support text.
- */
- contents: Array<string> | Array<Shared.InterleavedContentItem>;
-
- /**
- * The identifier of the model to use. The model must be an embedding model
- * registered with Llama Stack and available via the /models endpoint.
- */
- model_id: string;
-
- /**
- * (Optional) Output dimensionality for the embeddings. Only supported by
- * Matryoshka models.
- */
- output_dimension?: number;
-
- /**
- * (Optional) How is the embedding being used? This is only supported by asymmetric
- * embedding models.
- */
- task_type?: 'query' | 'document';
-
- /**
- * (Optional) Config for how to truncate text for embedding when text is longer
- * than the model's max sequence length.
- */
- text_truncation?: 'none' | 'start' | 'end';
-}
-
-export interface InferenceRerankParams {
- /**
- * List of items to rerank. Each item can be a string, text content part, or image
- * content part. Each input must not exceed the model's max input token length.
- */
- items: Array<
- | string
- | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
- | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam
- >;
-
- /**
- * The identifier of the reranking model to use.
- */
- model: string;
-
- /**
- * The search query to rank items against. Can be a string, text content part, or
- * image content part. The input must not exceed the model's max input token
- * length.
- */
- query:
- | string
- | InferenceRerankParams.OpenAIChatCompletionContentPartTextParam
- | InferenceRerankParams.OpenAIChatCompletionContentPartImageParam;
-
- /**
- * (Optional) Maximum number of results to return. Default: returns all.
- */
- max_num_results?: number;
-}
-
-export namespace InferenceRerankParams {
- /**
- * Text content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartTextParam {
- /**
- * The text content of the message
- */
- text: string;
-
- /**
- * Must be "text" to identify this as text content
- */
- type: 'text';
- }
-
- /**
- * Image content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- /**
- * Must be "image_url" to identify this as image content
- */
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- export interface ImageURL {
- /**
- * URL of the image to include in the message
- */
- url: string;
-
- /**
- * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
- */
- detail?: string;
- }
- }
-
- /**
- * Text content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartTextParam {
- /**
- * The text content of the message
- */
- text: string;
-
- /**
- * Must be "text" to identify this as text content
- */
- type: 'text';
- }
-
- /**
- * Image content part for OpenAI-compatible chat completion messages.
- */
- export interface OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- image_url: OpenAIChatCompletionContentPartImageParam.ImageURL;
-
- /**
- * Must be "image_url" to identify this as image content
- */
- type: 'image_url';
- }
-
- export namespace OpenAIChatCompletionContentPartImageParam {
- /**
- * Image URL specification and processing details
- */
- export interface ImageURL {
- /**
- * URL of the image to include in the message
- */
- url: string;
-
- /**
- * (Optional) Level of detail for image processing. Can be "low", "high", or "auto"
- */
- detail?: string;
- }
- }
-}
-
-export declare namespace Inference {
- export {
- type ChatCompletionResponseStreamChunk as ChatCompletionResponseStreamChunk,
- type CompletionResponse as CompletionResponse,
- type EmbeddingsResponse as EmbeddingsResponse,
- type TokenLogProbs as TokenLogProbs,
- type InferenceBatchChatCompletionResponse as InferenceBatchChatCompletionResponse,
- type InferenceRerankResponse as InferenceRerankResponse,
- type InferenceBatchChatCompletionParams as InferenceBatchChatCompletionParams,
- type InferenceBatchCompletionParams as InferenceBatchCompletionParams,
- type InferenceChatCompletionParams as InferenceChatCompletionParams,
- type InferenceChatCompletionParamsNonStreaming as InferenceChatCompletionParamsNonStreaming,
- type InferenceChatCompletionParamsStreaming as InferenceChatCompletionParamsStreaming,
- type InferenceCompletionParams as InferenceCompletionParams,
- type InferenceCompletionParamsNonStreaming as InferenceCompletionParamsNonStreaming,
- type InferenceCompletionParamsStreaming as InferenceCompletionParamsStreaming,
- type InferenceEmbeddingsParams as InferenceEmbeddingsParams,
- type InferenceRerankParams as InferenceRerankParams,
- };
-}
diff --git a/src/resources/inspect.ts b/src/resources/inspect.ts
index 4e5d87c..0c10896 100644
--- a/src/resources/inspect.ts
+++ b/src/resources/inspect.ts
@@ -5,14 +5,14 @@ import * as Core from '../core';
export class Inspect extends APIResource {
/**
- * Get the current health status of the service.
+ * Get health status. Get the current health status of the service.
*/
health(options?: Core.RequestOptions): Core.APIPromise<HealthInfo> {
return this._client.get('/v1/health', options);
}
/**
- * Get the version of the service.
+ * Get version. Get the version of the service.
*/
version(options?: Core.RequestOptions): Core.APIPromise<VersionInfo> {
return this._client.get('/v1/version', options);
diff --git a/src/resources/models/index.ts b/src/resources/models/index.ts
index de6ecf3..e05a022 100644
--- a/src/resources/models/index.ts
+++ b/src/resources/models/index.ts
@@ -7,4 +7,4 @@ export {
type ModelListResponse,
type ModelRegisterParams,
} from './models';
-export { OpenAI, type OpenAIListResponse } from './openai';
+export { OpenAI } from './openai';
diff --git a/src/resources/models/models.ts b/src/resources/models/models.ts
index d72281f..b5d04cd 100644
--- a/src/resources/models/models.ts
+++ b/src/resources/models/models.ts
@@ -3,13 +3,13 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
import * as OpenAIAPI from './openai';
-import { OpenAI, OpenAIListResponse } from './openai';
+import { OpenAI } from './openai';
export class Models extends APIResource {
openai: OpenAIAPI.OpenAI = new OpenAIAPI.OpenAI(this._client);
/**
- * Get a model by its identifier.
+ * Get model. Get a model by its identifier.
*/
retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.get(`/v1/models/${modelId}`, options);
@@ -25,14 +25,14 @@ export class Models extends APIResource {
}
/**
- * Register a model.
+ * Register model. Register a model.
*/
register(body: ModelRegisterParams, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.post('/v1/models', { body, ...options });
}
/**
- * Unregister a model.
+ * Unregister model. Unregister a model.
*/
unregister(modelId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
return this._client.delete(`/v1/models/${modelId}`, {
@@ -63,7 +63,7 @@ export interface Model {
/**
* The type of model (LLM or embedding model)
*/
- model_type: 'llm' | 'embedding';
+ model_type: 'llm' | 'embedding' | 'rerank';
/**
* ID of the provider that owns this resource
@@ -97,7 +97,7 @@ export interface ModelRegisterParams {
/**
* The type of model to register.
*/
- model_type?: 'llm' | 'embedding';
+ model_type?: 'llm' | 'embedding' | 'rerank';
/**
* The identifier of the provider.
@@ -120,5 +120,5 @@ export declare namespace Models {
type ModelRegisterParams as ModelRegisterParams,
};
- export { OpenAI as OpenAI, type OpenAIListResponse as OpenAIListResponse };
+ export { OpenAI as OpenAI };
}
diff --git a/src/resources/models/openai.ts b/src/resources/models/openai.ts
index bcdef6f..c6b90d1 100644
--- a/src/resources/models/openai.ts
+++ b/src/resources/models/openai.ts
@@ -2,35 +2,15 @@
import { APIResource } from '../../resource';
import * as Core from '../../core';
+import * as ModelsAPI from './models';
export class OpenAI extends APIResource {
/**
- * List models using the OpenAI API.
+ * List all models.
*/
- list(options?: Core.RequestOptions): Core.APIPromise<OpenAIListResponse> {
+ list(options?: Core.RequestOptions): Core.APIPromise<ModelsAPI.ModelListResponse> {
return (
- this._client.get('/v1/openai/v1/models', options) as Core.APIPromise<{ data: OpenAIListResponse }>
+ this._client.get('/v1/models', options) as Core.APIPromise<{ data: ModelsAPI.ModelListResponse }>
)._thenUnwrap((obj) => obj.data);
}
}
-
-export type OpenAIListResponse = Array<OpenAIListResponse.OpenAIListResponseItem>;
-
-export namespace OpenAIListResponse {
- /**
- * A model from OpenAI.
- */
- export interface OpenAIListResponseItem {
- id: string;
-
- created: number;
-
- object: 'model';
-
- owned_by: string;
- }
-}
-
-export declare namespace OpenAI {
- export { type OpenAIListResponse as OpenAIListResponse };
-}
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index a945ab3..40bf49c 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -5,10 +5,11 @@ import * as Core from '../core';
export class Moderations extends APIResource {
/**
- * Classifies if text and/or image inputs are potentially harmful.
+ * Create moderation. Classifies if text and/or image inputs are potentially
+ * harmful.
*/
create(body: ModerationCreateParams, options?: Core.RequestOptions): Core.APIPromise<CreateResponse> {
- return this._client.post('/v1/openai/v1/moderations', { body, ...options });
+ return this._client.post('/v1/moderations', { body, ...options });
}
}
@@ -71,9 +72,9 @@ export interface ModerationCreateParams {
input: string | Array<string>;
/**
- * The content moderation model you would like to use.
+ * (Optional) The content moderation model you would like to use.
*/
- model: string;
+ model?: string;
}
export declare namespace Moderations {
diff --git a/src/resources/post-training/index.ts b/src/resources/post-training/index.ts
deleted file mode 100644
index 6fc7e36..0000000
--- a/src/resources/post-training/index.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Job,
- type JobListResponse,
- type JobArtifactsResponse,
- type JobStatusResponse,
- type JobArtifactsParams,
- type JobCancelParams,
- type JobStatusParams,
-} from './job';
-export {
- PostTraining,
- type AlgorithmConfig,
- type ListPostTrainingJobsResponse,
- type PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams,
-} from './post-training';
diff --git a/src/resources/post-training/job.ts b/src/resources/post-training/job.ts
deleted file mode 100644
index a250ac9..0000000
--- a/src/resources/post-training/job.ts
+++ /dev/null
@@ -1,268 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as PostTrainingAPI from './post-training';
-
-export class Job extends APIResource {
- /**
- * Get all training jobs.
- */
- list(
- options?: Core.RequestOptions,
- ): Core.APIPromise<Array<JobListResponse.JobListResponseItem>> {
- return (
- this._client.get('/v1/post-training/jobs', options) as Core.APIPromise<{
- data: Array<JobListResponse.JobListResponseItem>;
- }>
- )._thenUnwrap((obj) => obj.data);
- }
-
- /**
- * Get the artifacts of a training job.
- */
- artifacts(query: JobArtifactsParams, options?: Core.RequestOptions): Core.APIPromise<JobArtifactsResponse> {
- return this._client.get('/v1/post-training/job/artifacts', { query, ...options });
- }
-
- /**
- * Cancel a training job.
- */
- cancel(body: JobCancelParams, options?: Core.RequestOptions): Core.APIPromise<void> {
- return this._client.post('/v1/post-training/job/cancel', {
- body,
- ...options,
- headers: { Accept: '*/*', ...options?.headers },
- });
- }
-
- /**
- * Get the status of a training job.
- */
- status(query: JobStatusParams, options?: Core.RequestOptions): Core.APIPromise<JobStatusResponse> {
- return this._client.get('/v1/post-training/job/status', { query, ...options });
- }
-}
-
-export type JobListResponse = Array<JobListResponse.JobListResponseItem>;
-
-export namespace JobListResponse {
- export interface JobListResponseItem {
- job_uuid: string;
- }
-}
-
-/**
- * Artifacts of a finetuning job.
- */
-export interface JobArtifactsResponse {
- /**
- * List of model checkpoints created during training
- */
- checkpoints: Array<JobArtifactsResponse.Checkpoint>;
-
- /**
- * Unique identifier for the training job
- */
- job_uuid: string;
-}
-
-export namespace JobArtifactsResponse {
- /**
- * Checkpoint created during training runs.
- */
- export interface Checkpoint {
- /**
- * Timestamp when the checkpoint was created
- */
- created_at: string;
-
- /**
- * Training epoch when the checkpoint was saved
- */
- epoch: number;
-
- /**
- * Unique identifier for the checkpoint
- */
- identifier: string;
-
- /**
- * File system path where the checkpoint is stored
- */
- path: string;
-
- /**
- * Identifier of the training job that created this checkpoint
- */
- post_training_job_id: string;
-
- /**
- * (Optional) Training metrics associated with this checkpoint
- */
- training_metrics?: Checkpoint.TrainingMetrics;
- }
-
- export namespace Checkpoint {
- /**
- * (Optional) Training metrics associated with this checkpoint
- */
- export interface TrainingMetrics {
- /**
- * Training epoch number
- */
- epoch: number;
-
- /**
- * Perplexity metric indicating model confidence
- */
- perplexity: number;
-
- /**
- * Loss value on the training dataset
- */
- train_loss: number;
-
- /**
- * Loss value on the validation dataset
- */
- validation_loss: number;
- }
- }
-}
-
-/**
- * Status of a finetuning job.
- */
-export interface JobStatusResponse {
- /**
- * List of model checkpoints created during training
- */
- checkpoints: Array<JobStatusResponse.Checkpoint>;
-
- /**
- * Unique identifier for the training job
- */
- job_uuid: string;
-
- /**
- * Current status of the training job
- */
- status: 'completed' | 'in_progress' | 'failed' | 'scheduled' | 'cancelled';
-
- /**
- * (Optional) Timestamp when the job finished, if completed
- */
- completed_at?: string;
-
- /**
- * (Optional) Information about computational resources allocated to the job
- */
- resources_allocated?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- /**
- * (Optional) Timestamp when the job was scheduled
- */
- scheduled_at?: string;
-
- /**
- * (Optional) Timestamp when the job execution began
- */
- started_at?: string;
-}
-
-export namespace JobStatusResponse {
- /**
- * Checkpoint created during training runs.
- */
- export interface Checkpoint {
- /**
- * Timestamp when the checkpoint was created
- */
- created_at: string;
-
- /**
- * Training epoch when the checkpoint was saved
- */
- epoch: number;
-
- /**
- * Unique identifier for the checkpoint
- */
- identifier: string;
-
- /**
- * File system path where the checkpoint is stored
- */
- path: string;
-
- /**
- * Identifier of the training job that created this checkpoint
- */
- post_training_job_id: string;
-
- /**
- * (Optional) Training metrics associated with this checkpoint
- */
- training_metrics?: Checkpoint.TrainingMetrics;
- }
-
- export namespace Checkpoint {
- /**
- * (Optional) Training metrics associated with this checkpoint
- */
- export interface TrainingMetrics {
- /**
- * Training epoch number
- */
- epoch: number;
-
- /**
- * Perplexity metric indicating model confidence
- */
- perplexity: number;
-
- /**
- * Loss value on the training dataset
- */
- train_loss: number;
-
- /**
- * Loss value on the validation dataset
- */
- validation_loss: number;
- }
- }
-}
-
-export interface JobArtifactsParams {
- /**
- * The UUID of the job to get the artifacts of.
- */
- job_uuid: string;
-}
-
-export interface JobCancelParams {
- /**
- * The UUID of the job to cancel.
- */
- job_uuid: string;
-}
-
-export interface JobStatusParams {
- /**
- * The UUID of the job to get the status of.
- */
- job_uuid: string;
-}
-
-export declare namespace Job {
- export {
- type JobListResponse as JobListResponse,
- type JobArtifactsResponse as JobArtifactsResponse,
- type JobStatusResponse as JobStatusResponse,
- type JobArtifactsParams as JobArtifactsParams,
- type JobCancelParams as JobCancelParams,
- type JobStatusParams as JobStatusParams,
- };
-}
diff --git a/src/resources/post-training/post-training.ts b/src/resources/post-training/post-training.ts
deleted file mode 100644
index 8f6eb3f..0000000
--- a/src/resources/post-training/post-training.ts
+++ /dev/null
@@ -1,510 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../resource';
-import * as Core from '../../core';
-import * as JobAPI from './job';
-import {
- Job,
- JobArtifactsParams,
- JobArtifactsResponse,
- JobCancelParams,
- JobListResponse,
- JobStatusParams,
- JobStatusResponse,
-} from './job';
-
-export class PostTraining extends APIResource {
- job: JobAPI.Job = new JobAPI.Job(this._client);
-
- /**
- * Run preference optimization of a model.
- */
- preferenceOptimize(
- body: PostTrainingPreferenceOptimizeParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<PostTrainingJob> {
- return this._client.post('/v1/post-training/preference-optimize', { body, ...options });
- }
-
- /**
- * Run supervised fine-tuning of a model.
- */
- supervisedFineTune(
- body: PostTrainingSupervisedFineTuneParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<PostTrainingJob> {
- return this._client.post('/v1/post-training/supervised-fine-tune', { body, ...options });
- }
-}
-
-/**
- * Configuration for Low-Rank Adaptation (LoRA) fine-tuning.
- */
-export type AlgorithmConfig = AlgorithmConfig.LoraFinetuningConfig | AlgorithmConfig.QatFinetuningConfig;
-
-export namespace AlgorithmConfig {
- /**
- * Configuration for Low-Rank Adaptation (LoRA) fine-tuning.
- */
- export interface LoraFinetuningConfig {
- /**
- * LoRA scaling parameter that controls adaptation strength
- */
- alpha: number;
-
- /**
- * Whether to apply LoRA to MLP layers
- */
- apply_lora_to_mlp: boolean;
-
- /**
- * Whether to apply LoRA to output projection layers
- */
- apply_lora_to_output: boolean;
-
- /**
- * List of attention module names to apply LoRA to
- */
- lora_attn_modules: Array<string>;
-
- /**
- * Rank of the LoRA adaptation (lower rank = fewer parameters)
- */
- rank: number;
-
- /**
- * Algorithm type identifier, always "LoRA"
- */
- type: 'LoRA';
-
- /**
- * (Optional) Whether to quantize the base model weights
- */
- quantize_base?: boolean;
-
- /**
- * (Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation)
- */
- use_dora?: boolean;
- }
-
- /**
- * Configuration for Quantization-Aware Training (QAT) fine-tuning.
- */
- export interface QatFinetuningConfig {
- /**
- * Size of groups for grouped quantization
- */
- group_size: number;
-
- /**
- * Name of the quantization algorithm to use
- */
- quantizer_name: string;
-
- /**
- * Algorithm type identifier, always "QAT"
- */
- type: 'QAT';
- }
-}
-
-export interface ListPostTrainingJobsResponse {
- data: Array<ListPostTrainingJobsResponse.Data>;
-}
-
-export namespace ListPostTrainingJobsResponse {
- export interface Data {
- job_uuid: string;
- }
-}
-
-export interface PostTrainingJob {
- job_uuid: string;
-}
-
-export interface PostTrainingPreferenceOptimizeParams {
- /**
- * The algorithm configuration.
- */
- algorithm_config: PostTrainingPreferenceOptimizeParams.AlgorithmConfig;
-
- /**
- * The model to fine-tune.
- */
- finetuned_model: string;
-
- /**
- * The hyperparam search configuration.
- */
- hyperparam_search_config: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- /**
- * The UUID of the job to create.
- */
- job_uuid: string;
-
- /**
- * The logger configuration.
- */
- logger_config: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- /**
- * The training configuration.
- */
- training_config: PostTrainingPreferenceOptimizeParams.TrainingConfig;
-}
-
-export namespace PostTrainingPreferenceOptimizeParams {
- /**
- * The algorithm configuration.
- */
- export interface AlgorithmConfig {
- /**
- * Temperature parameter for the DPO loss
- */
- beta: number;
-
- /**
- * The type of loss function to use for DPO
- */
- loss_type: 'sigmoid' | 'hinge' | 'ipo' | 'kto_pair';
- }
-
- /**
- * The training configuration.
- */
- export interface TrainingConfig {
- /**
- * Number of steps to accumulate gradients before updating
- */
- gradient_accumulation_steps: number;
-
- /**
- * Maximum number of steps to run per epoch
- */
- max_steps_per_epoch: number;
-
- /**
- * Number of training epochs to run
- */
- n_epochs: number;
-
- /**
- * (Optional) Configuration for data loading and formatting
- */
- data_config?: TrainingConfig.DataConfig;
-
- /**
- * (Optional) Data type for model parameters (bf16, fp16, fp32)
- */
- dtype?: string;
-
- /**
- * (Optional) Configuration for memory and compute optimizations
- */
- efficiency_config?: TrainingConfig.EfficiencyConfig;
-
- /**
- * (Optional) Maximum number of validation steps per epoch
- */
- max_validation_steps?: number;
-
- /**
- * (Optional) Configuration for the optimization algorithm
- */
- optimizer_config?: TrainingConfig.OptimizerConfig;
- }
-
- export namespace TrainingConfig {
- /**
- * (Optional) Configuration for data loading and formatting
- */
- export interface DataConfig {
- /**
- * Number of samples per training batch
- */
- batch_size: number;
-
- /**
- * Format of the dataset (instruct or dialog)
- */
- data_format: 'instruct' | 'dialog';
-
- /**
- * Unique identifier for the training dataset
- */
- dataset_id: string;
-
- /**
- * Whether to shuffle the dataset during training
- */
- shuffle: boolean;
-
- /**
- * (Optional) Whether to pack multiple samples into a single sequence for
- * efficiency
- */
- packed?: boolean;
-
- /**
- * (Optional) Whether to compute loss on input tokens as well as output tokens
- */
- train_on_input?: boolean;
-
- /**
- * (Optional) Unique identifier for the validation dataset
- */
- validation_dataset_id?: string;
- }
-
- /**
- * (Optional) Configuration for memory and compute optimizations
- */
- export interface EfficiencyConfig {
- /**
- * (Optional) Whether to use activation checkpointing to reduce memory usage
- */
- enable_activation_checkpointing?: boolean;
-
- /**
- * (Optional) Whether to offload activations to CPU to save GPU memory
- */
- enable_activation_offloading?: boolean;
-
- /**
- * (Optional) Whether to offload FSDP parameters to CPU
- */
- fsdp_cpu_offload?: boolean;
-
- /**
- * (Optional) Whether to use memory-efficient FSDP wrapping
- */
- memory_efficient_fsdp_wrap?: boolean;
- }
-
- /**
- * (Optional) Configuration for the optimization algorithm
- */
- export interface OptimizerConfig {
- /**
- * Learning rate for the optimizer
- */
- lr: number;
-
- /**
- * Number of steps for learning rate warmup
- */
- num_warmup_steps: number;
-
- /**
- * Type of optimizer to use (adam, adamw, or sgd)
- */
- optimizer_type: 'adam' | 'adamw' | 'sgd';
-
- /**
- * Weight decay coefficient for regularization
- */
- weight_decay: number;
- }
- }
-}
-
-export interface PostTrainingSupervisedFineTuneParams {
- /**
- * The hyperparam search configuration.
- */
- hyperparam_search_config: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- /**
- * The UUID of the job to create.
- */
- job_uuid: string;
-
- /**
- * The logger configuration.
- */
- logger_config: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
-
- /**
- * The training configuration.
- */
- training_config: PostTrainingSupervisedFineTuneParams.TrainingConfig;
-
- /**
- * The algorithm configuration.
- */
- algorithm_config?: AlgorithmConfig;
-
- /**
- * The directory to save checkpoint(s) to.
- */
- checkpoint_dir?: string;
-
- /**
- * The model to fine-tune.
- */
- model?: string;
-}
-
-export namespace PostTrainingSupervisedFineTuneParams {
- /**
- * The training configuration.
- */
- export interface TrainingConfig {
- /**
- * Number of steps to accumulate gradients before updating
- */
- gradient_accumulation_steps: number;
-
- /**
- * Maximum number of steps to run per epoch
- */
- max_steps_per_epoch: number;
-
- /**
- * Number of training epochs to run
- */
- n_epochs: number;
-
- /**
- * (Optional) Configuration for data loading and formatting
- */
- data_config?: TrainingConfig.DataConfig;
-
- /**
- * (Optional) Data type for model parameters (bf16, fp16, fp32)
- */
- dtype?: string;
-
- /**
- * (Optional) Configuration for memory and compute optimizations
- */
- efficiency_config?: TrainingConfig.EfficiencyConfig;
-
- /**
- * (Optional) Maximum number of validation steps per epoch
- */
- max_validation_steps?: number;
-
- /**
- * (Optional) Configuration for the optimization algorithm
- */
- optimizer_config?: TrainingConfig.OptimizerConfig;
- }
-
- export namespace TrainingConfig {
- /**
- * (Optional) Configuration for data loading and formatting
- */
- export interface DataConfig {
- /**
- * Number of samples per training batch
- */
- batch_size: number;
-
- /**
- * Format of the dataset (instruct or dialog)
- */
- data_format: 'instruct' | 'dialog';
-
- /**
- * Unique identifier for the training dataset
- */
- dataset_id: string;
-
- /**
- * Whether to shuffle the dataset during training
- */
- shuffle: boolean;
-
- /**
- * (Optional) Whether to pack multiple samples into a single sequence for
- * efficiency
- */
- packed?: boolean;
-
- /**
- * (Optional) Whether to compute loss on input tokens as well as output tokens
- */
- train_on_input?: boolean;
-
- /**
- * (Optional) Unique identifier for the validation dataset
- */
- validation_dataset_id?: string;
- }
-
- /**
- * (Optional) Configuration for memory and compute optimizations
- */
- export interface EfficiencyConfig {
- /**
- * (Optional) Whether to use activation checkpointing to reduce memory usage
- */
- enable_activation_checkpointing?: boolean;
-
- /**
- * (Optional) Whether to offload activations to CPU to save GPU memory
- */
- enable_activation_offloading?: boolean;
-
- /**
- * (Optional) Whether to offload FSDP parameters to CPU
- */
- fsdp_cpu_offload?: boolean;
-
- /**
- * (Optional) Whether to use memory-efficient FSDP wrapping
- */
- memory_efficient_fsdp_wrap?: boolean;
- }
-
- /**
- * (Optional) Configuration for the optimization algorithm
- */
- export interface OptimizerConfig {
- /**
- * Learning rate for the optimizer
- */
- lr: number;
-
- /**
- * Number of steps for learning rate warmup
- */
- num_warmup_steps: number;
-
- /**
- * Type of optimizer to use (adam, adamw, or sgd)
- */
- optimizer_type: 'adam' | 'adamw' | 'sgd';
-
- /**
- * Weight decay coefficient for regularization
- */
- weight_decay: number;
- }
- }
-}
-
-PostTraining.Job = Job;
-
-export declare namespace PostTraining {
- export {
- type AlgorithmConfig as AlgorithmConfig,
- type ListPostTrainingJobsResponse as ListPostTrainingJobsResponse,
- type PostTrainingJob as PostTrainingJob,
- type PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
- type PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
- };
-
- export {
- Job as Job,
- type JobListResponse as JobListResponse,
- type JobArtifactsResponse as JobArtifactsResponse,
- type JobStatusResponse as JobStatusResponse,
- type JobArtifactsParams as JobArtifactsParams,
- type JobCancelParams as JobCancelParams,
- type JobStatusParams as JobStatusParams,
- };
-}
diff --git a/src/resources/providers.ts b/src/resources/providers.ts
index d27b9ab..2736f37 100644
--- a/src/resources/providers.ts
+++ b/src/resources/providers.ts
@@ -6,14 +6,14 @@ import * as InspectAPI from './inspect';
export class Providers extends APIResource {
/**
- * Get detailed information about a specific provider.
+ * Get provider. Get detailed information about a specific provider.
*/
retrieve(providerId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/v1/providers/${providerId}`, options);
}
/**
- * List all available providers.
+ * List providers. List all available providers.
*/
list(options?: Core.RequestOptions): Core.APIPromise {
return (
diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts
index 74c556c..549533c 100644
--- a/src/resources/responses/input-items.ts
+++ b/src/resources/responses/input-items.ts
@@ -6,7 +6,7 @@ import * as Core from '../../core';
export class InputItems extends APIResource {
/**
- * List input items for a given OpenAI response.
+ * List input items.
*/
list(
responseId: string,
@@ -22,7 +22,7 @@ export class InputItems extends APIResource {
if (isRequestOptions(query)) {
return this.list(responseId, {}, query);
}
- return this._client.get(`/v1/openai/v1/responses/${responseId}/input_items`, { query, ...options });
+ return this._client.get(`/v1/responses/${responseId}/input_items`, { query, ...options });
}
}
@@ -34,10 +34,15 @@ export interface InputItemListResponse {
* List of input items
*/
data: Array<
+ | InputItemListResponse.OpenAIResponseMessage
| InputItemListResponse.OpenAIResponseOutputMessageWebSearchToolCall
| InputItemListResponse.OpenAIResponseOutputMessageFileSearchToolCall
| InputItemListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | InputItemListResponse.OpenAIResponseOutputMessageMcpCall
+ | InputItemListResponse.OpenAIResponseOutputMessageMcpListTools
+ | InputItemListResponse.OpenAIResponseMcpApprovalRequest
| InputItemListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | InputItemListResponse.OpenAIResponseMcpApprovalResponse
| InputItemListResponse.OpenAIResponseMessage
>;
@@ -48,6 +53,212 @@ export interface InputItemListResponse {
}
export namespace InputItemListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
/**
* Web search tool call output message for OpenAI responses.
*/
@@ -165,6 +376,108 @@ export namespace InputItemListResponse {
status?: string;
}
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* This represents the output of a function call that gets passed back to the
* model.
@@ -181,6 +494,21 @@ export namespace InputItemListResponse {
status?: string;
}
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
/**
* Corresponds to the various Message types in the Responses API. They are all
* under one type because the Responses API gives them all the same "type" value,
@@ -192,8 +520,12 @@ export namespace InputItemListResponse {
| Array<
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -234,18 +566,53 @@ export namespace InputItemListResponse {
*/
type: 'input_image';
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
/**
* (Optional) URL of the image content
*/
image_url?: string;
}
- export interface UnionMember2 {
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -253,7 +620,7 @@ export namespace InputItemListResponse {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -331,6 +698,21 @@ export namespace InputItemListResponse {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
}
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index e329519..2b8a74c 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -14,7 +14,7 @@ export class Responses extends APIResource {
inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
/**
- * Create a new OpenAI response.
+ * Create a model response.
*/
create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise;
create(
@@ -29,22 +29,20 @@ export class Responses extends APIResource {
body: ResponseCreateParams,
options?: Core.RequestOptions,
): APIPromise | APIPromise> {
- return this._client.post('/v1/openai/v1/responses', {
- body,
- ...options,
- stream: body.stream ?? false,
- }) as APIPromise | APIPromise>;
+ return this._client.post('/v1/responses', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise
+ | APIPromise>;
}
/**
- * Retrieve an OpenAI response by its ID.
+ * Get a model response.
*/
retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/v1/openai/v1/responses/${responseId}`, options);
+ return this._client.get(`/v1/responses/${responseId}`, options);
}
/**
- * List all OpenAI responses.
+ * List all responses.
*/
list(
query?: ResponseListParams,
@@ -60,17 +58,17 @@ export class Responses extends APIResource {
if (isRequestOptions(query)) {
return this.list({}, query);
}
- return this._client.getAPIList('/v1/openai/v1/responses', ResponseListResponsesOpenAICursorPage, {
+ return this._client.getAPIList('/v1/responses', ResponseListResponsesOpenAICursorPage, {
query,
...options,
});
}
/**
- * Delete an OpenAI response by its ID.
+ * Delete a response.
*/
delete(responseId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/v1/openai/v1/responses/${responseId}`, options);
+ return this._client.delete(`/v1/responses/${responseId}`, options);
}
}
@@ -110,6 +108,7 @@ export interface ResponseObject {
| ResponseObject.OpenAIResponseOutputMessageFunctionToolCall
| ResponseObject.OpenAIResponseOutputMessageMcpCall
| ResponseObject.OpenAIResponseOutputMessageMcpListTools
+ | ResponseObject.OpenAIResponseMcpApprovalRequest
>;
/**
@@ -132,16 +131,36 @@ export interface ResponseObject {
*/
error?: ResponseObject.Error;
+ /**
+ * (Optional) System message inserted into the model's context
+ */
+ instructions?: string;
+
/**
* (Optional) ID of the previous response in a conversation
*/
previous_response_id?: string;
+ /**
+ * (Optional) Reference to a prompt template and its variables.
+ */
+ prompt?: ResponseObject.Prompt;
+
/**
* (Optional) Sampling temperature used for generation
*/
temperature?: number;
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseObject.OpenAIResponseInputToolWebSearch
+ | ResponseObject.OpenAIResponseInputToolFileSearch
+ | ResponseObject.OpenAIResponseInputToolFunction
+ | ResponseObject.OpenAIResponseToolMcp
+ >;
+
/**
* (Optional) Nucleus sampling parameter used for generation
*/
@@ -153,9 +172,9 @@ export interface ResponseObject {
truncation?: string;
/**
- * (Optional) User identifier associated with the request
+ * (Optional) Token usage information for the response
*/
- user?: string;
+ usage?: ResponseObject.Usage;
}
export namespace ResponseObject {
@@ -170,8 +189,12 @@ export namespace ResponseObject {
| Array<
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -212,18 +235,53 @@ export namespace ResponseObject {
*/
type: 'input_image';
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
/**
* (Optional) URL of the image content
*/
image_url?: string;
}
- export interface UnionMember2 {
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -231,7 +289,7 @@ export namespace ResponseObject {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -309,6 +367,21 @@ export namespace ResponseObject {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
@@ -515,6 +588,21 @@ export namespace ResponseObject {
}
}
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
/**
* Text formatting configuration for the response
*/
@@ -573,152 +661,502 @@ export namespace ResponseObject {
*/
message: string;
}
-}
-
-/**
- * Streaming event indicating a new response has been created.
- */
-export type ResponseObjectStream =
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDone
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallInProgress
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallSearching
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallCompleted
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsInProgress
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsFailed
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsCompleted
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallInProgress
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallFailed
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone
- | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted;
-export namespace ResponseObjectStream {
/**
- * Streaming event indicating a new response has been created.
+ * (Optional) Reference to a prompt template and its variables.
*/
- export interface OpenAIResponseObjectStreamResponseCreated {
+ export interface Prompt {
/**
- * The newly created response object
+ * Unique identifier of the prompt template
*/
- response: ResponsesAPI.ResponseObject;
+ id: string;
/**
- * Event type identifier, always "response.created"
+ * Dictionary of variable names to OpenAIResponseInputMessageContent structure for
+ * template substitution. The substitution values can either be strings, or other
+ * Response input types like images or files.
*/
- type: 'response.created';
+ variables?: {
+ [key: string]:
+ | Prompt.OpenAIResponseInputMessageContentText
+ | Prompt.OpenAIResponseInputMessageContentImage
+ | Prompt.OpenAIResponseInputMessageContentFile;
+ };
+
+ /**
+ * Version number of the prompt to use (defaults to latest if not specified)
+ */
+ version?: string;
}
- /**
- * Streaming event for when a new output item is added to the response.
- */
- export interface OpenAIResponseObjectStreamResponseOutputItemAdded {
+ export namespace Prompt {
/**
- * The output item that was added (message, tool call, etc.)
+ * Text content for input messages in OpenAI response format.
*/
- item:
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMessage
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageWebSearchToolCall
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall
- | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools;
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
/**
- * Index position of this item in the output list
+ * Image content for input messages in OpenAI response format.
*/
- output_index: number;
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
/**
- * Unique identifier of the response containing this output
+ * File content for input messages in OpenAI response format.
*/
- response_id: string;
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+ }
+ /**
+ * Web search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolWebSearch {
/**
- * Sequential number for ordering streaming events
+ * Web search tool type variant to use
*/
- sequence_number: number;
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
/**
- * Event type identifier, always "response.output_item.added"
+ * (Optional) Size of search context, must be "low", "medium", or "high"
*/
- type: 'response.output_item.added';
+ search_context_size?: string;
}
- export namespace OpenAIResponseObjectStreamResponseOutputItemAdded {
+ /**
+ * File search tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFileSearch {
/**
- * Corresponds to the various Message types in the Responses API. They are all
- * under one type because the Responses API gives them all the same "type" value,
- * and there is no way to tell them apart in certain scenarios.
+ * Tool type identifier, always "file_search"
*/
- export interface OpenAIResponseMessage {
- content:
- | string
- | Array<
- | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
- | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
- >
- | Array;
+ type: 'file_search';
- role: 'system' | 'developer' | 'user' | 'assistant';
+ /**
+ * List of vector store identifiers to search within
+ */
+ vector_store_ids: Array;
- type: 'message';
+ /**
+ * (Optional) Additional filters to apply to the search
+ */
+ filters?: { [key: string]: boolean | number | string | Array | unknown | null };
- id?: string;
+ /**
+ * (Optional) Maximum number of search results to return (1-50)
+ */
+ max_num_results?: number;
- status?: string;
- }
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
+ }
- export namespace OpenAIResponseMessage {
+ export namespace OpenAIResponseInputToolFileSearch {
+ /**
+ * (Optional) Options for ranking and scoring search results
+ */
+ export interface RankingOptions {
/**
- * Text content for input messages in OpenAI response format.
+ * (Optional) Name of the ranking algorithm to use
*/
- export interface OpenAIResponseInputMessageContentText {
- /**
- * The text content of the input message
- */
- text: string;
-
- /**
- * Content type identifier, always "input_text"
- */
- type: 'input_text';
- }
+ ranker?: string;
/**
- * Image content for input messages in OpenAI response format.
+ * (Optional) Minimum relevance score threshold for results
*/
- export interface OpenAIResponseInputMessageContentImage {
- /**
- * Level of detail for image processing, can be "low", "high", or "auto"
- */
- detail: 'low' | 'high' | 'auto';
+ score_threshold?: number;
+ }
+ }
- /**
- * Content type identifier, always "input_image"
- */
- type: 'input_image';
+ /**
+ * Function tool configuration for OpenAI response inputs.
+ */
+ export interface OpenAIResponseInputToolFunction {
+ /**
+ * Name of the function that can be called
+ */
+ name: string;
- /**
- * (Optional) URL of the image content
- */
+ /**
+ * Tool type identifier, always "function"
+ */
+ type: 'function';
+
+ /**
+ * (Optional) Description of what the function does
+ */
+ description?: string;
+
+ /**
+ * (Optional) JSON schema defining the function's parameters
+ */
+ parameters?: { [key: string]: boolean | number | string | Array | unknown | null };
+
+ /**
+ * (Optional) Whether to enforce strict parameter validation
+ */
+ strict?: boolean;
+ }
+
+ /**
+ * Model Context Protocol (MCP) tool configuration for OpenAI response object.
+ */
+ export interface OpenAIResponseToolMcp {
+ /**
+ * Label to identify this MCP server
+ */
+ server_label: string;
+
+ /**
+ * Tool type identifier, always "mcp"
+ */
+ type: 'mcp';
+
+ /**
+ * (Optional) Restriction on which tools can be used from this server
+ */
+ allowed_tools?: Array | OpenAIResponseToolMcp.AllowedToolsFilter;
+ }
+
+ export namespace OpenAIResponseToolMcp {
+ /**
+ * Filter configuration for restricting which MCP tools can be used.
+ */
+ export interface AllowedToolsFilter {
+ /**
+ * (Optional) List of specific tool names that are allowed
+ */
+ tool_names?: Array;
+ }
+ }
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ export interface Usage {
+ /**
+ * Number of tokens in the input
+ */
+ input_tokens: number;
+
+ /**
+ * Number of tokens in the output
+ */
+ output_tokens: number;
+
+ /**
+ * Total tokens used (input + output)
+ */
+ total_tokens: number;
+
+ /**
+ * Detailed breakdown of input token usage
+ */
+ input_tokens_details?: Usage.InputTokensDetails;
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ output_tokens_details?: Usage.OutputTokensDetails;
+ }
+
+ export namespace Usage {
+ /**
+ * Detailed breakdown of input token usage
+ */
+ export interface InputTokensDetails {
+ /**
+ * Number of tokens retrieved from cache
+ */
+ cached_tokens?: number;
+ }
+
+ /**
+ * Detailed breakdown of output token usage
+ */
+ export interface OutputTokensDetails {
+ /**
+ * Number of tokens used for reasoning (o1/o3 models)
+ */
+ reasoning_tokens?: number;
+ }
+ }
+}
+
+/**
+ * Streaming event indicating a new response has been created.
+ */
+export type ResponseObjectStream =
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseCreated
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputItemDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallSearching
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseWebSearchCallCompleted
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsFailed
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpListToolsCompleted
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallFailed
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseMcpCallCompleted
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseContentPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDelta
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseRefusalDone
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallInProgress
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallSearching
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFileSearchCallCompleted
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseIncomplete
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseFailed
+ | ResponseObjectStream.OpenAIResponseObjectStreamResponseCompleted;
+
+export namespace ResponseObjectStream {
+ /**
+ * Streaming event indicating a new response has been created.
+ */
+ export interface OpenAIResponseObjectStreamResponseCreated {
+ /**
+ * The response object that was created
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Event type identifier, always "response.created"
+ */
+ type: 'response.created';
+ }
+
+ /**
+ * Streaming event indicating the response remains in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseInProgress {
+ /**
+ * Current response state while in progress
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.in_progress"
+ */
+ type: 'response.in_progress';
+ }
+
+ /**
+ * Streaming event for when a new output item is added to the response.
+ */
+ export interface OpenAIResponseObjectStreamResponseOutputItemAdded {
+ /**
+ * The output item that was added (message, tool call, etc.)
+ */
+ item:
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMessage
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageWebSearchToolCall
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFileSearchToolCall
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageFunctionToolCall
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpCall
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseOutputMessageMcpListTools
+ | OpenAIResponseObjectStreamResponseOutputItemAdded.OpenAIResponseMcpApprovalRequest;
+
+ /**
+ * Index position of this item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Unique identifier of the response containing this output
+ */
+ response_id: string;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.output_item.added"
+ */
+ type: 'response.output_item.added';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseOutputItemAdded {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
image_url?: string;
}
- export interface UnionMember2 {
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -726,7 +1164,7 @@ export namespace ResponseObjectStream {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -804,14 +1242,29 @@ export namespace ResponseObjectStream {
type: 'file_path';
}
}
- }
- /**
- * Web search tool call output message for OpenAI responses.
- */
- export interface OpenAIResponseOutputMessageWebSearchToolCall {
/**
- * Unique identifier for this tool call
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
*/
id: string;
@@ -1009,6 +1462,21 @@ export namespace ResponseObjectStream {
description?: string;
}
}
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
}
/**
@@ -1024,7 +1492,8 @@ export namespace ResponseObjectStream {
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFileSearchToolCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageFunctionToolCall
| OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpCall
- | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools;
+ | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseOutputMessageMcpListTools
+ | OpenAIResponseObjectStreamResponseOutputItemDone.OpenAIResponseMcpApprovalRequest;
/**
* Index position of this item in the output list
@@ -1059,8 +1528,12 @@ export namespace ResponseObjectStream {
| Array<
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -1101,18 +1574,53 @@ export namespace ResponseObjectStream {
*/
type: 'input_image';
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
/**
* (Optional) URL of the image content
*/
image_url?: string;
}
- export interface UnionMember2 {
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
>;
text: string;
@@ -1120,7 +1628,7 @@ export namespace ResponseObjectStream {
type: 'output_text';
}
- export namespace UnionMember2 {
+ export namespace OpenAIResponseOutputMessageContentOutputText {
/**
* File citation annotation for referencing specific files in response content.
*/
@@ -1198,6 +1706,21 @@ export namespace ResponseObjectStream {
type: 'file_path';
}
}
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
}
/**
@@ -1403,6 +1926,21 @@ export namespace ResponseObjectStream {
description?: string;
}
}
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
}
/**
@@ -1696,17 +2234,28 @@ export namespace ResponseObjectStream {
* Streaming event for when a new content part is added to a response item.
*/
export interface OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
/**
* Unique identifier of the output item containing this content part
*/
item_id: string;
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
/**
* The content part that was added
*/
part:
| OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal;
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartAdded.OpenAIResponseContentPartReasoningText;
/**
* Unique identifier of the response containing this content
@@ -1725,34 +2274,172 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseObjectStreamResponseContentPartAdded {
+ /**
+ * Text content within a streamed response part.
+ */
export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
text: string;
+ /**
+ * Content part type identifier, always "output_text"
+ */
type: 'output_text';
+
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
+ }
+
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
}
+ /**
+ * Refusal content within a streamed response part.
+ */
export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
refusal: string;
+ /**
+ * Content part type identifier, always "refusal"
+ */
type: 'refusal';
}
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
}
/**
* Streaming event for when a content part is completed.
*/
export interface OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Index position of the part within the content array
+ */
+ content_index: number;
+
/**
* Unique identifier of the output item containing this content part
*/
item_id: string;
+ /**
+ * Index position of the output item in the response
+ */
+ output_index: number;
+
/**
* The completed content part
*/
part:
| OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartOutputText
- | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal;
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartRefusal
+ | OpenAIResponseObjectStreamResponseContentPartDone.OpenAIResponseContentPartReasoningText;
/**
* Unique identifier of the response containing this content
@@ -1771,427 +2458,851 @@ export namespace ResponseObjectStream {
}
export namespace OpenAIResponseObjectStreamResponseContentPartDone {
+ /**
+ * Text content within a streamed response part.
+ */
export interface OpenAIResponseContentPartOutputText {
+ /**
+ * Structured annotations associated with the text
+ */
+ annotations: Array<
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseContentPartOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ /**
+ * Text emitted for this content part
+ */
text: string;
+ /**
+ * Content part type identifier, always "output_text"
+ */
type: 'output_text';
- }
-
- export interface OpenAIResponseContentPartRefusal {
- refusal: string;
- type: 'refusal';
+ /**
+ * (Optional) Token log probability details
+ */
+ logprobs?: Array<{ [key: string]: boolean | number | string | Array | unknown | null }>;
}
- }
-
- /**
- * Streaming event indicating a response has been completed.
- */
- export interface OpenAIResponseObjectStreamResponseCompleted {
- /**
- * The completed response object
- */
- response: ResponsesAPI.ResponseObject;
- /**
- * Event type identifier, always "response.completed"
- */
- type: 'response.completed';
- }
-}
+ export namespace OpenAIResponseContentPartOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
-/**
- * OpenAI response object extended with input context information.
- */
-export interface ResponseListResponse {
- /**
- * Unique identifier for this response
- */
- id: string;
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
- /**
- * Unix timestamp when the response was created
- */
- created_at: number;
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
- /**
- * List of input items that led to this response
- */
- input: Array<
- | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
- | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput
- | ResponseListResponse.OpenAIResponseMessage
- >;
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
- /**
- * Model identifier used for generation
- */
- model: string;
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
- /**
- * Object type identifier, always "response"
- */
- object: 'response';
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
- /**
- * List of generated output items (messages, tool calls, etc.)
- */
- output: Array<
- | ResponseListResponse.OpenAIResponseMessage
- | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
- | ResponseListResponse.OpenAIResponseOutputMessageMcpCall
- | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
- >;
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
- /**
- * Whether tool calls can be executed in parallel
- */
- parallel_tool_calls: boolean;
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
- /**
- * Current status of the response generation
- */
- status: string;
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
- /**
- * Text formatting configuration for the response
- */
- text: ResponseListResponse.Text;
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
- /**
- * (Optional) Error details if the response generation failed
- */
- error?: ResponseListResponse.Error;
+ end_index: number;
- /**
- * (Optional) ID of the previous response in a conversation
- */
- previous_response_id?: string;
+ file_id: string;
- /**
- * (Optional) Sampling temperature used for generation
- */
- temperature?: number;
+ filename: string;
- /**
- * (Optional) Nucleus sampling parameter used for generation
- */
- top_p?: number;
+ start_index: number;
- /**
- * (Optional) Truncation strategy applied to the response
- */
- truncation?: string;
+ type: 'container_file_citation';
+ }
- /**
- * (Optional) User identifier associated with the request
- */
- user?: string;
-}
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+
+ /**
+ * Reasoning text emitted as part of a streamed response.
+ */
+ export interface OpenAIResponseContentPartReasoningText {
+ /**
+ * Reasoning text supplied by the model
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "reasoning_text"
+ */
+ type: 'reasoning_text';
+ }
+ }
-export namespace ResponseListResponse {
/**
- * Web search tool call output message for OpenAI responses.
+ * Streaming event for incremental reasoning text updates.
*/
- export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ export interface OpenAIResponseObjectStreamResponseReasoningTextDelta {
/**
- * Unique identifier for this tool call
+ * Index position of the reasoning content part
*/
- id: string;
+ content_index: number;
/**
- * Current status of the web search operation
+ * Incremental reasoning text being added
*/
- status: string;
+ delta: string;
/**
- * Tool call type identifier, always "web_search_call"
+ * Unique identifier of the output item being updated
*/
- type: 'web_search_call';
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_text.delta"
+ */
+ type: 'response.reasoning_text.delta';
}
/**
- * File search tool call output message for OpenAI responses.
+ * Streaming event for when reasoning text is completed.
*/
- export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ export interface OpenAIResponseObjectStreamResponseReasoningTextDone {
/**
- * Unique identifier for this tool call
+ * Index position of the reasoning content part
*/
- id: string;
+ content_index: number;
/**
- * List of search queries executed
+ * Unique identifier of the completed output item
*/
- queries: Array;
+ item_id: string;
/**
- * Current status of the file search operation
+ * Index position of the item in the output list
*/
- status: string;
+ output_index: number;
/**
- * Tool call type identifier, always "file_search_call"
+ * Sequential number for ordering streaming events
*/
- type: 'file_search_call';
+ sequence_number: number;
/**
- * (Optional) Search results returned by the file search operation
+ * Final complete reasoning text
*/
- results?: Array;
+ text: string;
+
+ /**
+ * Event type identifier, always "response.reasoning_text.done"
+ */
+ type: 'response.reasoning_text.done';
}
- export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Streaming event for when a new reasoning summary part is added.
+ */
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
/**
- * Search results returned by the file search operation.
+ * Unique identifier of the output item
*/
- export interface Result {
- /**
- * (Optional) Key-value attributes associated with the file
- */
- attributes: { [key: string]: boolean | number | string | Array | unknown | null };
+ item_id: string;
- /**
- * Unique identifier of the file containing the result
- */
- file_id: string;
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
- /**
- * Name of the file containing the result
- */
- filename: string;
+ /**
+ * The summary part that was added
+ */
+ part: OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded.Part;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_part.added"
+ */
+ type: 'response.reasoning_summary_part.added';
+ }
+ export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded {
+ /**
+ * The summary part that was added
+ */
+ export interface Part {
/**
- * Relevance score for this search result (between 0 and 1)
+ * Summary text
*/
- score: number;
+ text: string;
/**
- * Text content of the search result
+ * Content part type identifier, always "summary_text"
*/
- text: string;
+ type: 'summary_text';
}
}
/**
- * Function tool call output message for OpenAI responses.
+ * Streaming event for when a reasoning summary part is completed.
*/
- export interface OpenAIResponseOutputMessageFunctionToolCall {
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryPartDone {
/**
- * JSON string containing the function arguments
+ * Unique identifier of the output item
*/
- arguments: string;
+ item_id: string;
/**
- * Unique identifier for the function call
+ * Index position of the output item
*/
- call_id: string;
+ output_index: number;
/**
- * Name of the function being called
+ * The completed summary part
*/
- name: string;
+ part: OpenAIResponseObjectStreamResponseReasoningSummaryPartDone.Part;
/**
- * Tool call type identifier, always "function_call"
+ * Sequential number for ordering streaming events
*/
- type: 'function_call';
+ sequence_number: number;
/**
- * (Optional) Additional identifier for the tool call
+ * Index of the summary part within the reasoning summary
*/
- id?: string;
+ summary_index: number;
/**
- * (Optional) Current status of the function call execution
+ * Event type identifier, always "response.reasoning_summary_part.done"
*/
- status?: string;
+ type: 'response.reasoning_summary_part.done';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseReasoningSummaryPartDone {
+ /**
+ * The completed summary part
+ */
+ export interface Part {
+ /**
+ * Summary text
+ */
+ text: string;
+
+ /**
+ * Content part type identifier, always "summary_text"
+ */
+ type: 'summary_text';
+ }
}
/**
- * This represents the output of a function call that gets passed back to the
- * model.
+ * Streaming event for incremental reasoning summary text updates.
*/
- export interface OpenAIResponseInputFunctionToolCallOutput {
- call_id: string;
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta {
+ /**
+ * Incremental summary text being added
+ */
+ delta: string;
- output: string;
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
- type: 'function_call_output';
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
- id?: string;
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
- status?: string;
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_text.delta"
+ */
+ type: 'response.reasoning_summary_text.delta';
}
/**
- * Corresponds to the various Message types in the Responses API. They are all
- * under one type because the Responses API gives them all the same "type" value,
- * and there is no way to tell them apart in certain scenarios.
+ * Streaming event for when reasoning summary text is completed.
*/
- export interface OpenAIResponseMessage {
- content:
- | string
- | Array<
- | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
- | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
- >
- | Array;
+ export interface OpenAIResponseObjectStreamResponseReasoningSummaryTextDone {
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
- role: 'system' | 'developer' | 'user' | 'assistant';
+ /**
+ * Index position of the output item
+ */
+ output_index: number;
- type: 'message';
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
- id?: string;
+ /**
+ * Index of the summary part within the reasoning summary
+ */
+ summary_index: number;
- status?: string;
+ /**
+ * Final complete summary text
+ */
+ text: string;
+
+ /**
+ * Event type identifier, always "response.reasoning_summary_text.done"
+ */
+ type: 'response.reasoning_summary_text.done';
}
- export namespace OpenAIResponseMessage {
+ /**
+ * Streaming event for incremental refusal text updates.
+ */
+ export interface OpenAIResponseObjectStreamResponseRefusalDelta {
/**
- * Text content for input messages in OpenAI response format.
+ * Index position of the content part
*/
- export interface OpenAIResponseInputMessageContentText {
- /**
- * The text content of the input message
- */
- text: string;
+ content_index: number;
- /**
- * Content type identifier, always "input_text"
- */
- type: 'input_text';
- }
+ /**
+ * Incremental refusal text being added
+ */
+ delta: string;
/**
- * Image content for input messages in OpenAI response format.
+ * Unique identifier of the output item
*/
- export interface OpenAIResponseInputMessageContentImage {
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.refusal.delta"
+ */
+ type: 'response.refusal.delta';
+ }
+
+ /**
+ * Streaming event for when refusal text is completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseRefusalDone {
+ /**
+ * Index position of the content part
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the output item
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Final complete refusal text
+ */
+ refusal: string;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.refusal.done"
+ */
+ type: 'response.refusal.done';
+ }
+
+ /**
+ * Streaming event for when an annotation is added to output text.
+ */
+ export interface OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded {
+ /**
+ * The annotation object being added
+ */
+ annotation:
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded.OpenAIResponseAnnotationFilePath;
+
+ /**
+ * Index of the annotation within the content part
+ */
+ annotation_index: number;
+
+ /**
+ * Index position of the content part within the output item
+ */
+ content_index: number;
+
+ /**
+ * Unique identifier of the item to which the annotation is being added
+ */
+ item_id: string;
+
+ /**
+ * Index position of the output item in the response's output array
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.output_text.annotation.added"
+ */
+ type: 'response.output_text.annotation.added';
+ }
+
+ export namespace OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
/**
- * Level of detail for image processing, can be "low", "high", or "auto"
+ * Unique identifier of the referenced file
*/
- detail: 'low' | 'high' | 'auto';
+ file_id: string;
/**
- * Content type identifier, always "input_image"
+ * Name of the referenced file
*/
- type: 'input_image';
+ filename: string;
/**
- * (Optional) URL of the image content
+ * Position index of the citation within the content
*/
- image_url?: string;
- }
-
- export interface UnionMember2 {
- annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
- >;
+ index: number;
- text: string;
-
- type: 'output_text';
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
}
- export namespace UnionMember2 {
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
/**
- * File citation annotation for referencing specific files in response content.
+ * End position of the citation span in the content
*/
- export interface OpenAIResponseAnnotationFileCitation {
- /**
- * Unique identifier of the referenced file
- */
- file_id: string;
+ end_index: number;
- /**
- * Name of the referenced file
- */
- filename: string;
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
- /**
- * Position index of the citation within the content
- */
- index: number;
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
- /**
- * Annotation type identifier, always "file_citation"
- */
- type: 'file_citation';
- }
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
/**
- * URL citation annotation for referencing external web resources.
+ * URL of the referenced web resource
*/
- export interface OpenAIResponseAnnotationCitation {
- /**
- * End position of the citation span in the content
- */
- end_index: number;
+ url: string;
+ }
- /**
- * Start position of the citation span in the content
- */
- start_index: number;
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
- /**
- * Title of the referenced web resource
- */
- title: string;
+ end_index: number;
- /**
- * Annotation type identifier, always "url_citation"
- */
- type: 'url_citation';
+ file_id: string;
- /**
- * URL of the referenced web resource
- */
- url: string;
- }
+ filename: string;
- export interface OpenAIResponseAnnotationContainerFileCitation {
- container_id: string;
+ start_index: number;
- end_index: number;
+ type: 'container_file_citation';
+ }
- file_id: string;
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
- filename: string;
+ index: number;
- start_index: number;
+ type: 'file_path';
+ }
+ }
- type: 'container_file_citation';
- }
+ /**
+ * Streaming event for file search calls in progress.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallInProgress {
+ /**
+ * Unique identifier of the file search call
+ */
+ item_id: string;
- export interface OpenAIResponseAnnotationFilePath {
- file_id: string;
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
- index: number;
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
- type: 'file_path';
- }
- }
+ /**
+ * Event type identifier, always "response.file_search_call.in_progress"
+ */
+ type: 'response.file_search_call.in_progress';
}
/**
- * Corresponds to the various Message types in the Responses API. They are all
- * under one type because the Responses API gives them all the same "type" value,
- * and there is no way to tell them apart in certain scenarios.
+ * Streaming event for file search currently searching.
*/
- export interface OpenAIResponseMessage {
- content:
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallSearching {
+ /**
+ * Unique identifier of the file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.searching"
+ */
+ type: 'response.file_search_call.searching';
+ }
+
+ /**
+ * Streaming event for completed file search calls.
+ */
+ export interface OpenAIResponseObjectStreamResponseFileSearchCallCompleted {
+ /**
+ * Unique identifier of the completed file search call
+ */
+ item_id: string;
+
+ /**
+ * Index position of the item in the output list
+ */
+ output_index: number;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.file_search_call.completed"
+ */
+ type: 'response.file_search_call.completed';
+ }
+
+ /**
+ * Streaming event emitted when a response ends in an incomplete state.
+ */
+ export interface OpenAIResponseObjectStreamResponseIncomplete {
+ /**
+ * Response object describing the incomplete state
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.incomplete"
+ */
+ type: 'response.incomplete';
+ }
+
+ /**
+ * Streaming event emitted when a response fails.
+ */
+ export interface OpenAIResponseObjectStreamResponseFailed {
+ /**
+ * Response object describing the failure
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Sequential number for ordering streaming events
+ */
+ sequence_number: number;
+
+ /**
+ * Event type identifier, always "response.failed"
+ */
+ type: 'response.failed';
+ }
+
+ /**
+ * Streaming event indicating a response has been completed.
+ */
+ export interface OpenAIResponseObjectStreamResponseCompleted {
+ /**
+ * Completed response object
+ */
+ response: ResponsesAPI.ResponseObject;
+
+ /**
+ * Event type identifier, always "response.completed"
+ */
+ type: 'response.completed';
+ }
+}
+
+/**
+ * OpenAI response object extended with input context information.
+ */
+export interface ResponseListResponse {
+ /**
+ * Unique identifier for this response
+ */
+ id: string;
+
+ /**
+ * Unix timestamp when the response was created
+ */
+ created_at: number;
+
+ /**
+ * List of input items that led to this response
+ */
+ input: Array<
+ | ResponseListResponse.OpenAIResponseMessage
+ | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
+ | ResponseListResponse.OpenAIResponseMcpApprovalRequest
+ | ResponseListResponse.OpenAIResponseInputFunctionToolCallOutput
+ | ResponseListResponse.OpenAIResponseMcpApprovalResponse
+ | ResponseListResponse.OpenAIResponseMessage
+ >;
+
+ /**
+ * Model identifier used for generation
+ */
+ model: string;
+
+ /**
+ * Object type identifier, always "response"
+ */
+ object: 'response';
+
+ /**
+ * List of generated output items (messages, tool calls, etc.)
+ */
+ output: Array<
+ | ResponseListResponse.OpenAIResponseMessage
+ | ResponseListResponse.OpenAIResponseOutputMessageWebSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFileSearchToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageFunctionToolCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpCall
+ | ResponseListResponse.OpenAIResponseOutputMessageMcpListTools
+ | ResponseListResponse.OpenAIResponseMcpApprovalRequest
+ >;
+
+ /**
+ * Whether tool calls can be executed in parallel
+ */
+ parallel_tool_calls: boolean;
+
+ /**
+ * Current status of the response generation
+ */
+ status: string;
+
+ /**
+ * Text formatting configuration for the response
+ */
+ text: ResponseListResponse.Text;
+
+ /**
+ * (Optional) Error details if the response generation failed
+ */
+ error?: ResponseListResponse.Error;
+
+ /**
+ * (Optional) System message inserted into the model's context
+ */
+ instructions?: string;
+
+ /**
+ * (Optional) ID of the previous response in a conversation
+ */
+ previous_response_id?: string;
+
+ /**
+ * (Optional) Reference to a prompt template and its variables.
+ */
+ prompt?: ResponseListResponse.Prompt;
+
+ /**
+ * (Optional) Sampling temperature used for generation
+ */
+ temperature?: number;
+
+ /**
+ * (Optional) An array of tools the model may call while generating a response.
+ */
+ tools?: Array<
+ | ResponseListResponse.OpenAIResponseInputToolWebSearch
+ | ResponseListResponse.OpenAIResponseInputToolFileSearch
+ | ResponseListResponse.OpenAIResponseInputToolFunction
+ | ResponseListResponse.OpenAIResponseToolMcp
+ >;
+
+ /**
+ * (Optional) Nucleus sampling parameter used for generation
+ */
+ top_p?: number;
+
+ /**
+ * (Optional) Truncation strategy applied to the response
+ */
+ truncation?: string;
+
+ /**
+ * (Optional) Token usage information for the response
+ */
+ usage?: ResponseListResponse.Usage;
+}
+
+export namespace ResponseListResponse {
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
| string
| Array<
| OpenAIResponseMessage.OpenAIResponseInputMessageContentText
| OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
>
- | Array;
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
role: 'system' | 'developer' | 'user' | 'assistant';
@@ -2202,7 +3313,1153 @@ export namespace ResponseListResponse {
status?: string;
}
- export namespace OpenAIResponseMessage {
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * This represents the output of a function call that gets passed back to the
+ * model.
+ */
+ export interface OpenAIResponseInputFunctionToolCallOutput {
+ call_id: string;
+
+ output: string;
+
+ type: 'function_call_output';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ /**
+ * A response to an MCP approval request.
+ */
+ export interface OpenAIResponseMcpApprovalResponse {
+ approval_request_id: string;
+
+ approve: boolean;
+
+ type: 'mcp_approval_response';
+
+ id?: string;
+
+ reason?: string;
+ }
+
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Corresponds to the various Message types in the Responses API. They are all
+ * under one type because the Responses API gives them all the same "type" value,
+ * and there is no way to tell them apart in certain scenarios.
+ */
+ export interface OpenAIResponseMessage {
+ content:
+ | string
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentText
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentImage
+ | OpenAIResponseMessage.OpenAIResponseInputMessageContentFile
+ >
+ | Array<
+ | OpenAIResponseMessage.OpenAIResponseOutputMessageContentOutputText
+ | OpenAIResponseMessage.OpenAIResponseContentPartRefusal
+ >;
+
+ role: 'system' | 'developer' | 'user' | 'assistant';
+
+ type: 'message';
+
+ id?: string;
+
+ status?: string;
+ }
+
+ export namespace OpenAIResponseMessage {
+ /**
+ * Text content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentText {
+ /**
+ * The text content of the input message
+ */
+ text: string;
+
+ /**
+ * Content type identifier, always "input_text"
+ */
+ type: 'input_text';
+ }
+
+ /**
+ * Image content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentImage {
+ /**
+ * Level of detail for image processing, can be "low", "high", or "auto"
+ */
+ detail: 'low' | 'high' | 'auto';
+
+ /**
+ * Content type identifier, always "input_image"
+ */
+ type: 'input_image';
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * (Optional) URL of the image content
+ */
+ image_url?: string;
+ }
+
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
+ /**
+ * The type of the input item. Always `input_file`.
+ */
+ type: 'input_file';
+
+ /**
+ * The data of the file to be sent to the model.
+ */
+ file_data?: string;
+
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
+
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
+ }
+
+ export interface OpenAIResponseOutputMessageContentOutputText {
+ annotations: Array<
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationContainerFileCitation
+ | OpenAIResponseOutputMessageContentOutputText.OpenAIResponseAnnotationFilePath
+ >;
+
+ text: string;
+
+ type: 'output_text';
+ }
+
+ export namespace OpenAIResponseOutputMessageContentOutputText {
+ /**
+ * File citation annotation for referencing specific files in response content.
+ */
+ export interface OpenAIResponseAnnotationFileCitation {
+ /**
+ * Unique identifier of the referenced file
+ */
+ file_id: string;
+
+ /**
+ * Name of the referenced file
+ */
+ filename: string;
+
+ /**
+ * Position index of the citation within the content
+ */
+ index: number;
+
+ /**
+ * Annotation type identifier, always "file_citation"
+ */
+ type: 'file_citation';
+ }
+
+ /**
+ * URL citation annotation for referencing external web resources.
+ */
+ export interface OpenAIResponseAnnotationCitation {
+ /**
+ * End position of the citation span in the content
+ */
+ end_index: number;
+
+ /**
+ * Start position of the citation span in the content
+ */
+ start_index: number;
+
+ /**
+ * Title of the referenced web resource
+ */
+ title: string;
+
+ /**
+ * Annotation type identifier, always "url_citation"
+ */
+ type: 'url_citation';
+
+ /**
+ * URL of the referenced web resource
+ */
+ url: string;
+ }
+
+ export interface OpenAIResponseAnnotationContainerFileCitation {
+ container_id: string;
+
+ end_index: number;
+
+ file_id: string;
+
+ filename: string;
+
+ start_index: number;
+
+ type: 'container_file_citation';
+ }
+
+ export interface OpenAIResponseAnnotationFilePath {
+ file_id: string;
+
+ index: number;
+
+ type: 'file_path';
+ }
+ }
+
+ /**
+ * Refusal content within a streamed response part.
+ */
+ export interface OpenAIResponseContentPartRefusal {
+ /**
+ * Refusal text supplied by the model
+ */
+ refusal: string;
+
+ /**
+ * Content part type identifier, always "refusal"
+ */
+ type: 'refusal';
+ }
+ }
+
+ /**
+ * Web search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageWebSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * Current status of the web search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "web_search_call"
+ */
+ type: 'web_search_call';
+ }
+
+ /**
+ * File search tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Unique identifier for this tool call
+ */
+ id: string;
+
+ /**
+ * List of search queries executed
+ */
+ queries: Array<string>;
+
+ /**
+ * Current status of the file search operation
+ */
+ status: string;
+
+ /**
+ * Tool call type identifier, always "file_search_call"
+ */
+ type: 'file_search_call';
+
+ /**
+ * (Optional) Search results returned by the file search operation
+ */
+ results?: Array<OpenAIResponseOutputMessageFileSearchToolCall.Result>;
+ }
+
+ export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ /**
+ * Search results returned by the file search operation.
+ */
+ export interface Result {
+ /**
+ * (Optional) Key-value attributes associated with the file
+ */
+ attributes: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Unique identifier of the file containing the result
+ */
+ file_id: string;
+
+ /**
+ * Name of the file containing the result
+ */
+ filename: string;
+
+ /**
+ * Relevance score for this search result (between 0 and 1)
+ */
+ score: number;
+
+ /**
+ * Text content of the search result
+ */
+ text: string;
+ }
+ }
+
+ /**
+ * Function tool call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageFunctionToolCall {
+ /**
+ * JSON string containing the function arguments
+ */
+ arguments: string;
+
+ /**
+ * Unique identifier for the function call
+ */
+ call_id: string;
+
+ /**
+ * Name of the function being called
+ */
+ name: string;
+
+ /**
+ * Tool call type identifier, always "function_call"
+ */
+ type: 'function_call';
+
+ /**
+ * (Optional) Additional identifier for the tool call
+ */
+ id?: string;
+
+ /**
+ * (Optional) Current status of the function call execution
+ */
+ status?: string;
+ }
+
+ /**
+ * Model Context Protocol (MCP) call output message for OpenAI responses.
+ */
+ export interface OpenAIResponseOutputMessageMcpCall {
+ /**
+ * Unique identifier for this MCP call
+ */
+ id: string;
+
+ /**
+ * JSON string containing the MCP call arguments
+ */
+ arguments: string;
+
+ /**
+ * Name of the MCP method being called
+ */
+ name: string;
+
+ /**
+ * Label identifying the MCP server handling the call
+ */
+ server_label: string;
+
+ /**
+ * Tool call type identifier, always "mcp_call"
+ */
+ type: 'mcp_call';
+
+ /**
+ * (Optional) Error message if the MCP call failed
+ */
+ error?: string;
+
+ /**
+ * (Optional) Output result from the successful MCP call
+ */
+ output?: string;
+ }
+
+ /**
+ * MCP list tools output message containing available tools from an MCP server.
+ */
+ export interface OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Unique identifier for this MCP list tools operation
+ */
+ id: string;
+
+ /**
+ * Label identifying the MCP server providing the tools
+ */
+ server_label: string;
+
+ /**
+ * List of available tools provided by the MCP server
+ */
+ tools: Array<OpenAIResponseOutputMessageMcpListTools.Tool>;
+
+ /**
+ * Tool call type identifier, always "mcp_list_tools"
+ */
+ type: 'mcp_list_tools';
+ }
+
+ export namespace OpenAIResponseOutputMessageMcpListTools {
+ /**
+ * Tool definition returned by MCP list tools operation.
+ */
+ export interface Tool {
+ /**
+ * JSON schema defining the tool's input parameters
+ */
+ input_schema: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * Name of the tool
+ */
+ name: string;
+
+ /**
+ * (Optional) Description of what the tool does
+ */
+ description?: string;
+ }
+ }
+
+ /**
+ * A request for human approval of a tool invocation.
+ */
+ export interface OpenAIResponseMcpApprovalRequest {
+ id: string;
+
+ arguments: string;
+
+ name: string;
+
+ server_label: string;
+
+ type: 'mcp_approval_request';
+ }
+
+ /**
+ * Text formatting configuration for the response
+ */
+ export interface Text {
+ /**
+ * (Optional) Text format configuration specifying output format requirements
+ */
+ format?: Text.Format;
+ }
+
+ export namespace Text {
+ /**
+ * (Optional) Text format configuration specifying output format requirements
+ */
+ export interface Format {
+ /**
+ * Must be "text", "json_schema", or "json_object" to identify the format type
+ */
+ type: 'text' | 'json_schema' | 'json_object';
+
+ /**
+ * (Optional) A description of the response format. Only used for json_schema.
+ */
+ description?: string;
+
+ /**
+ * The name of the response format. Only used for json_schema.
+ */
+ name?: string;
+
+ /**
+ * The JSON schema the response should conform to. In a Python SDK, this is often a
+ * `pydantic` model. Only used for json_schema.
+ */
+ schema?: { [key: string]: boolean | number | string | Array<unknown> | unknown | null };
+
+ /**
+ * (Optional) Whether to strictly enforce the JSON schema. If true, the response
+ * must match the schema exactly. Only used for json_schema.
+ */
+ strict?: boolean;
+ }
+ }
+
+ /**
+ * (Optional) Error details if the response generation failed
+ */
+ export interface Error {
+ /**
+ * Error code identifying the type of failure
+ */
+ code: string;
+
+ /**
+ * Human-readable error message describing the failure
+ */
+ message: string;
+ }
+
+ /**
+ * (Optional) Reference to a prompt template and its variables.
+ */
+ export interface Prompt {
+ /**
+ * Unique identifier of the prompt template
+ */
+ id: string;
+
+ /**
+ * Dictionary of variable names to OpenAIResponseInputMessageContent structure for
+ * template substitution. The substitution values can either be strings, or other
+ * Response input types like images or files.
+ */
+ variables?: {
+ [key: string]:
+ | Prompt.OpenAIResponseInputMessageContentText
+ | Prompt.OpenAIResponseInputMessageContentImage
+ | Prompt.OpenAIResponseInputMessageContentFile;
+ };
+
+ /**
+ * Version number of the prompt to use (defaults to latest if not specified)
+ */
+ version?: string;
+ }
+
+ export namespace Prompt {
/**
* Text content for input messages in OpenAI response format.
*/
@@ -2232,366 +4489,222 @@ export namespace ResponseListResponse {
*/
type: 'input_image';
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
+
/**
* (Optional) URL of the image content
*/
image_url?: string;
}
- export interface UnionMember2 {
- annotations: Array<
- | UnionMember2.OpenAIResponseAnnotationFileCitation
- | UnionMember2.OpenAIResponseAnnotationCitation
- | UnionMember2.OpenAIResponseAnnotationContainerFileCitation
- | UnionMember2.OpenAIResponseAnnotationFilePath
- >;
-
- text: string;
-
- type: 'output_text';
- }
-
- export namespace UnionMember2 {
+ /**
+ * File content for input messages in OpenAI response format.
+ */
+ export interface OpenAIResponseInputMessageContentFile {
/**
- * File citation annotation for referencing specific files in response content.
+ * The type of the input item. Always `input_file`.
*/
- export interface OpenAIResponseAnnotationFileCitation {
- /**
- * Unique identifier of the referenced file
- */
- file_id: string;
-
- /**
- * Name of the referenced file
- */
- filename: string;
-
- /**
- * Position index of the citation within the content
- */
- index: number;
-
- /**
- * Annotation type identifier, always "file_citation"
- */
- type: 'file_citation';
- }
+ type: 'input_file';
/**
- * URL citation annotation for referencing external web resources.
+ * The data of the file to be sent to the model.
*/
- export interface OpenAIResponseAnnotationCitation {
- /**
- * End position of the citation span in the content
- */
- end_index: number;
-
- /**
- * Start position of the citation span in the content
- */
- start_index: number;
-
- /**
- * Title of the referenced web resource
- */
- title: string;
-
- /**
- * Annotation type identifier, always "url_citation"
- */
- type: 'url_citation';
-
- /**
- * URL of the referenced web resource
- */
- url: string;
- }
-
- export interface OpenAIResponseAnnotationContainerFileCitation {
- container_id: string;
-
- end_index: number;
-
- file_id: string;
+ file_data?: string;
- filename: string;
-
- start_index: number;
-
- type: 'container_file_citation';
- }
-
- export interface OpenAIResponseAnnotationFilePath {
- file_id: string;
+ /**
+ * (Optional) The ID of the file to be sent to the model.
+ */
+ file_id?: string;
- index: number;
+ /**
+ * The URL of the file to be sent to the model.
+ */
+ file_url?: string;
- type: 'file_path';
- }
+ /**
+ * The name of the file to be sent to the model.
+ */
+ filename?: string;
}
}
/**
- * Web search tool call output message for OpenAI responses.
+ * Web search tool configuration for OpenAI response inputs.
*/
- export interface OpenAIResponseOutputMessageWebSearchToolCall {
- /**
- * Unique identifier for this tool call
- */
- id: string;
-
+ export interface OpenAIResponseInputToolWebSearch {
/**
- * Current status of the web search operation
+ * Web search tool type variant to use
*/
- status: string;
+ type: 'web_search' | 'web_search_preview' | 'web_search_preview_2025_03_11';
/**
- * Tool call type identifier, always "web_search_call"
+ * (Optional) Size of search context, must be "low", "medium", or "high"
*/
- type: 'web_search_call';
+ search_context_size?: string;
}
/**
- * File search tool call output message for OpenAI responses.
+ * File search tool configuration for OpenAI response inputs.
*/
- export interface OpenAIResponseOutputMessageFileSearchToolCall {
+ export interface OpenAIResponseInputToolFileSearch {
/**
- * Unique identifier for this tool call
+ * Tool type identifier, always "file_search"
*/
- id: string;
+ type: 'file_search';
/**
- * List of search queries executed
+ * List of vector store identifiers to search within
*/
- queries: Array;
+ vector_store_ids: Array;
/**
- * Current status of the file search operation
+ * (Optional) Additional filters to apply to the search
*/
- status: string;
+ filters?: { [key: string]: boolean | number | string | Array | unknown | null };
/**
- * Tool call type identifier, always "file_search_call"
+ * (Optional) Maximum number of search results to return (1-50)
*/
- type: 'file_search_call';
+ max_num_results?: number;
/**
- * (Optional) Search results returned by the file search operation
+ * (Optional) Options for ranking and scoring search results
*/
- results?: Array;
+ ranking_options?: OpenAIResponseInputToolFileSearch.RankingOptions;
}
- export namespace OpenAIResponseOutputMessageFileSearchToolCall {
+ export namespace OpenAIResponseInputToolFileSearch {
/**
- * Search results returned by the file search operation.
+ * (Optional) Options for ranking and scoring search results
*/
- export interface Result {
- /**
- * (Optional) Key-value attributes associated with the file
- */
- attributes: { [key: string]: boolean | number | string | Array | unknown | null };
-
- /**
- * Unique identifier of the file containing the result
- */
- file_id: string;
-
- /**
- * Name of the file containing the result
- */
- filename: string;
-
+ export interface RankingOptions {
/**
- * Relevance score for this search result (between 0 and 1)
+ * (Optional) Name of the ranking algorithm to use
*/
- score: number;
+ ranker?: string;
/**
- * Text content of the search result
+ * (Optional) Minimum relevance score threshold for results
*/
- text: string;
+ score_threshold?: number;
}
}
/**
- * Function tool call output message for OpenAI responses.
- */
- export interface OpenAIResponseOutputMessageFunctionToolCall {
- /**
- * JSON string containing the function arguments
- */
- arguments: string;
-
- /**
- * Unique identifier for the function call
- */
- call_id: string;
-
- /**
- * Name of the function being called
- */
- name: string;
-
- /**
- * Tool call type identifier, always "function_call"
- */
- type: 'function_call';
-
- /**
- * (Optional) Additional identifier for the tool call
- */
- id?: string;
-
- /**
- * (Optional) Current status of the function call execution
- */
- status?: string;
- }
-
- /**
- * Model Context Protocol (MCP) call output message for OpenAI responses.
+ * Function tool configuration for OpenAI response inputs.
*/
- export interface OpenAIResponseOutputMessageMcpCall {
- /**
- * Unique identifier for this MCP call
- */
- id: string;
-
- /**
- * JSON string containing the MCP call arguments
- */
- arguments: string;
-
+ export interface OpenAIResponseInputToolFunction {
/**
- * Name of the MCP method being called
+ * Name of the function that can be called
*/
name: string;
/**
- * Label identifying the MCP server handling the call
+ * Tool type identifier, always "function"
*/
- server_label: string;
+ type: 'function';
/**
- * Tool call type identifier, always "mcp_call"
+ * (Optional) Description of what the function does
*/
- type: 'mcp_call';
+ description?: string;
/**
- * (Optional) Error message if the MCP call failed
+ * (Optional) JSON schema defining the function's parameters
*/
- error?: string;
+ parameters?: { [key: string]: boolean | number | string | Array