diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index f3a4a10b..270368cb 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.3.0-alpha.4"
+  ".": "0.3.0-alpha.5"
 }
diff --git a/.stats.yml b/.stats.yml
index 5588dfb4..721d5772 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 108
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-b220f9f8667d2af8007134d0403b24452c20c9c512ca87d0b69b20b761272609.yml
-openapi_spec_hash: cde1096a830f2081d68f858f020fd53f
-config_hash: 8800bdff1a087b9d5211dda2a7b9f66f
+configured_endpoints: 115
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-8a12a05ba6892999ac506f69d5cbbc7218f28ee1a11bf8e0e548c603435bb643.yml
+openapi_spec_hash: 871ce212a98bdad4a44ec7fbf58d9fcb
+config_hash: 4c1ba9dc45c31189cd1b039d003a3544
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 76851aac..b4c4268b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
 # Changelog
 
+## 0.3.0-alpha.5 (2025-10-10)
+
+Full Changelog: [v0.3.0-alpha.4...v0.3.0-alpha.5](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.4...v0.3.0-alpha.5)
+
+### Features
+
+* **api:** several updates, including Conversations and Responses changes ([3680c9f](https://github.com/llamastack/llama-stack-client-python/commit/3680c9f61146d2a42243a89c4ab28e2161f7ce29))
+
+
+### Chores
+
+* **internal:** codegen related update ([b8bfc9c](https://github.com/llamastack/llama-stack-client-python/commit/b8bfc9c69e378c89cc806b659bf3d42e4b4e2ec5))
+
 ## 0.3.0-alpha.4 (2025-10-02)
 
 Full Changelog: [v0.3.0-alpha.3...v0.3.0-alpha.4](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.3...v0.3.0-alpha.4)
diff --git a/api.md b/api.md
index 3319e27c..af028626 100644
--- a/api.md
+++ b/api.md
@@ -102,6 +102,39 @@ Methods:
 
 - client.responses.input_items.list(response_id, \*\*params) -> InputItemListResponse
 
+# Conversations
+
+Types:
+
+```python
+from llama_stack_client.types import ConversationObject, ConversationDeleteResponse
+```
+
+Methods:
+
+- client.conversations.create(\*\*params) -> ConversationObject
+- client.conversations.retrieve(conversation_id) -> ConversationObject
+- client.conversations.update(conversation_id, \*\*params) -> ConversationObject
+- client.conversations.delete(conversation_id) -> ConversationDeleteResponse
+
+## Items
+
+Types:
+
+```python
+from llama_stack_client.types.conversations import (
+    ItemCreateResponse,
+    ItemListResponse,
+    ItemGetResponse,
+)
+```
+
+Methods:
+
+- client.conversations.items.create(conversation_id, \*\*params) -> ItemCreateResponse
+- client.conversations.items.list(conversation_id, \*\*params) -> ItemListResponse
+- client.conversations.items.get(item_id, \*, conversation_id) -> ItemGetResponse
+
 # Datasets
 
 Types:
@@ -299,12 +332,6 @@ Methods:
 
 ## OpenAI
 
-Types:
-
-```python
-from llama_stack_client.types.models import OpenAIListResponse
-```
-
 Methods:
 
 - client.models.openai.list() -> ModelListResponse
@@ -521,7 +548,7 @@ from llama_stack_client.types.alpha.post_training import (
 
 Methods:
 
-- client.alpha.post_training.job.list() -> List[Data]
+- client.alpha.post_training.job.list() -> JobListResponse
 - client.alpha.post_training.job.artifacts(\*\*params) -> JobArtifactsResponse
 - client.alpha.post_training.job.cancel(\*\*params) -> None
 - client.alpha.post_training.job.status(\*\*params) -> JobStatusResponse
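The api.md additions above map directly onto the new client surface. A minimal usage sketch (not part of the patch), assuming the standard `LlamaStackClient` entry point, a server at `http://localhost:8321`, and an OpenAI-style message dict as the conversation item shape:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Create a conversation seeded with one item and some metadata.
conversation = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
    metadata={"topic": "greeting"},
)

# Append another item, then update the metadata and delete the conversation.
client.conversations.items.create(
    conversation.id,  # assumes ConversationObject exposes an `id` field
    items=[{"type": "message", "role": "user", "content": "One more thing..."}],
)
client.conversations.update(conversation.id, metadata={"topic": "resolved"})
client.conversations.delete(conversation.id)
```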
diff --git a/pyproject.toml b/pyproject.toml
index 99c36889..660038c5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "llama_stack_client"
-version = "0.3.0-alpha.4"
+version = "0.3.0-alpha.5"
 description = "The official Python library for the llama-stack-client API"
 dynamic = ["readme"]
 license = "MIT"
diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py
index 6b8f11b2..a9252b61 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/llama_stack_client/_client.py
@@ -55,6 +55,7 @@
         completions,
         moderations,
         tool_runtime,
+        conversations,
         vector_stores,
         scoring_functions,
         synthetic_data_generation,
@@ -86,6 +87,7 @@
         AsyncSyntheticDataGenerationResource,
     )
     from .resources.tool_runtime.tool_runtime import ToolRuntimeResource, AsyncToolRuntimeResource
+    from .resources.conversations.conversations import ConversationsResource, AsyncConversationsResource
     from .resources.vector_stores.vector_stores import VectorStoresResource, AsyncVectorStoresResource
 
 __all__ = [
@@ -181,6 +183,12 @@ def responses(self) -> ResponsesResource:
 
         return ResponsesResource(self)
 
+    @cached_property
+    def conversations(self) -> ConversationsResource:
+        from .resources.conversations import ConversationsResource
+
+        return ConversationsResource(self)
+
     @cached_property
     def datasets(self) -> DatasetsResource:
         from .resources.datasets import DatasetsResource
@@ -503,6 +511,12 @@ def responses(self) -> AsyncResponsesResource:
 
         return AsyncResponsesResource(self)
 
+    @cached_property
+    def conversations(self) -> AsyncConversationsResource:
+        from .resources.conversations import AsyncConversationsResource
+
+        return AsyncConversationsResource(self)
+
     @cached_property
     def datasets(self) -> AsyncDatasetsResource:
         from .resources.datasets import AsyncDatasetsResource
@@ -774,6 +788,12 @@ def responses(self) -> responses.ResponsesResourceWithRawResponse:
 
         return ResponsesResourceWithRawResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.ConversationsResourceWithRawResponse:
+        from .resources.conversations import ConversationsResourceWithRawResponse
+
+        return ConversationsResourceWithRawResponse(self._client.conversations)
+
     @cached_property
     def datasets(self) -> datasets.DatasetsResourceWithRawResponse:
         from .resources.datasets import DatasetsResourceWithRawResponse
@@ -931,6 +951,12 @@ def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
 
         return AsyncResponsesResourceWithRawResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.AsyncConversationsResourceWithRawResponse:
+        from .resources.conversations import AsyncConversationsResourceWithRawResponse
+
+        return AsyncConversationsResourceWithRawResponse(self._client.conversations)
+
     @cached_property
     def datasets(self) -> datasets.AsyncDatasetsResourceWithRawResponse:
         from .resources.datasets import AsyncDatasetsResourceWithRawResponse
@@ -1090,6 +1116,12 @@ def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
 
         return ResponsesResourceWithStreamingResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.ConversationsResourceWithStreamingResponse:
+        from .resources.conversations import ConversationsResourceWithStreamingResponse
+
+        return ConversationsResourceWithStreamingResponse(self._client.conversations)
+
     @cached_property
     def datasets(self) -> datasets.DatasetsResourceWithStreamingResponse:
         from .resources.datasets import DatasetsResourceWithStreamingResponse
@@ -1249,6 +1281,12 @@ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
 
         return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.AsyncConversationsResourceWithStreamingResponse:
+        from .resources.conversations import AsyncConversationsResourceWithStreamingResponse
+
+        return AsyncConversationsResourceWithStreamingResponse(self._client.conversations)
+
     @cached_property
     def datasets(self) -> datasets.AsyncDatasetsResourceWithStreamingResponse:
         from .resources.datasets import AsyncDatasetsResourceWithStreamingResponse
diff --git a/src/llama_stack_client/resources/__init__.py b/src/llama_stack_client/resources/__init__.py
index 3089ae21..2ceae6c3 100644
--- a/src/llama_stack_client/resources/__init__.py
+++ b/src/llama_stack_client/resources/__init__.py
@@ -176,6 +176,14 @@
     ToolRuntimeResourceWithStreamingResponse,
     AsyncToolRuntimeResourceWithStreamingResponse,
 )
+from .conversations import (
+    ConversationsResource,
+    AsyncConversationsResource,
+    ConversationsResourceWithRawResponse,
+    AsyncConversationsResourceWithRawResponse,
+    ConversationsResourceWithStreamingResponse,
+    AsyncConversationsResourceWithStreamingResponse,
+)
 from .vector_stores import (
     VectorStoresResource,
     AsyncVectorStoresResource,
@@ -226,6 +234,12 @@
     "AsyncResponsesResourceWithRawResponse",
     "ResponsesResourceWithStreamingResponse",
     "AsyncResponsesResourceWithStreamingResponse",
+    "ConversationsResource",
+    "AsyncConversationsResource",
+    "ConversationsResourceWithRawResponse",
+    "AsyncConversationsResourceWithRawResponse",
+    "ConversationsResourceWithStreamingResponse",
+    "AsyncConversationsResourceWithStreamingResponse",
     "DatasetsResource",
     "AsyncDatasetsResource",
     "DatasetsResourceWithRawResponse",
diff --git a/src/llama_stack_client/resources/alpha/post_training/job.py b/src/llama_stack_client/resources/alpha/post_training/job.py
index 083697a1..d9b7173e 100644
--- a/src/llama_stack_client/resources/alpha/post_training/job.py
+++ b/src/llama_stack_client/resources/alpha/post_training/job.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List, Type, cast
+from typing import Type, cast
 
 import httpx
 
@@ -19,7 +19,7 @@
 from ...._wrappers import DataWrapper
 from ...._base_client import make_request_options
 from ....types.alpha.post_training import job_cancel_params, job_status_params, job_artifacts_params
-from ....types.alpha.list_post_training_jobs_response import Data
+from ....types.alpha.post_training.job_list_response import JobListResponse
 from ....types.alpha.post_training.job_status_response import JobStatusResponse
 from ....types.alpha.post_training.job_artifacts_response import JobArtifactsResponse
 
@@ -55,7 +55,7 @@ def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> List[Data]:
+    ) -> JobListResponse:
         """Get all training jobs."""
         return self._get(
             "/v1alpha/post-training/jobs",
@@ -64,9 +64,9 @@ def list(
                 extra_query=extra_query,
                 extra_body=extra_body,
                 timeout=timeout,
-                post_parser=DataWrapper[List[Data]]._unwrapper,
+                post_parser=DataWrapper[JobListResponse]._unwrapper,
             ),
-            cast_to=cast(Type[List[Data]], DataWrapper[Data]),
+            cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
         )
 
     def artifacts(
@@ -208,7 +208,7 @@ async def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> List[Data]:
+    ) -> JobListResponse:
         """Get all training jobs."""
         return await self._get(
             "/v1alpha/post-training/jobs",
@@ -217,9 +217,9 @@ async def list(
                 extra_query=extra_query,
                 extra_body=extra_body,
                 timeout=timeout,
-                post_parser=DataWrapper[List[Data]]._unwrapper,
+                post_parser=DataWrapper[JobListResponse]._unwrapper,
             ),
-            cast_to=cast(Type[List[Data]], DataWrapper[Data]),
+            cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
         )
 
     async def artifacts(
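With this change, `job.list()` returns a `JobListResponse` instead of `List[Data]`. A hedged sketch of the call (iterability of the response is an assumption based on the `DataWrapper` unwrapping):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

jobs = client.alpha.post_training.job.list()
for job in jobs:  # assumes JobListResponse is an iterable of job entries
    print(job)
```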
diff --git a/src/llama_stack_client/resources/chat/completions.py b/src/llama_stack_client/resources/chat/completions.py
index 2fb19980..bfcb0e73 100644
--- a/src/llama_stack_client/resources/chat/completions.py
+++ b/src/llama_stack_client/resources/chat/completions.py
@@ -83,9 +83,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse:
-        """
-        Generate an OpenAI-compatible chat completion for the given messages using the
-        specified model.
+        """Create chat completions.
+
+        Generate an OpenAI-compatible chat completion for the
+        given messages using the specified model.
 
         Args:
           messages: List of messages in the conversation.
@@ -179,9 +180,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Stream[ChatCompletionChunk]:
-        """
-        Generate an OpenAI-compatible chat completion for the given messages using the
-        specified model.
+        """Create chat completions.
+
+        Generate an OpenAI-compatible chat completion for the
+        given messages using the specified model.
 
         Args:
           messages: List of messages in the conversation.
@@ -275,9 +277,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
-        """
-        Generate an OpenAI-compatible chat completion for the given messages using the
-        specified model.
+        """Create chat completions.
+
+        Generate an OpenAI-compatible chat completion for the
+        given messages using the specified model.
 
         Args:
           messages: List of messages in the conversation.
@@ -424,7 +427,8 @@ def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionRetrieveResponse:
-        """
+        """Get chat completion.
+
         Describe a chat completion by its ID.
 
         Args:
@@ -461,7 +465,7 @@ def list(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> SyncOpenAICursorPage[CompletionListResponse]:
         """
-        List all chat completions.
+        List chat completions.
 
         Args:
           after: The ID of the last chat completion to return.
@@ -556,9 +560,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse:
-        """
-        Generate an OpenAI-compatible chat completion for the given messages using the
-        specified model.
+        """Create chat completions.
+
+        Generate an OpenAI-compatible chat completion for the
+        given messages using the specified model.
 
         Args:
           messages: List of messages in the conversation.
@@ -652,9 +657,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> AsyncStream[ChatCompletionChunk]:
-        """
-        Generate an OpenAI-compatible chat completion for the given messages using the
-        specified model.
+        """Create chat completions.
+
+        Generate an OpenAI-compatible chat completion for the
+        given messages using the specified model.
 
         Args:
           messages: List of messages in the conversation.
@@ -748,9 +754,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
-        """
-        Generate an OpenAI-compatible chat completion for the given messages using the
-        specified model.
+        """Create chat completions.
+
+        Generate an OpenAI-compatible chat completion for the
+        given messages using the specified model.
 
         Args:
           messages: List of messages in the conversation.
@@ -897,7 +904,8 @@ async def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionRetrieveResponse:
-        """
+        """Get chat completion.
+
         Describe a chat completion by its ID.
 
         Args:
@@ -934,7 +942,7 @@ def list(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> AsyncPaginator[CompletionListResponse, AsyncOpenAICursorPage[CompletionListResponse]]:
         """
-        List all chat completions.
+        List chat completions.
 
         Args:
           after: The ID of the last chat completion to return.
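The reworded docstrings above cover the three `create()` overloads. A sketch of the non-streaming and streaming variants (the model ID and message shape are placeholders, not values from the patch):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Non-streaming: returns a CompletionCreateResponse.
response = client.chat.completions.create(
    model="llama3.2:3b",
    messages=[{"role": "user", "content": "Say hello"}],
)

# Streaming: returns a Stream[ChatCompletionChunk] to iterate over.
for chunk in client.chat.completions.create(
    model="llama3.2:3b",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
):
    print(chunk)
```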
diff --git a/src/llama_stack_client/resources/completions.py b/src/llama_stack_client/resources/completions.py
index caeab7a1..8791e16e 100644
--- a/src/llama_stack_client/resources/completions.py
+++ b/src/llama_stack_client/resources/completions.py
@@ -76,9 +76,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse:
-        """
-        Generate an OpenAI-compatible completion for the given prompt using the
-        specified model.
+        """Create completion.
+
+        Generate an OpenAI-compatible completion for the given prompt
+        using the specified model.
 
         Args:
           model: The identifier of the model to use. The model must be registered with Llama
@@ -159,9 +160,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Stream[CompletionCreateResponse]:
-        """
-        Generate an OpenAI-compatible completion for the given prompt using the
-        specified model.
+        """Create completion.
+
+        Generate an OpenAI-compatible completion for the given prompt
+        using the specified model.
 
         Args:
           model: The identifier of the model to use. The model must be registered with Llama
@@ -242,9 +244,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse | Stream[CompletionCreateResponse]:
-        """
-        Generate an OpenAI-compatible completion for the given prompt using the
-        specified model.
+        """Create completion.
+
+        Generate an OpenAI-compatible completion for the given prompt
+        using the specified model.
 
         Args:
           model: The identifier of the model to use. The model must be registered with Llama
@@ -414,9 +417,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse:
-        """
-        Generate an OpenAI-compatible completion for the given prompt using the
-        specified model.
+        """Create completion.
+
+        Generate an OpenAI-compatible completion for the given prompt
+        using the specified model.
 
         Args:
           model: The identifier of the model to use. The model must be registered with Llama
@@ -497,9 +501,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> AsyncStream[CompletionCreateResponse]:
-        """
-        Generate an OpenAI-compatible completion for the given prompt using the
-        specified model.
+        """Create completion.
+
+        Generate an OpenAI-compatible completion for the given prompt
+        using the specified model.
 
         Args:
           model: The identifier of the model to use. The model must be registered with Llama
@@ -580,9 +585,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse | AsyncStream[CompletionCreateResponse]:
-        """
-        Generate an OpenAI-compatible completion for the given prompt using the
-        specified model.
+        """Create completion.
+
+        Generate an OpenAI-compatible completion for the given prompt
+        using the specified model.
 
         Args:
           model: The identifier of the model to use. The model must be registered with Llama
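The same docstring rewrite applies to the plain completions resource. A minimal sketch, assuming the OpenAI-compatible `prompt` parameter and a placeholder model ID:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

completion = client.completions.create(
    model="llama3.2:3b",
    prompt="Write a haiku about the ocean",
)
```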
diff --git a/src/llama_stack_client/resources/conversations/__init__.py b/src/llama_stack_client/resources/conversations/__init__.py
new file mode 100644
index 00000000..2dc61926
--- /dev/null
+++ b/src/llama_stack_client/resources/conversations/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .items import (
+    ItemsResource,
+    AsyncItemsResource,
+    ItemsResourceWithRawResponse,
+    AsyncItemsResourceWithRawResponse,
+    ItemsResourceWithStreamingResponse,
+    AsyncItemsResourceWithStreamingResponse,
+)
+from .conversations import (
+    ConversationsResource,
+    AsyncConversationsResource,
+    ConversationsResourceWithRawResponse,
+    AsyncConversationsResourceWithRawResponse,
+    ConversationsResourceWithStreamingResponse,
+    AsyncConversationsResourceWithStreamingResponse,
+)
+
+__all__ = [
+    "ItemsResource",
+    "AsyncItemsResource",
+    "ItemsResourceWithRawResponse",
+    "AsyncItemsResourceWithRawResponse",
+    "ItemsResourceWithStreamingResponse",
+    "AsyncItemsResourceWithStreamingResponse",
+    "ConversationsResource",
+    "AsyncConversationsResource",
+    "ConversationsResourceWithRawResponse",
+    "AsyncConversationsResourceWithRawResponse",
+    "ConversationsResourceWithStreamingResponse",
+    "AsyncConversationsResourceWithStreamingResponse",
+]
diff --git a/src/llama_stack_client/resources/conversations/conversations.py b/src/llama_stack_client/resources/conversations/conversations.py
new file mode 100644
index 00000000..14be8591
--- /dev/null
+++ b/src/llama_stack_client/resources/conversations/conversations.py
@@ -0,0 +1,464 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable
+
+import httpx
+
+from .items import (
+    ItemsResource,
+    AsyncItemsResource,
+    ItemsResourceWithRawResponse,
+    AsyncItemsResourceWithRawResponse,
+    ItemsResourceWithStreamingResponse,
+    AsyncItemsResourceWithStreamingResponse,
+)
+from ...types import conversation_create_params, conversation_update_params
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.conversation_object import ConversationObject
+from ...types.conversation_delete_response import ConversationDeleteResponse
+
+__all__ = ["ConversationsResource", "AsyncConversationsResource"]
+
+
+class ConversationsResource(SyncAPIResource):
+    @cached_property
+    def items(self) -> ItemsResource:
+        return ItemsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ConversationsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+        """
+        return ConversationsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ConversationsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+        """
+        return ConversationsResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        items: Iterable[conversation_create_params.Item] | Omit = omit,
+        metadata: Dict[str, str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationObject:
+        """
+        Create a conversation.
+
+        Args:
+          items: Initial items to include in the conversation context.
+
+          metadata: Set of key-value pairs that can be attached to an object.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v1/conversations",
+            body=maybe_transform(
+                {
+                    "items": items,
+                    "metadata": metadata,
+                },
+                conversation_create_params.ConversationCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationObject,
+        )
+
+    def retrieve(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationObject:
+        """
+        Get a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._get(
+            f"/v1/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationObject,
+        )
+
+    def update(
+        self,
+        conversation_id: str,
+        *,
+        metadata: Dict[str, str],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationObject:
+        """
+        Update a conversation's metadata with the given ID.
+
+        Args:
+          metadata: Set of key-value pairs that can be attached to an object.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._post(
+            f"/v1/conversations/{conversation_id}",
+            body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationObject,
+        )
+
+    def delete(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationDeleteResponse:
+        """
+        Delete a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._delete(
+            f"/v1/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationDeleteResponse,
+        )
+
+
+class AsyncConversationsResource(AsyncAPIResource):
+    @cached_property
+    def items(self) -> AsyncItemsResource:
+        return AsyncItemsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncConversationsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncConversationsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncConversationsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+        """
+        return AsyncConversationsResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        items: Iterable[conversation_create_params.Item] | Omit = omit,
+        metadata: Dict[str, str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationObject:
+        """
+        Create a conversation.
+
+        Args:
+          items: Initial items to include in the conversation context.
+
+          metadata: Set of key-value pairs that can be attached to an object.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v1/conversations",
+            body=await async_maybe_transform(
+                {
+                    "items": items,
+                    "metadata": metadata,
+                },
+                conversation_create_params.ConversationCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationObject,
+        )
+
+    async def retrieve(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationObject:
+        """
+        Get a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._get(
+            f"/v1/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationObject,
+        )
+
+    async def update(
+        self,
+        conversation_id: str,
+        *,
+        metadata: Dict[str, str],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationObject:
+        """
+        Update a conversation's metadata with the given ID.
+
+        Args:
+          metadata: Set of key-value pairs that can be attached to an object.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._post(
+            f"/v1/conversations/{conversation_id}",
+            body=await async_maybe_transform(
+                {"metadata": metadata}, conversation_update_params.ConversationUpdateParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationObject,
+        )
+
+    async def delete(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ConversationDeleteResponse:
+        """
+        Delete a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._delete(
+            f"/v1/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationDeleteResponse,
+        )
+
+
+class ConversationsResourceWithRawResponse:
+    def __init__(self, conversations: ConversationsResource) -> None:
+        self._conversations = conversations
+
+        self.create = to_raw_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = to_raw_response_wrapper(
+            conversations.update,
+        )
+        self.delete = to_raw_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> ItemsResourceWithRawResponse:
+        return ItemsResourceWithRawResponse(self._conversations.items)
+
+
+class AsyncConversationsResourceWithRawResponse:
+    def __init__(self, conversations: AsyncConversationsResource) -> None:
+        self._conversations = conversations
+
+        self.create = async_to_raw_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = async_to_raw_response_wrapper(
+            conversations.update,
+        )
+        self.delete = async_to_raw_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> AsyncItemsResourceWithRawResponse:
+        return AsyncItemsResourceWithRawResponse(self._conversations.items)
+
+
+class ConversationsResourceWithStreamingResponse:
+    def __init__(self, conversations: ConversationsResource) -> None:
+        self._conversations = conversations
+
+        self.create = to_streamed_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            conversations.update,
+        )
+        self.delete = to_streamed_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> ItemsResourceWithStreamingResponse:
+        return ItemsResourceWithStreamingResponse(self._conversations.items)
+
+
+class AsyncConversationsResourceWithStreamingResponse:
+    def __init__(self, conversations: AsyncConversationsResource) -> None:
+        self._conversations = conversations
+
+        self.create = async_to_streamed_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            conversations.update,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> AsyncItemsResourceWithStreamingResponse:
+        return AsyncItemsResourceWithStreamingResponse(self._conversations.items)
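The wrapper classes at the end of this file back the `.with_raw_response` and `.with_streaming_response` accessors described in the property docstrings. A hedged sketch (the conversation ID is a placeholder; `.parse()` and `.headers` follow the usual Stainless raw-response surface):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

raw = client.conversations.with_raw_response.retrieve("conv_123")
print(raw.headers)          # httpx-style response headers
conversation = raw.parse()  # parsed ConversationObject

with client.conversations.with_streaming_response.retrieve("conv_123") as response:
    print(response.headers)  # the body is not eagerly read here
```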
diff --git a/src/llama_stack_client/resources/conversations/items.py b/src/llama_stack_client/resources/conversations/items.py
new file mode 100644
index 00000000..76d246f1
--- /dev/null
+++ b/src/llama_stack_client/resources/conversations/items.py
@@ -0,0 +1,418 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, List, Union, Iterable, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.conversations import item_list_params, item_create_params
+from ...types.conversations.item_get_response import ItemGetResponse
+from ...types.conversations.item_list_response import ItemListResponse
+from ...types.conversations.item_create_response import ItemCreateResponse
+
+__all__ = ["ItemsResource", "AsyncItemsResource"]
+
+
+class ItemsResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ItemsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+        """
+        return ItemsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ItemsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+        """
+        return ItemsResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        conversation_id: str,
+        *,
+        items: Iterable[item_create_params.Item],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ItemCreateResponse:
+        """
+        Create items in the conversation.
+
+        Args:
+          items: Items to include in the conversation context.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._post(
+            f"/v1/conversations/{conversation_id}/items",
+            body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ItemCreateResponse,
+        )
+
+    def list(
+        self,
+        conversation_id: str,
+        *,
+        after: Union[str, object],
+        include: Union[
+            List[
+                Literal[
+                    "code_interpreter_call.outputs",
+                    "computer_call_output.output.image_url",
+                    "file_search_call.results",
+                    "message.input_image.image_url",
+                    "message.output_text.logprobs",
+                    "reasoning.encrypted_content",
+                ]
+            ],
+            object,
+        ],
+        limit: Union[int, object],
+        order: Union[Literal["asc", "desc"], object],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ItemListResponse:
+        """
+        List items in the conversation.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          include: Specify additional output data to include in the response.
+
+          limit: A limit on the number of objects to be returned (1-100, default 20).
+
+          order: The order to return items in (asc or desc, default desc).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._get(
+            f"/v1/conversations/{conversation_id}/items",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "include": include,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    item_list_params.ItemListParams,
+                ),
+            ),
+            cast_to=ItemListResponse,
+        )
+
+    def get(
+        self,
+        item_id: str,
+        *,
+        conversation_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ItemGetResponse:
+        """
+        Retrieve a conversation item.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        if not item_id:
+            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+        return cast(
+            ItemGetResponse,
+            self._get(
+                f"/v1/conversations/{conversation_id}/items/{item_id}",
+                options=make_request_options(
+                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                ),
+                cast_to=cast(Any, ItemGetResponse),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
+
+
+class AsyncItemsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncItemsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncItemsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncItemsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+        """
+        return AsyncItemsResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        conversation_id: str,
+        *,
+        items: Iterable[item_create_params.Item],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ItemCreateResponse:
+        """
+        Create items in the conversation.
+
+        Args:
+          items: Items to include in the conversation context.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._post(
+            f"/v1/conversations/{conversation_id}/items",
+            body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ItemCreateResponse,
+        )
+
+    async def list(
+        self,
+        conversation_id: str,
+        *,
+        after: Union[str, object],
+        include: Union[
+            List[
+                Literal[
+                    "code_interpreter_call.outputs",
+                    "computer_call_output.output.image_url",
+                    "file_search_call.results",
+                    "message.input_image.image_url",
+                    "message.output_text.logprobs",
+                    "reasoning.encrypted_content",
+                ]
+            ],
+            object,
+        ],
+        limit: Union[int, object],
+        order: Union[Literal["asc", "desc"], object],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ItemListResponse:
+        """
+        List items in the conversation.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          include: Specify additional output data to include in the response.
+
+          limit: A limit on the number of objects to be returned (1-100, default 20).
+
+          order: The order to return items in (asc or desc, default desc).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._get(
+            f"/v1/conversations/{conversation_id}/items",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "after": after,
+                        "include": include,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    item_list_params.ItemListParams,
+                ),
+            ),
+            cast_to=ItemListResponse,
+        )
+
+    async def get(
+        self,
+        item_id: str,
+        *,
+        conversation_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ItemGetResponse:
+        """
+        Retrieve a conversation item.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        if not item_id:
+            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+        return cast(
+            ItemGetResponse,
+            await self._get(
+                f"/v1/conversations/{conversation_id}/items/{item_id}",
+                options=make_request_options(
+                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                ),
+                cast_to=cast(Any, ItemGetResponse),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
+
+
+class ItemsResourceWithRawResponse:
+    def __init__(self, items: ItemsResource) -> None:
+        self._items = items
+
+        self.create = to_raw_response_wrapper(
+            items.create,
+        )
+        self.list = to_raw_response_wrapper(
+            items.list,
+        )
+        self.get = to_raw_response_wrapper(
+            items.get,
+        )
+
+
+class AsyncItemsResourceWithRawResponse:
+    def __init__(self, items: AsyncItemsResource) -> None:
+        self._items = items
+
+        self.create = async_to_raw_response_wrapper(
+            items.create,
+        )
+        self.list = async_to_raw_response_wrapper(
+            items.list,
+        )
+        self.get = async_to_raw_response_wrapper(
+            items.get,
+        )
+
+
+class ItemsResourceWithStreamingResponse:
+    def __init__(self, items: ItemsResource) -> None:
+        self._items = items
+
+        self.create = to_streamed_response_wrapper(
+            items.create,
+        )
+        self.list = to_streamed_response_wrapper(
+            items.list,
+        )
+        self.get = to_streamed_response_wrapper(
+            items.get,
+        )
+
+
+class AsyncItemsResourceWithStreamingResponse:
+    def __init__(self, items: AsyncItemsResource) -> None:
+        self._items = items
+
+        self.create = async_to_streamed_response_wrapper(
+            items.create,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            items.list,
+        )
+        self.get = async_to_streamed_response_wrapper(
+            items.get,
+        )
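
For orientation, a minimal usage sketch of the new `conversations.items` resource generated above; the base URL and IDs are placeholders, and `order="desc"` assumes the usual ascending/descending literals for the `order` query parameter handled by ItemListParams.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

conversation_id = "conv_123"  # placeholder; normally returned by client.conversations.create()

# Page through items in the conversation; `limit` and `order` map to the
# query parameters shown above.
page = client.conversations.items.list(conversation_id, limit=10, order="desc")
print(page)

# Fetch one item; note that `conversation_id` is keyword-only on `get`.
item = client.conversations.items.get("item_456", conversation_id=conversation_id)
print(item)
```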
diff --git a/src/llama_stack_client/resources/embeddings.py b/src/llama_stack_client/resources/embeddings.py
index 29cd69d8..512695f6 100644
--- a/src/llama_stack_client/resources/embeddings.py
+++ b/src/llama_stack_client/resources/embeddings.py
@@ -58,9 +58,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CreateEmbeddingsResponse:
-        """
-        Generate OpenAI-compatible embeddings for the given input using the specified
-        model.
+        """Create embeddings.
+
+        Generate OpenAI-compatible embeddings for the given input
+        using the specified model.
 
         Args:
           input: Input text to embed, encoded as a string or array of strings. To embed multiple
@@ -140,9 +141,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CreateEmbeddingsResponse:
-        """
-        Generate OpenAI-compatible embeddings for the given input using the specified
-        model.
+        """Create embeddings.
+
+        Generate OpenAI-compatible embeddings for the given input
+        using the specified model.
 
         Args:
           input: Input text to embed, encoded as a string or array of strings. To embed multiple
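
For the reworded embeddings docstrings above, a minimal call is sketched below; the model identifier is illustrative and the `.data` access assumes the OpenAI-compatible response shape.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# The model must be an embedding model registered on the server; this ID is illustrative.
response = client.embeddings.create(
    model="all-MiniLM-L6-v2",
    input=["The quick brown fox", "jumped over the lazy dog"],
)
print(len(response.data), "embeddings returned")
```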
diff --git a/src/llama_stack_client/resources/files.py b/src/llama_stack_client/resources/files.py
index 39add811..64071d91 100644
--- a/src/llama_stack_client/resources/files.py
+++ b/src/llama_stack_client/resources/files.py
@@ -59,10 +59,11 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> File:
-        """Upload a file that can be used across various endpoints.
+        """Upload file.
 
-        The file upload should
-        be a multipart form request with:
+        Upload a file that can be used across various endpoints.
+
+        The file upload should be a multipart form request with:
 
         - file: The File object (not file name) to be uploaded.
         - purpose: The intended purpose of the uploaded file.
@@ -118,7 +119,8 @@ def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> File:
-        """
+        """Retrieve file.
+
         Returns information about a specific file.
 
         Args:
@@ -154,7 +156,8 @@ def list(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> SyncOpenAICursorPage[File]:
-        """
+        """List files.
+
         Returns a list of files that belong to the user's organization.
 
         Args:
@@ -212,7 +215,7 @@ def delete(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> DeleteFileResponse:
         """
-        Delete a file.
+        Delete file.
 
         Args:
           extra_headers: Send extra headers
@@ -244,7 +247,8 @@ def content(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> object:
-        """
+        """Retrieve file content.
+
         Returns the contents of the specified file.
 
         Args:
@@ -300,10 +304,11 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> File:
-        """Upload a file that can be used across various endpoints.
+        """Upload file.
 
-        The file upload should
-        be a multipart form request with:
+        Upload a file that can be used across various endpoints.
+
+        The file upload should be a multipart form request with:
 
         - file: The File object (not file name) to be uploaded.
         - purpose: The intended purpose of the uploaded file.
@@ -359,7 +364,8 @@ async def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> File:
-        """
+        """Retrieve file.
+
         Returns information about a specific file.
 
         Args:
@@ -395,7 +401,8 @@ def list(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> AsyncPaginator[File, AsyncOpenAICursorPage[File]]:
-        """
+        """List files.
+
         Returns a list of files that belong to the user's organization.
 
         Args:
@@ -453,7 +460,7 @@ async def delete(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> DeleteFileResponse:
         """
-        Delete a file.
+        Delete file.
 
         Args:
           extra_headers: Send extra headers
@@ -485,7 +492,8 @@ async def content(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> object:
-        """
+        """Retrieve file content.
+
         Returns the contents of the specified file.
 
         Args:
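
The retitled file docstrings above cover the full upload, retrieve, and delete cycle; a sketch follows, where the `purpose` value is an assumption about what the connected server accepts.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Upload as a multipart form request; the purpose value is an assumption.
with open("notes.txt", "rb") as fh:
    uploaded = client.files.create(file=fh, purpose="assistants")

info = client.files.retrieve(uploaded.id)   # metadata for the stored file
raw = client.files.content(uploaded.id)     # raw file contents
client.files.delete(uploaded.id)            # clean up
```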
diff --git a/src/llama_stack_client/resources/inspect.py b/src/llama_stack_client/resources/inspect.py
index cca2f501..45c94e8b 100644
--- a/src/llama_stack_client/resources/inspect.py
+++ b/src/llama_stack_client/resources/inspect.py
@@ -50,7 +50,7 @@ def health(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> HealthInfo:
-        """Get the current health status of the service."""
+        """Get health status. Get the current health status of the service."""
         return self._get(
             "/v1/health",
             options=make_request_options(
@@ -69,7 +69,7 @@ def version(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> VersionInfo:
-        """Get the version of the service."""
+        """Get version. Get the version of the service."""
         return self._get(
             "/v1/version",
             options=make_request_options(
@@ -109,7 +109,7 @@ async def health(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> HealthInfo:
-        """Get the current health status of the service."""
+        """Get health status. Get the current health status of the service."""
         return await self._get(
             "/v1/health",
             options=make_request_options(
@@ -128,7 +128,7 @@ async def version(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> VersionInfo:
-        """Get the version of the service."""
+        """Get version. Get the version of the service."""
         return await self._get(
             "/v1/version",
             options=make_request_options(
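
The inspect endpoints above take no parameters, so usage is a pair of bare calls; the `status` and `version` attribute names are assumptions based on the HealthInfo and VersionInfo return types.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

health = client.inspect.health()
version = client.inspect.version()
print(health.status, version.version)  # attribute names assumed from the response models
```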
diff --git a/src/llama_stack_client/resources/models/models.py b/src/llama_stack_client/resources/models/models.py
index f044c50d..376b6f33 100644
--- a/src/llama_stack_client/resources/models/models.py
+++ b/src/llama_stack_client/resources/models/models.py
@@ -69,7 +69,8 @@ def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Model:
-        """
+        """Get model.
+
         Get a model by its identifier.
 
         Args:
@@ -129,7 +130,8 @@ def register(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Model:
-        """
+        """Register model.
+
         Register a model.
 
         Args:
@@ -180,7 +182,8 @@ def unregister(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> None:
-        """
+        """Unregister model.
+
         Unregister a model.
 
         Args:
@@ -239,7 +242,8 @@ async def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Model:
-        """
+        """Get model.
+
         Get a model by its identifier.
 
         Args:
@@ -299,7 +303,8 @@ async def register(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Model:
-        """
+        """Register model.
+
         Register a model.
 
         Args:
@@ -350,7 +355,8 @@ async def unregister(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> None:
-        """
+        """Unregister model.
+
         Unregister a model.
 
         Args:
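
A sketch of the model lifecycle covered by the retitled docstrings above; the register keyword names and the identifiers are assumptions, since the parameter hunks are elided from this diff.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Keyword names (model_id, provider_id) and IDs below are illustrative assumptions.
model = client.models.register(model_id="llama3.2:3b", provider_id="ollama")
fetched = client.models.retrieve("llama3.2:3b")
client.models.unregister("llama3.2:3b")
```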
diff --git a/src/llama_stack_client/resources/moderations.py b/src/llama_stack_client/resources/moderations.py
index a73dc85a..199a8d0a 100644
--- a/src/llama_stack_client/resources/moderations.py
+++ b/src/llama_stack_client/resources/moderations.py
@@ -55,8 +55,10 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CreateResponse:
-        """
-        Classifies if text and/or image inputs are potentially harmful.
+        """Create moderation.
+
+        Classifies if text and/or image inputs are potentially
+        harmful.
 
         Args:
           input: Input (or inputs) to classify. Can be a single string, an array of strings, or
@@ -120,8 +122,10 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CreateResponse:
-        """
-        Classifies if text and/or image inputs are potentially harmful.
+        """Create moderation.
+
+        Classifies if text and/or image inputs are potentially
+        harmful.
 
         Args:
           input: Input (or inputs) to classify. Can be a single string, an array of strings, or
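
The moderation call documented above classifies text or image inputs as potentially harmful; a minimal sketch, with an illustrative safety model ID:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

result = client.moderations.create(
    input="I want to write a story about a heist.",
    model="llama-guard",  # illustrative; must be a registered safety model
)
print(result)
```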
diff --git a/src/llama_stack_client/resources/providers.py b/src/llama_stack_client/resources/providers.py
index 5a52e070..f8fea984 100644
--- a/src/llama_stack_client/resources/providers.py
+++ b/src/llama_stack_client/resources/providers.py
@@ -54,7 +54,8 @@ def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ProviderInfo:
-        """
+        """Get provider.
+
         Get detailed information about a specific provider.
 
         Args:
@@ -86,7 +87,7 @@ def list(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ProviderListResponse:
-        """List all available providers."""
+        """List providers. List all available providers."""
         return self._get(
             "/v1/providers",
             options=make_request_options(
@@ -131,7 +132,8 @@ async def retrieve(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ProviderInfo:
-        """
+        """Get provider.
+
         Get detailed information about a specific provider.
 
         Args:
@@ -163,7 +165,7 @@ async def list(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ProviderListResponse:
-        """List all available providers."""
+        """List providers. List all available providers."""
         return await self._get(
             "/v1/providers",
             options=make_request_options(
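
For the provider endpoints above, listing and then drilling into one provider looks roughly like the sketch below; iterating the list response and the `provider_id` attribute are assumptions about ProviderListResponse and ProviderInfo.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

providers = client.providers.list()
for provider in providers:        # assumes the list response is iterable
    print(provider.provider_id)   # attribute name assumed from ProviderInfo

detail = client.providers.retrieve("ollama")  # illustrative provider ID
```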
diff --git a/src/llama_stack_client/resources/responses/input_items.py b/src/llama_stack_client/resources/responses/input_items.py
index a5836ba7..d6643a3b 100644
--- a/src/llama_stack_client/resources/responses/input_items.py
+++ b/src/llama_stack_client/resources/responses/input_items.py
@@ -60,7 +60,7 @@ def list(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> InputItemListResponse:
         """
-        List input items for a given OpenAI response.
+        List input items.
 
         Args:
           after: An item ID to list items after, used for pagination.
@@ -143,7 +143,7 @@ async def list(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> InputItemListResponse:
         """
-        List input items for a given OpenAI response.
+        List input items.
 
         Args:
           after: An item ID to list items after, used for pagination.
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 16e38fd0..caa60802 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -67,6 +67,7 @@ def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -84,13 +85,17 @@ def create(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseObject:
         """
-        Create a new OpenAI response.
+        Create a model response.
 
         Args:
           input: Input message(s) to create the response.
 
           model: The underlying LLM used for completions.
 
+          conversation: (Optional) The ID of a conversation to add the response to. Must begin with
+              'conv\\__'. Input and output messages will be automatically added to the
+              conversation.
+
           include: (Optional) Additional fields to include in the response.
 
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
@@ -116,6 +121,7 @@ def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: Literal[True],
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -132,13 +138,17 @@ def create(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Stream[ResponseObjectStream]:
         """
-        Create a new OpenAI response.
+        Create a model response.
 
         Args:
           input: Input message(s) to create the response.
 
           model: The underlying LLM used for completions.
 
+          conversation: (Optional) The ID of a conversation to add the response to. Must begin with
+              'conv\\__'. Input and output messages will be automatically added to the
+              conversation.
+
           include: (Optional) Additional fields to include in the response.
 
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
@@ -164,6 +174,7 @@ def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: bool,
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -180,13 +191,17 @@ def create(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseObject | Stream[ResponseObjectStream]:
         """
-        Create a new OpenAI response.
+        Create a model response.
 
         Args:
           input: Input message(s) to create the response.
 
           model: The underlying LLM used for completions.
 
+          conversation: (Optional) The ID of a conversation to add the response to. Must begin with
+              'conv\\__'. Input and output messages will be automatically added to the
+              conversation.
+
           include: (Optional) Additional fields to include in the response.
 
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
@@ -211,6 +226,7 @@ def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -233,6 +249,7 @@ def create(
                 {
                     "input": input,
                     "model": model,
+                    "conversation": conversation,
                     "include": include,
                     "instructions": instructions,
                     "max_infer_iters": max_infer_iters,
@@ -267,7 +284,7 @@ def retrieve(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseObject:
         """
-        Retrieve an OpenAI response by its ID.
+        Get a model response.
 
         Args:
           extra_headers: Send extra headers
@@ -303,7 +320,7 @@ def list(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> SyncOpenAICursorPage[ResponseListResponse]:
         """
-        List all OpenAI responses.
+        List all responses.
 
         Args:
           after: The ID of the last response to return.
@@ -355,7 +372,7 @@ def delete(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseDeleteResponse:
         """
-        Delete an OpenAI response by its ID.
+        Delete a response.
 
         Args:
           extra_headers: Send extra headers
@@ -407,6 +424,7 @@ async def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -424,13 +442,17 @@ async def create(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseObject:
         """
-        Create a new OpenAI response.
+        Create a model response.
 
         Args:
           input: Input message(s) to create the response.
 
           model: The underlying LLM used for completions.
 
+          conversation: (Optional) The ID of a conversation to add the response to. Must begin with
+              'conv\\__'. Input and output messages will be automatically added to the
+              conversation.
+
           include: (Optional) Additional fields to include in the response.
 
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
@@ -456,6 +478,7 @@ async def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: Literal[True],
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -472,13 +495,17 @@ async def create(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> AsyncStream[ResponseObjectStream]:
         """
-        Create a new OpenAI response.
+        Create a model response.
 
         Args:
           input: Input message(s) to create the response.
 
           model: The underlying LLM used for completions.
 
+          conversation: (Optional) The ID of a conversation to add the response to. Must begin with
+              'conv\\__'. Input and output messages will be automatically added to the
+              conversation.
+
           include: (Optional) Additional fields to include in the response.
 
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
@@ -504,6 +531,7 @@ async def create(
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
         stream: bool,
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -520,13 +548,17 @@ async def create(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseObject | AsyncStream[ResponseObjectStream]:
         """
-        Create a new OpenAI response.
+        Create a model response.
 
         Args:
           input: Input message(s) to create the response.
 
           model: The underlying LLM used for completions.
 
+          conversation: (Optional) The ID of a conversation to add the response to. Must begin with
+              'conv\\__'. Input and output messages will be automatically added to the
+              conversation.
+
           include: (Optional) Additional fields to include in the response.
 
           previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
@@ -551,6 +583,7 @@ async def create(
         *,
         input: Union[str, Iterable[response_create_params.InputUnionMember1]],
         model: str,
+        conversation: str | Omit = omit,
         include: SequenceNotStr[str] | Omit = omit,
         instructions: str | Omit = omit,
         max_infer_iters: int | Omit = omit,
@@ -573,6 +606,7 @@ async def create(
                 {
                     "input": input,
                     "model": model,
+                    "conversation": conversation,
                     "include": include,
                     "instructions": instructions,
                     "max_infer_iters": max_infer_iters,
@@ -607,7 +641,7 @@ async def retrieve(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseObject:
         """
-        Retrieve an OpenAI response by its ID.
+        Get a model response.
 
         Args:
           extra_headers: Send extra headers
@@ -643,7 +677,7 @@ def list(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> AsyncPaginator[ResponseListResponse, AsyncOpenAICursorPage[ResponseListResponse]]:
         """
-        List all OpenAI responses.
+        List all responses.
 
         Args:
           after: The ID of the last response to return.
@@ -695,7 +729,7 @@ async def delete(
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ResponseDeleteResponse:
         """
-        Delete an OpenAI response by its ID.
+        Delete a response.
 
         Args:
           extra_headers: Send extra headers
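
The `conversation` parameter threaded through the overloads above links a response to a conversation created via the new Conversations API, so later turns no longer need `previous_response_id`; a minimal sketch, assuming a local server and an illustrative model ID:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Create a conversation, then attach responses to it; input and output
# messages are added to the conversation automatically.
conversation = client.conversations.create(metadata={"topic": "demo"})

response = client.responses.create(
    model="llama3.2:3b",  # illustrative model ID
    input="Summarize the Conversations API in one sentence.",
    conversation=conversation.id,
)
print(response)

# A follow-up turn reuses the same conversation ID.
followup = client.responses.create(
    model="llama3.2:3b",
    input="Now give a two-sentence version.",
    conversation=conversation.id,
)
```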
diff --git a/src/llama_stack_client/resources/routes.py b/src/llama_stack_client/resources/routes.py
index 9a1e73e6..fc584215 100644
--- a/src/llama_stack_client/resources/routes.py
+++ b/src/llama_stack_client/resources/routes.py
@@ -52,7 +52,11 @@ def list(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> RouteListResponse:
-        """List all available API routes with their methods and implementing providers."""
+        """List routes.
+
+        List all available API routes with their methods and implementing
+        providers.
+        """
         return self._get(
             "/v1/inspect/routes",
             options=make_request_options(
@@ -96,7 +100,11 @@ async def list(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> RouteListResponse:
-        """List all available API routes with their methods and implementing providers."""
+        """List routes.
+
+        List all available API routes with their methods and implementing
+        providers.
+        """
         return await self._get(
             "/v1/inspect/routes",
             options=make_request_options(
diff --git a/src/llama_stack_client/resources/safety.py b/src/llama_stack_client/resources/safety.py
index e886dc08..8468a9ef 100644
--- a/src/llama_stack_client/resources/safety.py
+++ b/src/llama_stack_client/resources/safety.py
@@ -57,7 +57,8 @@ def run_shield(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> RunShieldResponse:
-        """
+        """Run shield.
+
         Run a shield.
 
         Args:
@@ -125,7 +126,8 @@ async def run_shield(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> RunShieldResponse:
-        """
+        """Run shield.
+
         Run a shield.
 
         Args:
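
The run_shield docstring change above is cosmetic; for completeness, a call might look like the sketch below, where the keyword names (shield_id, messages, params) are assumptions, since the parameter hunks are not shown in this diff.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Keyword names below are assumptions; check the generated params module for the real signature.
verdict = client.safety.run_shield(
    shield_id="llama-guard",
    messages=[{"role": "user", "content": "How do I pick a lock?"}],
    params={},
)
print(verdict)
```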
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 2b89de40..2b769b76 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -43,6 +43,7 @@
 from .scoring_fn_params import ScoringFnParams as ScoringFnParams
 from .file_create_params import FileCreateParams as FileCreateParams
 from .tool_list_response import ToolListResponse as ToolListResponse
+from .conversation_object import ConversationObject as ConversationObject
 from .list_files_response import ListFilesResponse as ListFilesResponse
 from .model_list_response import ModelListResponse as ModelListResponse
 from .route_list_response import RouteListResponse as RouteListResponse
@@ -94,6 +95,8 @@
 from .toolgroup_register_params import ToolgroupRegisterParams as ToolgroupRegisterParams
 from .vector_db_register_params import VectorDBRegisterParams as VectorDBRegisterParams
 from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
+from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
+from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
 from .create_embeddings_response import CreateEmbeddingsResponse as CreateEmbeddingsResponse
 from .scoring_score_batch_params import ScoringScoreBatchParams as ScoringScoreBatchParams
 from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
@@ -103,6 +106,7 @@
 from .telemetry_get_span_response import TelemetryGetSpanResponse as TelemetryGetSpanResponse
 from .vector_db_register_response import VectorDBRegisterResponse as VectorDBRegisterResponse
 from .vector_db_retrieve_response import VectorDBRetrieveResponse as VectorDBRetrieveResponse
+from .conversation_delete_response import ConversationDeleteResponse as ConversationDeleteResponse
 from .scoring_score_batch_response import ScoringScoreBatchResponse as ScoringScoreBatchResponse
 from .telemetry_query_spans_params import TelemetryQuerySpansParams as TelemetryQuerySpansParams
 from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
diff --git a/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py b/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py
index 746afe99..7af3bd96 100644
--- a/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py
+++ b/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py
@@ -1,15 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
 from ..._models import BaseModel
+from .post_training.job_list_response import JobListResponse
 
-__all__ = ["ListPostTrainingJobsResponse", "Data"]
-
-
-class Data(BaseModel):
-    job_uuid: str
+__all__ = ["ListPostTrainingJobsResponse"]
 
 
 class ListPostTrainingJobsResponse(BaseModel):
-    data: List[Data]
+    data: JobListResponse
diff --git a/src/llama_stack_client/types/chat/completion_create_response.py b/src/llama_stack_client/types/chat/completion_create_response.py
index 7c6b2299..8e723db3 100644
--- a/src/llama_stack_client/types/chat/completion_create_response.py
+++ b/src/llama_stack_client/types/chat/completion_create_response.py
@@ -34,6 +34,9 @@
     "OpenAIChatCompletionChoiceLogprobsContentTopLogprob",
     "OpenAIChatCompletionChoiceLogprobsRefusal",
     "OpenAIChatCompletionChoiceLogprobsRefusalTopLogprob",
+    "OpenAIChatCompletionUsage",
+    "OpenAIChatCompletionUsageCompletionTokensDetails",
+    "OpenAIChatCompletionUsagePromptTokensDetails",
 ]
 
 
@@ -283,6 +286,33 @@ class OpenAIChatCompletionChoice(BaseModel):
     """(Optional) The log probabilities for the tokens in the message"""
 
 
+class OpenAIChatCompletionUsageCompletionTokensDetails(BaseModel):
+    reasoning_tokens: Optional[int] = None
+    """Number of tokens used for reasoning (o1/o3 models)"""
+
+
+class OpenAIChatCompletionUsagePromptTokensDetails(BaseModel):
+    cached_tokens: Optional[int] = None
+    """Number of tokens retrieved from cache"""
+
+
+class OpenAIChatCompletionUsage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the completion"""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt"""
+
+    total_tokens: int
+    """Total tokens used (prompt + completion)"""
+
+    completion_tokens_details: Optional[OpenAIChatCompletionUsageCompletionTokensDetails] = None
+    """Token details for output tokens in OpenAI chat completion usage."""
+
+    prompt_tokens_details: Optional[OpenAIChatCompletionUsagePromptTokensDetails] = None
+    """Token details for prompt tokens in OpenAI chat completion usage."""
+
+
 class OpenAIChatCompletion(BaseModel):
     id: str
     """The ID of the chat completion"""
@@ -299,5 +329,8 @@ class OpenAIChatCompletion(BaseModel):
     object: Literal["chat.completion"]
     """The object type, which will be "chat.completion" """
 
+    usage: Optional[OpenAIChatCompletionUsage] = None
+    """Token usage information for the completion"""
+
 
 CompletionCreateResponse: TypeAlias = Union[OpenAIChatCompletion, ChatCompletionChunk]
diff --git a/src/llama_stack_client/types/chat/completion_list_response.py b/src/llama_stack_client/types/chat/completion_list_response.py
index e448e35c..6be72a0a 100644
--- a/src/llama_stack_client/types/chat/completion_list_response.py
+++ b/src/llama_stack_client/types/chat/completion_list_response.py
@@ -50,6 +50,9 @@
     "InputMessageOpenAIToolMessageParamContentUnionMember1",
     "InputMessageOpenAIDeveloperMessageParam",
     "InputMessageOpenAIDeveloperMessageParamContentUnionMember1",
+    "Usage",
+    "UsageCompletionTokensDetails",
+    "UsagePromptTokensDetails",
 ]
 
 
@@ -473,6 +476,33 @@ class InputMessageOpenAIDeveloperMessageParam(BaseModel):
 ]
 
 
+class UsageCompletionTokensDetails(BaseModel):
+    reasoning_tokens: Optional[int] = None
+    """Number of tokens used for reasoning (o1/o3 models)"""
+
+
+class UsagePromptTokensDetails(BaseModel):
+    cached_tokens: Optional[int] = None
+    """Number of tokens retrieved from cache"""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the completion"""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt"""
+
+    total_tokens: int
+    """Total tokens used (prompt + completion)"""
+
+    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None
+    """Token details for output tokens in OpenAI chat completion usage."""
+
+    prompt_tokens_details: Optional[UsagePromptTokensDetails] = None
+    """Token details for prompt tokens in OpenAI chat completion usage."""
+
+
 class CompletionListResponse(BaseModel):
     id: str
     """The ID of the chat completion"""
@@ -490,3 +520,6 @@ class CompletionListResponse(BaseModel):
 
     object: Literal["chat.completion"]
     """The object type, which will be "chat.completion" """
+
+    usage: Optional[Usage] = None
+    """Token usage information for the completion"""
diff --git a/src/llama_stack_client/types/chat/completion_retrieve_response.py b/src/llama_stack_client/types/chat/completion_retrieve_response.py
index 74b60c35..e71e1175 100644
--- a/src/llama_stack_client/types/chat/completion_retrieve_response.py
+++ b/src/llama_stack_client/types/chat/completion_retrieve_response.py
@@ -50,6 +50,9 @@
     "InputMessageOpenAIToolMessageParamContentUnionMember1",
     "InputMessageOpenAIDeveloperMessageParam",
     "InputMessageOpenAIDeveloperMessageParamContentUnionMember1",
+    "Usage",
+    "UsageCompletionTokensDetails",
+    "UsagePromptTokensDetails",
 ]
 
 
@@ -473,6 +476,33 @@ class InputMessageOpenAIDeveloperMessageParam(BaseModel):
 ]
 
 
+class UsageCompletionTokensDetails(BaseModel):
+    reasoning_tokens: Optional[int] = None
+    """Number of tokens used for reasoning (o1/o3 models)"""
+
+
+class UsagePromptTokensDetails(BaseModel):
+    cached_tokens: Optional[int] = None
+    """Number of tokens retrieved from cache"""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the completion"""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt"""
+
+    total_tokens: int
+    """Total tokens used (prompt + completion)"""
+
+    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None
+    """Token details for output tokens in OpenAI chat completion usage."""
+
+    prompt_tokens_details: Optional[UsagePromptTokensDetails] = None
+    """Token details for prompt tokens in OpenAI chat completion usage."""
+
+
 class CompletionRetrieveResponse(BaseModel):
     id: str
     """The ID of the chat completion"""
@@ -490,3 +520,6 @@ class CompletionRetrieveResponse(BaseModel):
 
     object: Literal["chat.completion"]
     """The object type, which will be "chat.completion" """
+
+    usage: Optional[Usage] = None
+    """Token usage information for the completion"""
diff --git a/src/llama_stack_client/types/chat_completion_chunk.py b/src/llama_stack_client/types/chat_completion_chunk.py
index 788a34ed..91cae09a 100644
--- a/src/llama_stack_client/types/chat_completion_chunk.py
+++ b/src/llama_stack_client/types/chat_completion_chunk.py
@@ -16,6 +16,9 @@
     "ChoiceLogprobsContentTopLogprob",
     "ChoiceLogprobsRefusal",
     "ChoiceLogprobsRefusalTopLogprob",
+    "Usage",
+    "UsageCompletionTokensDetails",
+    "UsagePromptTokensDetails",
 ]
 
 
@@ -113,6 +116,33 @@ class Choice(BaseModel):
     """(Optional) The log probabilities for the tokens in the message"""
 
 
+class UsageCompletionTokensDetails(BaseModel):
+    reasoning_tokens: Optional[int] = None
+    """Number of tokens used for reasoning (o1/o3 models)"""
+
+
+class UsagePromptTokensDetails(BaseModel):
+    cached_tokens: Optional[int] = None
+    """Number of tokens retrieved from cache"""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the completion"""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt"""
+
+    total_tokens: int
+    """Total tokens used (prompt + completion)"""
+
+    completion_tokens_details: Optional[UsageCompletionTokensDetails] = None
+    """Token details for output tokens in OpenAI chat completion usage."""
+
+    prompt_tokens_details: Optional[UsagePromptTokensDetails] = None
+    """Token details for prompt tokens in OpenAI chat completion usage."""
+
+
 class ChatCompletionChunk(BaseModel):
     id: str
     """The ID of the chat completion"""
@@ -128,3 +158,6 @@ class ChatCompletionChunk(BaseModel):
 
     object: Literal["chat.completion.chunk"]
     """The object type, which will be "chat.completion.chunk" """
+
+    usage: Optional[Usage] = None
+    """Token usage information (typically included in final chunk with stream_options)"""
diff --git a/src/llama_stack_client/types/conversation_create_params.py b/src/llama_stack_client/types/conversation_create_params.py
new file mode 100644
index 00000000..30a27324
--- /dev/null
+++ b/src/llama_stack_client/types/conversation_create_params.py
@@ -0,0 +1,277 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = [
+    "ConversationCreateParams",
+    "Item",
+    "ItemOpenAIResponseMessage",
+    "ItemOpenAIResponseMessageContentUnionMember1",
+    "ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+    "ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+    "ItemOpenAIResponseMessageContentUnionMember2",
+    "ItemOpenAIResponseMessageContentUnionMember2Annotation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+    "ItemOpenAIResponseOutputMessageFunctionToolCall",
+    "ItemOpenAIResponseOutputMessageFileSearchToolCall",
+    "ItemOpenAIResponseOutputMessageFileSearchToolCallResult",
+    "ItemOpenAIResponseOutputMessageWebSearchToolCall",
+    "ItemOpenAIResponseOutputMessageMcpCall",
+    "ItemOpenAIResponseOutputMessageMcpListTools",
+    "ItemOpenAIResponseOutputMessageMcpListToolsTool",
+]
+
+
+class ConversationCreateParams(TypedDict, total=False):
+    items: Iterable[Item]
+    """Initial items to include in the conversation context."""
+
+    metadata: Dict[str, str]
+    """Set of key-value pairs that can be attached to an object."""
+
+
+class ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(TypedDict, total=False):
+    text: Required[str]
+    """The text content of the input message"""
+
+    type: Required[Literal["input_text"]]
+    """Content type identifier, always "input_text" """
+
+
+class ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(TypedDict, total=False):
+    detail: Required[Literal["low", "high", "auto"]]
+    """Level of detail for image processing, can be "low", "high", or "auto" """
+
+    type: Required[Literal["input_image"]]
+    """Content type identifier, always "input_image" """
+
+    image_url: str
+    """(Optional) URL of the image content"""
+
+
+ItemOpenAIResponseMessageContentUnionMember1: TypeAlias = Union[
+    ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+    ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+]
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(
+    TypedDict, total=False
+):
+    file_id: Required[str]
+    """Unique identifier of the referenced file"""
+
+    filename: Required[str]
+    """Name of the referenced file"""
+
+    index: Required[int]
+    """Position index of the citation within the content"""
+
+    type: Required[Literal["file_citation"]]
+    """Annotation type identifier, always "file_citation" """
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(TypedDict, total=False):
+    end_index: Required[int]
+    """End position of the citation span in the content"""
+
+    start_index: Required[int]
+    """Start position of the citation span in the content"""
+
+    title: Required[str]
+    """Title of the referenced web resource"""
+
+    type: Required[Literal["url_citation"]]
+    """Annotation type identifier, always "url_citation" """
+
+    url: Required[str]
+    """URL of the referenced web resource"""
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
+    TypedDict, total=False
+):
+    container_id: Required[str]
+
+    end_index: Required[int]
+
+    file_id: Required[str]
+
+    filename: Required[str]
+
+    start_index: Required[int]
+
+    type: Required[Literal["container_file_citation"]]
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(TypedDict, total=False):
+    file_id: Required[str]
+
+    index: Required[int]
+
+    type: Required[Literal["file_path"]]
+
+
+ItemOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Union[
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+]
+
+
+class ItemOpenAIResponseMessageContentUnionMember2(TypedDict, total=False):
+    annotations: Required[Iterable[ItemOpenAIResponseMessageContentUnionMember2Annotation]]
+
+    text: Required[str]
+
+    type: Required[Literal["output_text"]]
+
+
+class ItemOpenAIResponseMessage(TypedDict, total=False):
+    content: Required[
+        Union[
+            str,
+            Iterable[ItemOpenAIResponseMessageContentUnionMember1],
+            Iterable[ItemOpenAIResponseMessageContentUnionMember2],
+        ]
+    ]
+
+    role: Required[Literal["system", "developer", "user", "assistant"]]
+
+    type: Required[Literal["message"]]
+
+    id: str
+
+    status: str
+
+
+class ItemOpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False):
+    arguments: Required[str]
+    """JSON string containing the function arguments"""
+
+    call_id: Required[str]
+    """Unique identifier for the function call"""
+
+    name: Required[str]
+    """Name of the function being called"""
+
+    type: Required[Literal["function_call"]]
+    """Tool call type identifier, always "function_call" """
+
+    id: str
+    """(Optional) Additional identifier for the tool call"""
+
+    status: str
+    """(Optional) Current status of the function call execution"""
+
+
+class ItemOpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False):
+    attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]
+    """(Optional) Key-value attributes associated with the file"""
+
+    file_id: Required[str]
+    """Unique identifier of the file containing the result"""
+
+    filename: Required[str]
+    """Name of the file containing the result"""
+
+    score: Required[float]
+    """Relevance score for this search result (between 0 and 1)"""
+
+    text: Required[str]
+    """Text content of the search result"""
+
+
+class ItemOpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this tool call"""
+
+    queries: Required[SequenceNotStr[str]]
+    """List of search queries executed"""
+
+    status: Required[str]
+    """Current status of the file search operation"""
+
+    type: Required[Literal["file_search_call"]]
+    """Tool call type identifier, always "file_search_call" """
+
+    results: Iterable[ItemOpenAIResponseOutputMessageFileSearchToolCallResult]
+    """(Optional) Search results returned by the file search operation"""
+
+
+class ItemOpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this tool call"""
+
+    status: Required[str]
+    """Current status of the web search operation"""
+
+    type: Required[Literal["web_search_call"]]
+    """Tool call type identifier, always "web_search_call" """
+
+
+class ItemOpenAIResponseOutputMessageMcpCall(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this MCP call"""
+
+    arguments: Required[str]
+    """JSON string containing the MCP call arguments"""
+
+    name: Required[str]
+    """Name of the MCP method being called"""
+
+    server_label: Required[str]
+    """Label identifying the MCP server handling the call"""
+
+    type: Required[Literal["mcp_call"]]
+    """Tool call type identifier, always "mcp_call" """
+
+    error: str
+    """(Optional) Error message if the MCP call failed"""
+
+    output: str
+    """(Optional) Output result from the successful MCP call"""
+
+
+class ItemOpenAIResponseOutputMessageMcpListToolsTool(TypedDict, total=False):
+    input_schema: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]
+    """JSON schema defining the tool's input parameters"""
+
+    name: Required[str]
+    """Name of the tool"""
+
+    description: str
+    """(Optional) Description of what the tool does"""
+
+
+class ItemOpenAIResponseOutputMessageMcpListTools(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this MCP list tools operation"""
+
+    server_label: Required[str]
+    """Label identifying the MCP server providing the tools"""
+
+    tools: Required[Iterable[ItemOpenAIResponseOutputMessageMcpListToolsTool]]
+    """List of available tools provided by the MCP server"""
+
+    type: Required[Literal["mcp_list_tools"]]
+    """Tool call type identifier, always "mcp_list_tools" """
+
+
+Item: TypeAlias = Union[
+    ItemOpenAIResponseMessage,
+    ItemOpenAIResponseOutputMessageFunctionToolCall,
+    ItemOpenAIResponseOutputMessageFileSearchToolCall,
+    ItemOpenAIResponseOutputMessageWebSearchToolCall,
+    ItemOpenAIResponseOutputMessageMcpCall,
+    ItemOpenAIResponseOutputMessageMcpListTools,
+]
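
ConversationCreateParams above accepts optional seed items and metadata; a sketch of creating a conversation pre-populated with one message-shaped item:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Content may be a plain string, per ItemOpenAIResponseMessage above.
conversation = client.conversations.create(
    items=[
        {
            "type": "message",
            "role": "user",
            "content": "You are helping me plan a trip to Lisbon.",
        }
    ],
    metadata={"project": "travel-bot"},
)
print(conversation.id, conversation.created_at)
```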
diff --git a/src/llama_stack_client/types/conversation_delete_response.py b/src/llama_stack_client/types/conversation_delete_response.py
new file mode 100644
index 00000000..2b675a69
--- /dev/null
+++ b/src/llama_stack_client/types/conversation_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["ConversationDeleteResponse"]
+
+
+class ConversationDeleteResponse(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: str
diff --git a/src/llama_stack_client/types/conversation_object.py b/src/llama_stack_client/types/conversation_object.py
new file mode 100644
index 00000000..617be127
--- /dev/null
+++ b/src/llama_stack_client/types/conversation_object.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import Dict, List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ConversationObject"]
+
+
+class ConversationObject(BaseModel):
+    id: str
+
+    created_at: int
+
+    object: Literal["conversation"]
+
+    items: Optional[List[builtins.object]] = None
+
+    metadata: Optional[Dict[str, str]] = None
diff --git a/src/llama_stack_client/types/conversation_update_params.py b/src/llama_stack_client/types/conversation_update_params.py
new file mode 100644
index 00000000..3b34c327
--- /dev/null
+++ b/src/llama_stack_client/types/conversation_update_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ConversationUpdateParams"]
+
+
+class ConversationUpdateParams(TypedDict, total=False):
+    metadata: Required[Dict[str, str]]
+    """Set of key-value pairs that can be attached to an object."""
diff --git a/src/llama_stack_client/types/conversations/__init__.py b/src/llama_stack_client/types/conversations/__init__.py
new file mode 100644
index 00000000..88543e32
--- /dev/null
+++ b/src/llama_stack_client/types/conversations/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .item_list_params import ItemListParams as ItemListParams
+from .item_get_response import ItemGetResponse as ItemGetResponse
+from .item_create_params import ItemCreateParams as ItemCreateParams
+from .item_list_response import ItemListResponse as ItemListResponse
+from .item_create_response import ItemCreateResponse as ItemCreateResponse
diff --git a/src/llama_stack_client/types/conversations/item_create_params.py b/src/llama_stack_client/types/conversations/item_create_params.py
new file mode 100644
index 00000000..43176c98
--- /dev/null
+++ b/src/llama_stack_client/types/conversations/item_create_params.py
@@ -0,0 +1,274 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = [
+    "ItemCreateParams",
+    "Item",
+    "ItemOpenAIResponseMessage",
+    "ItemOpenAIResponseMessageContentUnionMember1",
+    "ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+    "ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+    "ItemOpenAIResponseMessageContentUnionMember2",
+    "ItemOpenAIResponseMessageContentUnionMember2Annotation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+    "ItemOpenAIResponseOutputMessageFunctionToolCall",
+    "ItemOpenAIResponseOutputMessageFileSearchToolCall",
+    "ItemOpenAIResponseOutputMessageFileSearchToolCallResult",
+    "ItemOpenAIResponseOutputMessageWebSearchToolCall",
+    "ItemOpenAIResponseOutputMessageMcpCall",
+    "ItemOpenAIResponseOutputMessageMcpListTools",
+    "ItemOpenAIResponseOutputMessageMcpListToolsTool",
+]
+
+
+class ItemCreateParams(TypedDict, total=False):
+    items: Required[Iterable[Item]]
+    """Items to include in the conversation context."""
+
+
+class ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(TypedDict, total=False):
+    text: Required[str]
+    """The text content of the input message"""
+
+    type: Required[Literal["input_text"]]
+    """Content type identifier, always "input_text" """
+
+
+class ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(TypedDict, total=False):
+    detail: Required[Literal["low", "high", "auto"]]
+    """Level of detail for image processing, can be "low", "high", or "auto" """
+
+    type: Required[Literal["input_image"]]
+    """Content type identifier, always "input_image" """
+
+    image_url: str
+    """(Optional) URL of the image content"""
+
+
+ItemOpenAIResponseMessageContentUnionMember1: TypeAlias = Union[
+    ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+    ItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+]
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(
+    TypedDict, total=False
+):
+    file_id: Required[str]
+    """Unique identifier of the referenced file"""
+
+    filename: Required[str]
+    """Name of the referenced file"""
+
+    index: Required[int]
+    """Position index of the citation within the content"""
+
+    type: Required[Literal["file_citation"]]
+    """Annotation type identifier, always "file_citation" """
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(TypedDict, total=False):
+    end_index: Required[int]
+    """End position of the citation span in the content"""
+
+    start_index: Required[int]
+    """Start position of the citation span in the content"""
+
+    title: Required[str]
+    """Title of the referenced web resource"""
+
+    type: Required[Literal["url_citation"]]
+    """Annotation type identifier, always "url_citation" """
+
+    url: Required[str]
+    """URL of the referenced web resource"""
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
+    TypedDict, total=False
+):
+    container_id: Required[str]
+
+    end_index: Required[int]
+
+    file_id: Required[str]
+
+    filename: Required[str]
+
+    start_index: Required[int]
+
+    type: Required[Literal["container_file_citation"]]
+
+
+class ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(TypedDict, total=False):
+    file_id: Required[str]
+
+    index: Required[int]
+
+    type: Required[Literal["file_path"]]
+
+
+ItemOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Union[
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+    ItemOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+]
+
+
+class ItemOpenAIResponseMessageContentUnionMember2(TypedDict, total=False):
+    annotations: Required[Iterable[ItemOpenAIResponseMessageContentUnionMember2Annotation]]
+
+    text: Required[str]
+
+    type: Required[Literal["output_text"]]
+
+
+class ItemOpenAIResponseMessage(TypedDict, total=False):
+    content: Required[
+        Union[
+            str,
+            Iterable[ItemOpenAIResponseMessageContentUnionMember1],
+            Iterable[ItemOpenAIResponseMessageContentUnionMember2],
+        ]
+    ]
+
+    role: Required[Literal["system", "developer", "user", "assistant"]]
+
+    type: Required[Literal["message"]]
+
+    id: str
+
+    status: str
+
+
+class ItemOpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False):
+    arguments: Required[str]
+    """JSON string containing the function arguments"""
+
+    call_id: Required[str]
+    """Unique identifier for the function call"""
+
+    name: Required[str]
+    """Name of the function being called"""
+
+    type: Required[Literal["function_call"]]
+    """Tool call type identifier, always "function_call" """
+
+    id: str
+    """(Optional) Additional identifier for the tool call"""
+
+    status: str
+    """(Optional) Current status of the function call execution"""
+
+
+class ItemOpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False):
+    attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]
+    """(Optional) Key-value attributes associated with the file"""
+
+    file_id: Required[str]
+    """Unique identifier of the file containing the result"""
+
+    filename: Required[str]
+    """Name of the file containing the result"""
+
+    score: Required[float]
+    """Relevance score for this search result (between 0 and 1)"""
+
+    text: Required[str]
+    """Text content of the search result"""
+
+
+class ItemOpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this tool call"""
+
+    queries: Required[SequenceNotStr[str]]
+    """List of search queries executed"""
+
+    status: Required[str]
+    """Current status of the file search operation"""
+
+    type: Required[Literal["file_search_call"]]
+    """Tool call type identifier, always "file_search_call" """
+
+    results: Iterable[ItemOpenAIResponseOutputMessageFileSearchToolCallResult]
+    """(Optional) Search results returned by the file search operation"""
+
+
+class ItemOpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this tool call"""
+
+    status: Required[str]
+    """Current status of the web search operation"""
+
+    type: Required[Literal["web_search_call"]]
+    """Tool call type identifier, always "web_search_call" """
+
+
+class ItemOpenAIResponseOutputMessageMcpCall(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this MCP call"""
+
+    arguments: Required[str]
+    """JSON string containing the MCP call arguments"""
+
+    name: Required[str]
+    """Name of the MCP method being called"""
+
+    server_label: Required[str]
+    """Label identifying the MCP server handling the call"""
+
+    type: Required[Literal["mcp_call"]]
+    """Tool call type identifier, always "mcp_call" """
+
+    error: str
+    """(Optional) Error message if the MCP call failed"""
+
+    output: str
+    """(Optional) Output result from the successful MCP call"""
+
+
+class ItemOpenAIResponseOutputMessageMcpListToolsTool(TypedDict, total=False):
+    input_schema: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]
+    """JSON schema defining the tool's input parameters"""
+
+    name: Required[str]
+    """Name of the tool"""
+
+    description: str
+    """(Optional) Description of what the tool does"""
+
+
+class ItemOpenAIResponseOutputMessageMcpListTools(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for this MCP list tools operation"""
+
+    server_label: Required[str]
+    """Label identifying the MCP server providing the tools"""
+
+    tools: Required[Iterable[ItemOpenAIResponseOutputMessageMcpListToolsTool]]
+    """List of available tools provided by the MCP server"""
+
+    type: Required[Literal["mcp_list_tools"]]
+    """Tool call type identifier, always "mcp_list_tools" """
+
+
+Item: TypeAlias = Union[
+    ItemOpenAIResponseMessage,
+    ItemOpenAIResponseOutputMessageFunctionToolCall,
+    ItemOpenAIResponseOutputMessageFileSearchToolCall,
+    ItemOpenAIResponseOutputMessageWebSearchToolCall,
+    ItemOpenAIResponseOutputMessageMcpCall,
+    ItemOpenAIResponseOutputMessageMcpListTools,
+]
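
The `Item` union above covers every request shape a conversation item can take (messages plus the various tool-call records). A minimal sketch of creating a message item follows; the resource path `client.conversations.items.create` and the `items=` keyword are assumptions here, and because the params are TypedDicts, plain dicts are accepted:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server URL

# A minimal "message" item matching ItemOpenAIResponseMessage.
message_item = {
    "type": "message",
    "role": "user",
    "content": "What is the capital of France?",
}

# "conv_123" and the items= keyword are illustrative assumptions.
created = client.conversations.items.create(
    "conv_123",
    items=[message_item],
)
```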
diff --git a/src/llama_stack_client/types/conversations/item_create_response.py b/src/llama_stack_client/types/conversations/item_create_response.py
new file mode 100644
index 00000000..d998dda5
--- /dev/null
+++ b/src/llama_stack_client/types/conversations/item_create_response.py
@@ -0,0 +1,281 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ItemCreateResponse",
+    "Data",
+    "DataOpenAIResponseMessage",
+    "DataOpenAIResponseMessageContentUnionMember1",
+    "DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+    "DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+    "DataOpenAIResponseMessageContentUnionMember2",
+    "DataOpenAIResponseMessageContentUnionMember2Annotation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+    "DataOpenAIResponseOutputMessageFunctionToolCall",
+    "DataOpenAIResponseOutputMessageFileSearchToolCall",
+    "DataOpenAIResponseOutputMessageFileSearchToolCallResult",
+    "DataOpenAIResponseOutputMessageWebSearchToolCall",
+    "DataOpenAIResponseOutputMessageMcpCall",
+    "DataOpenAIResponseOutputMessageMcpListTools",
+    "DataOpenAIResponseOutputMessageMcpListToolsTool",
+]
+
+
+class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
+    text: str
+    """The text content of the input message"""
+
+    type: Literal["input_text"]
+    """Content type identifier, always "input_text" """
+
+
+class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
+    detail: Literal["low", "high", "auto"]
+    """Level of detail for image processing, can be "low", "high", or "auto" """
+
+    type: Literal["input_image"]
+    """Content type identifier, always "input_image" """
+
+    image_url: Optional[str] = None
+    """(Optional) URL of the image content"""
+
+
+DataOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
+    Union[
+        DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+        DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
+    file_id: str
+    """Unique identifier of the referenced file"""
+
+    filename: str
+    """Name of the referenced file"""
+
+    index: int
+    """Position index of the citation within the content"""
+
+    type: Literal["file_citation"]
+    """Annotation type identifier, always "file_citation" """
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
+    end_index: int
+    """End position of the citation span in the content"""
+
+    start_index: int
+    """Start position of the citation span in the content"""
+
+    title: str
+    """Title of the referenced web resource"""
+
+    type: Literal["url_citation"]
+    """Annotation type identifier, always "url_citation" """
+
+    url: str
+    """URL of the referenced web resource"""
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel):
+    container_id: str
+
+    end_index: int
+
+    file_id: str
+
+    filename: str
+
+    start_index: int
+
+    type: Literal["container_file_citation"]
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
+    file_id: str
+
+    index: int
+
+    type: Literal["file_path"]
+
+
+DataOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
+    Union[
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataOpenAIResponseMessageContentUnionMember2(BaseModel):
+    annotations: List[DataOpenAIResponseMessageContentUnionMember2Annotation]
+
+    text: str
+
+    type: Literal["output_text"]
+
+
+class DataOpenAIResponseMessage(BaseModel):
+    content: Union[
+        str, List[DataOpenAIResponseMessageContentUnionMember1], List[DataOpenAIResponseMessageContentUnionMember2]
+    ]
+
+    role: Literal["system", "developer", "user", "assistant"]
+
+    type: Literal["message"]
+
+    id: Optional[str] = None
+
+    status: Optional[str] = None
+
+
+class DataOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+    arguments: str
+    """JSON string containing the function arguments"""
+
+    call_id: str
+    """Unique identifier for the function call"""
+
+    name: str
+    """Name of the function being called"""
+
+    type: Literal["function_call"]
+    """Tool call type identifier, always "function_call" """
+
+    id: Optional[str] = None
+    """(Optional) Additional identifier for the tool call"""
+
+    status: Optional[str] = None
+    """(Optional) Current status of the function call execution"""
+
+
+class DataOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
+    attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
+    """(Optional) Key-value attributes associated with the file"""
+
+    file_id: str
+    """Unique identifier of the file containing the result"""
+
+    filename: str
+    """Name of the file containing the result"""
+
+    score: float
+    """Relevance score for this search result (between 0 and 1)"""
+
+    text: str
+    """Text content of the search result"""
+
+
+class DataOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+    id: str
+    """Unique identifier for this tool call"""
+
+    queries: List[str]
+    """List of search queries executed"""
+
+    status: str
+    """Current status of the file search operation"""
+
+    type: Literal["file_search_call"]
+    """Tool call type identifier, always "file_search_call" """
+
+    results: Optional[List[DataOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
+    """(Optional) Search results returned by the file search operation"""
+
+
+class DataOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+    id: str
+    """Unique identifier for this tool call"""
+
+    status: str
+    """Current status of the web search operation"""
+
+    type: Literal["web_search_call"]
+    """Tool call type identifier, always "web_search_call" """
+
+
+class DataOpenAIResponseOutputMessageMcpCall(BaseModel):
+    id: str
+    """Unique identifier for this MCP call"""
+
+    arguments: str
+    """JSON string containing the MCP call arguments"""
+
+    name: str
+    """Name of the MCP method being called"""
+
+    server_label: str
+    """Label identifying the MCP server handling the call"""
+
+    type: Literal["mcp_call"]
+    """Tool call type identifier, always "mcp_call" """
+
+    error: Optional[str] = None
+    """(Optional) Error message if the MCP call failed"""
+
+    output: Optional[str] = None
+    """(Optional) Output result from the successful MCP call"""
+
+
+class DataOpenAIResponseOutputMessageMcpListToolsTool(BaseModel):
+    input_schema: Dict[str, Union[bool, float, str, List[object], object, None]]
+    """JSON schema defining the tool's input parameters"""
+
+    name: str
+    """Name of the tool"""
+
+    description: Optional[str] = None
+    """(Optional) Description of what the tool does"""
+
+
+class DataOpenAIResponseOutputMessageMcpListTools(BaseModel):
+    id: str
+    """Unique identifier for this MCP list tools operation"""
+
+    server_label: str
+    """Label identifying the MCP server providing the tools"""
+
+    tools: List[DataOpenAIResponseOutputMessageMcpListToolsTool]
+    """List of available tools provided by the MCP server"""
+
+    type: Literal["mcp_list_tools"]
+    """Tool call type identifier, always "mcp_list_tools" """
+
+
+Data: TypeAlias = Annotated[
+    Union[
+        DataOpenAIResponseMessage,
+        DataOpenAIResponseOutputMessageFunctionToolCall,
+        DataOpenAIResponseOutputMessageFileSearchToolCall,
+        DataOpenAIResponseOutputMessageWebSearchToolCall,
+        DataOpenAIResponseOutputMessageMcpCall,
+        DataOpenAIResponseOutputMessageMcpListTools,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ItemCreateResponse(BaseModel):
+    data: List[Data]
+
+    has_more: bool
+
+    object: str
+
+    first_id: Optional[str] = None
+
+    last_id: Optional[str] = None
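
`ItemCreateResponse` wraps the stored items in a `data` list with cursor fields (`has_more`, `first_id`, `last_id`). Because `Data` is a union discriminated on `type`, callers can branch on that field; a sketch, continuing from the `created` object above:

```python
for item in created.data:
    if item.type == "message":
        print(item.role, item.content)
    elif item.type == "function_call":
        print(item.name, item.arguments)
    elif item.type == "mcp_call":
        print(item.server_label, item.output)

if created.has_more:
    print("more items after", created.last_id)
```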
diff --git a/src/llama_stack_client/types/conversations/item_get_response.py b/src/llama_stack_client/types/conversations/item_get_response.py
new file mode 100644
index 00000000..2bcb6c20
--- /dev/null
+++ b/src/llama_stack_client/types/conversations/item_get_response.py
@@ -0,0 +1,266 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ItemGetResponse",
+    "OpenAIResponseMessage",
+    "OpenAIResponseMessageContentUnionMember1",
+    "OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+    "OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+    "OpenAIResponseMessageContentUnionMember2",
+    "OpenAIResponseMessageContentUnionMember2Annotation",
+    "OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+    "OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+    "OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+    "OpenAIResponseOutputMessageFunctionToolCall",
+    "OpenAIResponseOutputMessageFileSearchToolCall",
+    "OpenAIResponseOutputMessageFileSearchToolCallResult",
+    "OpenAIResponseOutputMessageWebSearchToolCall",
+    "OpenAIResponseOutputMessageMcpCall",
+    "OpenAIResponseOutputMessageMcpListTools",
+    "OpenAIResponseOutputMessageMcpListToolsTool",
+]
+
+
+class OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
+    text: str
+    """The text content of the input message"""
+
+    type: Literal["input_text"]
+    """Content type identifier, always "input_text" """
+
+
+class OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
+    detail: Literal["low", "high", "auto"]
+    """Level of detail for image processing, can be "low", "high", or "auto" """
+
+    type: Literal["input_image"]
+    """Content type identifier, always "input_image" """
+
+    image_url: Optional[str] = None
+    """(Optional) URL of the image content"""
+
+
+OpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
+    Union[
+        OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+        OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
+    file_id: str
+    """Unique identifier of the referenced file"""
+
+    filename: str
+    """Name of the referenced file"""
+
+    index: int
+    """Position index of the citation within the content"""
+
+    type: Literal["file_citation"]
+    """Annotation type identifier, always "file_citation" """
+
+
+class OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
+    end_index: int
+    """End position of the citation span in the content"""
+
+    start_index: int
+    """Start position of the citation span in the content"""
+
+    title: str
+    """Title of the referenced web resource"""
+
+    type: Literal["url_citation"]
+    """Annotation type identifier, always "url_citation" """
+
+    url: str
+    """URL of the referenced web resource"""
+
+
+class OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel):
+    container_id: str
+
+    end_index: int
+
+    file_id: str
+
+    filename: str
+
+    start_index: int
+
+    type: Literal["container_file_citation"]
+
+
+class OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
+    file_id: str
+
+    index: int
+
+    type: Literal["file_path"]
+
+
+OpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
+    Union[
+        OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+        OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+        OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+        OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class OpenAIResponseMessageContentUnionMember2(BaseModel):
+    annotations: List[OpenAIResponseMessageContentUnionMember2Annotation]
+
+    text: str
+
+    type: Literal["output_text"]
+
+
+class OpenAIResponseMessage(BaseModel):
+    content: Union[str, List[OpenAIResponseMessageContentUnionMember1], List[OpenAIResponseMessageContentUnionMember2]]
+
+    role: Literal["system", "developer", "user", "assistant"]
+
+    type: Literal["message"]
+
+    id: Optional[str] = None
+
+    status: Optional[str] = None
+
+
+class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+    arguments: str
+    """JSON string containing the function arguments"""
+
+    call_id: str
+    """Unique identifier for the function call"""
+
+    name: str
+    """Name of the function being called"""
+
+    type: Literal["function_call"]
+    """Tool call type identifier, always "function_call" """
+
+    id: Optional[str] = None
+    """(Optional) Additional identifier for the tool call"""
+
+    status: Optional[str] = None
+    """(Optional) Current status of the function call execution"""
+
+
+class OpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
+    attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
+    """(Optional) Key-value attributes associated with the file"""
+
+    file_id: str
+    """Unique identifier of the file containing the result"""
+
+    filename: str
+    """Name of the file containing the result"""
+
+    score: float
+    """Relevance score for this search result (between 0 and 1)"""
+
+    text: str
+    """Text content of the search result"""
+
+
+class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+    id: str
+    """Unique identifier for this tool call"""
+
+    queries: List[str]
+    """List of search queries executed"""
+
+    status: str
+    """Current status of the file search operation"""
+
+    type: Literal["file_search_call"]
+    """Tool call type identifier, always "file_search_call" """
+
+    results: Optional[List[OpenAIResponseOutputMessageFileSearchToolCallResult]] = None
+    """(Optional) Search results returned by the file search operation"""
+
+
+class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+    id: str
+    """Unique identifier for this tool call"""
+
+    status: str
+    """Current status of the web search operation"""
+
+    type: Literal["web_search_call"]
+    """Tool call type identifier, always "web_search_call" """
+
+
+class OpenAIResponseOutputMessageMcpCall(BaseModel):
+    id: str
+    """Unique identifier for this MCP call"""
+
+    arguments: str
+    """JSON string containing the MCP call arguments"""
+
+    name: str
+    """Name of the MCP method being called"""
+
+    server_label: str
+    """Label identifying the MCP server handling the call"""
+
+    type: Literal["mcp_call"]
+    """Tool call type identifier, always "mcp_call" """
+
+    error: Optional[str] = None
+    """(Optional) Error message if the MCP call failed"""
+
+    output: Optional[str] = None
+    """(Optional) Output result from the successful MCP call"""
+
+
+class OpenAIResponseOutputMessageMcpListToolsTool(BaseModel):
+    input_schema: Dict[str, Union[bool, float, str, List[object], object, None]]
+    """JSON schema defining the tool's input parameters"""
+
+    name: str
+    """Name of the tool"""
+
+    description: Optional[str] = None
+    """(Optional) Description of what the tool does"""
+
+
+class OpenAIResponseOutputMessageMcpListTools(BaseModel):
+    id: str
+    """Unique identifier for this MCP list tools operation"""
+
+    server_label: str
+    """Label identifying the MCP server providing the tools"""
+
+    tools: List[OpenAIResponseOutputMessageMcpListToolsTool]
+    """List of available tools provided by the MCP server"""
+
+    type: Literal["mcp_list_tools"]
+    """Tool call type identifier, always "mcp_list_tools" """
+
+
+ItemGetResponse: TypeAlias = Annotated[
+    Union[
+        OpenAIResponseMessage,
+        OpenAIResponseOutputMessageFunctionToolCall,
+        OpenAIResponseOutputMessageFileSearchToolCall,
+        OpenAIResponseOutputMessageWebSearchToolCall,
+        OpenAIResponseOutputMessageMcpCall,
+        OpenAIResponseOutputMessageMcpListTools,
+    ],
+    PropertyInfo(discriminator="type"),
+]
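
Unlike the create/list responses, `ItemGetResponse` is the discriminated union itself rather than a wrapper with a `data` list. A sketch of retrieving a single item, assuming the method signature `client.conversations.items.get(item_id, conversation_id=...)`:

```python
item = client.conversations.items.get(
    "item_abc",                  # hypothetical item ID
    conversation_id="conv_123",  # hypothetical conversation ID
)

if item.type == "web_search_call":
    print("web search status:", item.status)
elif item.type == "file_search_call":
    for result in item.results or []:
        print(result.filename, result.score)
```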
diff --git a/src/llama_stack_client/types/conversations/item_list_params.py b/src/llama_stack_client/types/conversations/item_list_params.py
new file mode 100644
index 00000000..e90c51a0
--- /dev/null
+++ b/src/llama_stack_client/types/conversations/item_list_params.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ItemListParams"]
+
+
+class ItemListParams(TypedDict, total=False):
+    after: Required[Union[str, object]]
+    """An item ID to list items after, used in pagination."""
+
+    include: Required[
+        Union[
+            List[
+                Literal[
+                    "code_interpreter_call.outputs",
+                    "computer_call_output.output.image_url",
+                    "file_search_call.results",
+                    "message.input_image.image_url",
+                    "message.output_text.logprobs",
+                    "reasoning.encrypted_content",
+                ]
+            ],
+            object,
+        ]
+    ]
+    """Specify additional output data to include in the response."""
+
+    limit: Required[Union[int, object]]
+    """A limit on the number of objects to be returned (1-100, default 20)."""
+
+    order: Required[Union[Literal["asc", "desc"], object]]
+    """The order to return items in (asc or desc, default desc)."""
diff --git a/src/llama_stack_client/types/conversations/item_list_response.py b/src/llama_stack_client/types/conversations/item_list_response.py
new file mode 100644
index 00000000..c0c3e06f
--- /dev/null
+++ b/src/llama_stack_client/types/conversations/item_list_response.py
@@ -0,0 +1,281 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = [
+    "ItemListResponse",
+    "Data",
+    "DataOpenAIResponseMessage",
+    "DataOpenAIResponseMessageContentUnionMember1",
+    "DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+    "DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+    "DataOpenAIResponseMessageContentUnionMember2",
+    "DataOpenAIResponseMessageContentUnionMember2Annotation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+    "DataOpenAIResponseOutputMessageFunctionToolCall",
+    "DataOpenAIResponseOutputMessageFileSearchToolCall",
+    "DataOpenAIResponseOutputMessageFileSearchToolCallResult",
+    "DataOpenAIResponseOutputMessageWebSearchToolCall",
+    "DataOpenAIResponseOutputMessageMcpCall",
+    "DataOpenAIResponseOutputMessageMcpListTools",
+    "DataOpenAIResponseOutputMessageMcpListToolsTool",
+]
+
+
+class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
+    text: str
+    """The text content of the input message"""
+
+    type: Literal["input_text"]
+    """Content type identifier, always "input_text" """
+
+
+class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
+    detail: Literal["low", "high", "auto"]
+    """Level of detail for image processing, can be "low", "high", or "auto" """
+
+    type: Literal["input_image"]
+    """Content type identifier, always "input_image" """
+
+    image_url: Optional[str] = None
+    """(Optional) URL of the image content"""
+
+
+DataOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
+    Union[
+        DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+        DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
+    file_id: str
+    """Unique identifier of the referenced file"""
+
+    filename: str
+    """Name of the referenced file"""
+
+    index: int
+    """Position index of the citation within the content"""
+
+    type: Literal["file_citation"]
+    """Annotation type identifier, always "file_citation" """
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
+    end_index: int
+    """End position of the citation span in the content"""
+
+    start_index: int
+    """Start position of the citation span in the content"""
+
+    title: str
+    """Title of the referenced web resource"""
+
+    type: Literal["url_citation"]
+    """Annotation type identifier, always "url_citation" """
+
+    url: str
+    """URL of the referenced web resource"""
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(BaseModel):
+    container_id: str
+
+    end_index: int
+
+    file_id: str
+
+    filename: str
+
+    start_index: int
+
+    type: Literal["container_file_citation"]
+
+
+class DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
+    file_id: str
+
+    index: int
+
+    type: Literal["file_path"]
+
+
+DataOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
+    Union[
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+        DataOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataOpenAIResponseMessageContentUnionMember2(BaseModel):
+    annotations: List[DataOpenAIResponseMessageContentUnionMember2Annotation]
+
+    text: str
+
+    type: Literal["output_text"]
+
+
+class DataOpenAIResponseMessage(BaseModel):
+    content: Union[
+        str, List[DataOpenAIResponseMessageContentUnionMember1], List[DataOpenAIResponseMessageContentUnionMember2]
+    ]
+
+    role: Literal["system", "developer", "user", "assistant"]
+
+    type: Literal["message"]
+
+    id: Optional[str] = None
+
+    status: Optional[str] = None
+
+
+class DataOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+    arguments: str
+    """JSON string containing the function arguments"""
+
+    call_id: str
+    """Unique identifier for the function call"""
+
+    name: str
+    """Name of the function being called"""
+
+    type: Literal["function_call"]
+    """Tool call type identifier, always "function_call" """
+
+    id: Optional[str] = None
+    """(Optional) Additional identifier for the tool call"""
+
+    status: Optional[str] = None
+    """(Optional) Current status of the function call execution"""
+
+
+class DataOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
+    attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
+    """(Optional) Key-value attributes associated with the file"""
+
+    file_id: str
+    """Unique identifier of the file containing the result"""
+
+    filename: str
+    """Name of the file containing the result"""
+
+    score: float
+    """Relevance score for this search result (between 0 and 1)"""
+
+    text: str
+    """Text content of the search result"""
+
+
+class DataOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+    id: str
+    """Unique identifier for this tool call"""
+
+    queries: List[str]
+    """List of search queries executed"""
+
+    status: str
+    """Current status of the file search operation"""
+
+    type: Literal["file_search_call"]
+    """Tool call type identifier, always "file_search_call" """
+
+    results: Optional[List[DataOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
+    """(Optional) Search results returned by the file search operation"""
+
+
+class DataOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+    id: str
+    """Unique identifier for this tool call"""
+
+    status: str
+    """Current status of the web search operation"""
+
+    type: Literal["web_search_call"]
+    """Tool call type identifier, always "web_search_call" """
+
+
+class DataOpenAIResponseOutputMessageMcpCall(BaseModel):
+    id: str
+    """Unique identifier for this MCP call"""
+
+    arguments: str
+    """JSON string containing the MCP call arguments"""
+
+    name: str
+    """Name of the MCP method being called"""
+
+    server_label: str
+    """Label identifying the MCP server handling the call"""
+
+    type: Literal["mcp_call"]
+    """Tool call type identifier, always "mcp_call" """
+
+    error: Optional[str] = None
+    """(Optional) Error message if the MCP call failed"""
+
+    output: Optional[str] = None
+    """(Optional) Output result from the successful MCP call"""
+
+
+class DataOpenAIResponseOutputMessageMcpListToolsTool(BaseModel):
+    input_schema: Dict[str, Union[bool, float, str, List[object], object, None]]
+    """JSON schema defining the tool's input parameters"""
+
+    name: str
+    """Name of the tool"""
+
+    description: Optional[str] = None
+    """(Optional) Description of what the tool does"""
+
+
+class DataOpenAIResponseOutputMessageMcpListTools(BaseModel):
+    id: str
+    """Unique identifier for this MCP list tools operation"""
+
+    server_label: str
+    """Label identifying the MCP server providing the tools"""
+
+    tools: List[DataOpenAIResponseOutputMessageMcpListToolsTool]
+    """List of available tools provided by the MCP server"""
+
+    type: Literal["mcp_list_tools"]
+    """Tool call type identifier, always "mcp_list_tools" """
+
+
+Data: TypeAlias = Annotated[
+    Union[
+        DataOpenAIResponseMessage,
+        DataOpenAIResponseOutputMessageFunctionToolCall,
+        DataOpenAIResponseOutputMessageFileSearchToolCall,
+        DataOpenAIResponseOutputMessageWebSearchToolCall,
+        DataOpenAIResponseOutputMessageMcpCall,
+        DataOpenAIResponseOutputMessageMcpListTools,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ItemListResponse(BaseModel):
+    data: List[Data]
+
+    has_more: bool
+
+    object: str
+
+    first_id: Optional[str] = None
+
+    last_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/models/__init__.py b/src/llama_stack_client/types/models/__init__.py
index 6b0c3091..f8ee8b14 100644
--- a/src/llama_stack_client/types/models/__init__.py
+++ b/src/llama_stack_client/types/models/__init__.py
@@ -1,5 +1,3 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
-
-from .openai_list_response import OpenAIListResponse as OpenAIListResponse
diff --git a/src/llama_stack_client/types/models/openai_list_response.py b/src/llama_stack_client/types/models/openai_list_response.py
deleted file mode 100644
index 5b6c0358..00000000
--- a/src/llama_stack_client/types/models/openai_list_response.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from ..model import Model
-
-__all__ = ["OpenAIListResponse"]
-
-OpenAIListResponse: TypeAlias = List[Model]
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index daf7f6cf..219ba515 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -51,6 +51,13 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     model: Required[str]
     """The underlying LLM used for completions."""
 
+    conversation: str
+    """(Optional) The ID of a conversation to add the response to.
+
+    Must begin with 'conv_'. Input and output messages will be automatically added
+    to the conversation.
+    """
+
     include: SequenceNotStr[str]
     """(Optional) Additional fields to include in the response."""
 
diff --git a/src/llama_stack_client/types/response_list_response.py b/src/llama_stack_client/types/response_list_response.py
index dec51231..0164e109 100644
--- a/src/llama_stack_client/types/response_list_response.py
+++ b/src/llama_stack_client/types/response_list_response.py
@@ -50,6 +50,17 @@
     "Text",
     "TextFormat",
     "Error",
+    "Tool",
+    "ToolOpenAIResponseInputToolWebSearch",
+    "ToolOpenAIResponseInputToolFileSearch",
+    "ToolOpenAIResponseInputToolFileSearchRankingOptions",
+    "ToolOpenAIResponseInputToolFunction",
+    "ToolOpenAIResponseToolMcp",
+    "ToolOpenAIResponseToolMcpAllowedTools",
+    "ToolOpenAIResponseToolMcpAllowedToolsAllowedToolsFilter",
+    "Usage",
+    "UsageInputTokensDetails",
+    "UsageOutputTokensDetails",
 ]
 
 
@@ -570,6 +581,112 @@ class Error(BaseModel):
     """Human-readable error message describing the failure"""
 
 
+class ToolOpenAIResponseInputToolWebSearch(BaseModel):
+    type: Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11"]
+    """Web search tool type variant to use"""
+
+    search_context_size: Optional[str] = None
+    """(Optional) Size of search context, must be "low", "medium", or "high" """
+
+
+class ToolOpenAIResponseInputToolFileSearchRankingOptions(BaseModel):
+    ranker: Optional[str] = None
+    """(Optional) Name of the ranking algorithm to use"""
+
+    score_threshold: Optional[float] = None
+    """(Optional) Minimum relevance score threshold for results"""
+
+
+class ToolOpenAIResponseInputToolFileSearch(BaseModel):
+    type: Literal["file_search"]
+    """Tool type identifier, always "file_search" """
+
+    vector_store_ids: List[str]
+    """List of vector store identifiers to search within"""
+
+    filters: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+    """(Optional) Additional filters to apply to the search"""
+
+    max_num_results: Optional[int] = None
+    """(Optional) Maximum number of search results to return (1-50)"""
+
+    ranking_options: Optional[ToolOpenAIResponseInputToolFileSearchRankingOptions] = None
+    """(Optional) Options for ranking and scoring search results"""
+
+
+class ToolOpenAIResponseInputToolFunction(BaseModel):
+    name: str
+    """Name of the function that can be called"""
+
+    type: Literal["function"]
+    """Tool type identifier, always "function" """
+
+    description: Optional[str] = None
+    """(Optional) Description of what the function does"""
+
+    parameters: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+    """(Optional) JSON schema defining the function's parameters"""
+
+    strict: Optional[bool] = None
+    """(Optional) Whether to enforce strict parameter validation"""
+
+
+class ToolOpenAIResponseToolMcpAllowedToolsAllowedToolsFilter(BaseModel):
+    tool_names: Optional[List[str]] = None
+    """(Optional) List of specific tool names that are allowed"""
+
+
+ToolOpenAIResponseToolMcpAllowedTools: TypeAlias = Union[
+    List[str], ToolOpenAIResponseToolMcpAllowedToolsAllowedToolsFilter
+]
+
+
+class ToolOpenAIResponseToolMcp(BaseModel):
+    server_label: str
+    """Label to identify this MCP server"""
+
+    type: Literal["mcp"]
+    """Tool type identifier, always "mcp" """
+
+    allowed_tools: Optional[ToolOpenAIResponseToolMcpAllowedTools] = None
+    """(Optional) Restriction on which tools can be used from this server"""
+
+
+Tool: TypeAlias = Union[
+    ToolOpenAIResponseInputToolWebSearch,
+    ToolOpenAIResponseInputToolFileSearch,
+    ToolOpenAIResponseInputToolFunction,
+    ToolOpenAIResponseToolMcp,
+]
+
+
+class UsageInputTokensDetails(BaseModel):
+    cached_tokens: Optional[int] = None
+    """Number of tokens retrieved from cache"""
+
+
+class UsageOutputTokensDetails(BaseModel):
+    reasoning_tokens: Optional[int] = None
+    """Number of tokens used for reasoning (o1/o3 models)"""
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    """Number of tokens in the input"""
+
+    output_tokens: int
+    """Number of tokens in the output"""
+
+    total_tokens: int
+    """Total tokens used (input + output)"""
+
+    input_tokens_details: Optional[UsageInputTokensDetails] = None
+    """Detailed breakdown of input token usage"""
+
+    output_tokens_details: Optional[UsageOutputTokensDetails] = None
+    """Detailed breakdown of output token usage"""
+
+
 class ResponseListResponse(BaseModel):
     id: str
     """Unique identifier for this response"""
@@ -607,8 +724,14 @@ class ResponseListResponse(BaseModel):
     temperature: Optional[float] = None
     """(Optional) Sampling temperature used for generation"""
 
+    tools: Optional[List[Tool]] = None
+    """(Optional) An array of tools the model may call while generating a response."""
+
     top_p: Optional[float] = None
     """(Optional) Nucleus sampling parameter used for generation"""
 
     truncation: Optional[str] = None
     """(Optional) Truncation strategy applied to the response"""
+
+    usage: Optional[Usage] = None
+    """(Optional) Token usage information for the response"""
diff --git a/src/llama_stack_client/types/response_object.py b/src/llama_stack_client/types/response_object.py
index 84a0297b..d9c1eb93 100644
--- a/src/llama_stack_client/types/response_object.py
+++ b/src/llama_stack_client/types/response_object.py
@@ -32,6 +32,17 @@
     "Text",
     "TextFormat",
     "Error",
+    "Tool",
+    "ToolOpenAIResponseInputToolWebSearch",
+    "ToolOpenAIResponseInputToolFileSearch",
+    "ToolOpenAIResponseInputToolFileSearchRankingOptions",
+    "ToolOpenAIResponseInputToolFunction",
+    "ToolOpenAIResponseToolMcp",
+    "ToolOpenAIResponseToolMcpAllowedTools",
+    "ToolOpenAIResponseToolMcpAllowedToolsAllowedToolsFilter",
+    "Usage",
+    "UsageInputTokensDetails",
+    "UsageOutputTokensDetails",
 ]
 
 
@@ -326,6 +337,112 @@ class Error(BaseModel):
     """Human-readable error message describing the failure"""
 
 
+class ToolOpenAIResponseInputToolWebSearch(BaseModel):
+    type: Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11"]
+    """Web search tool type variant to use"""
+
+    search_context_size: Optional[str] = None
+    """(Optional) Size of search context, must be "low", "medium", or "high" """
+
+
+class ToolOpenAIResponseInputToolFileSearchRankingOptions(BaseModel):
+    ranker: Optional[str] = None
+    """(Optional) Name of the ranking algorithm to use"""
+
+    score_threshold: Optional[float] = None
+    """(Optional) Minimum relevance score threshold for results"""
+
+
+class ToolOpenAIResponseInputToolFileSearch(BaseModel):
+    type: Literal["file_search"]
+    """Tool type identifier, always "file_search" """
+
+    vector_store_ids: List[str]
+    """List of vector store identifiers to search within"""
+
+    filters: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+    """(Optional) Additional filters to apply to the search"""
+
+    max_num_results: Optional[int] = None
+    """(Optional) Maximum number of search results to return (1-50)"""
+
+    ranking_options: Optional[ToolOpenAIResponseInputToolFileSearchRankingOptions] = None
+    """(Optional) Options for ranking and scoring search results"""
+
+
+class ToolOpenAIResponseInputToolFunction(BaseModel):
+    name: str
+    """Name of the function that can be called"""
+
+    type: Literal["function"]
+    """Tool type identifier, always "function" """
+
+    description: Optional[str] = None
+    """(Optional) Description of what the function does"""
+
+    parameters: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+    """(Optional) JSON schema defining the function's parameters"""
+
+    strict: Optional[bool] = None
+    """(Optional) Whether to enforce strict parameter validation"""
+
+
+class ToolOpenAIResponseToolMcpAllowedToolsAllowedToolsFilter(BaseModel):
+    tool_names: Optional[List[str]] = None
+    """(Optional) List of specific tool names that are allowed"""
+
+
+ToolOpenAIResponseToolMcpAllowedTools: TypeAlias = Union[
+    List[str], ToolOpenAIResponseToolMcpAllowedToolsAllowedToolsFilter
+]
+
+
+class ToolOpenAIResponseToolMcp(BaseModel):
+    server_label: str
+    """Label to identify this MCP server"""
+
+    type: Literal["mcp"]
+    """Tool type identifier, always "mcp" """
+
+    allowed_tools: Optional[ToolOpenAIResponseToolMcpAllowedTools] = None
+    """(Optional) Restriction on which tools can be used from this server"""
+
+
+Tool: TypeAlias = Union[
+    ToolOpenAIResponseInputToolWebSearch,
+    ToolOpenAIResponseInputToolFileSearch,
+    ToolOpenAIResponseInputToolFunction,
+    ToolOpenAIResponseToolMcp,
+]
+
+
+class UsageInputTokensDetails(BaseModel):
+    cached_tokens: Optional[int] = None
+    """Number of tokens retrieved from cache"""
+
+
+class UsageOutputTokensDetails(BaseModel):
+    reasoning_tokens: Optional[int] = None
+    """Number of tokens used for reasoning (o1/o3 models)"""
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    """Number of tokens in the input"""
+
+    output_tokens: int
+    """Number of tokens in the output"""
+
+    total_tokens: int
+    """Total tokens used (input + output)"""
+
+    input_tokens_details: Optional[UsageInputTokensDetails] = None
+    """Detailed breakdown of input token usage"""
+
+    output_tokens_details: Optional[UsageOutputTokensDetails] = None
+    """Detailed breakdown of output token usage"""
+
+
 class ResponseObject(BaseModel):
     @property
     def output_text(self) -> str:
@@ -370,8 +487,14 @@ def output_text(self) -> str:
     temperature: Optional[float] = None
     """(Optional) Sampling temperature used for generation"""
 
+    tools: Optional[List[Tool]] = None
+    """(Optional) An array of tools the model may call while generating a response."""
+
     top_p: Optional[float] = None
     """(Optional) Nucleus sampling parameter used for generation"""
 
     truncation: Optional[str] = None
     """(Optional) Truncation strategy applied to the response"""
+
+    usage: Optional[Usage] = None
+    """(Optional) Token usage information for the response"""
diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/llama_stack_client/types/response_object_stream.py
index 7ec15480..acb9c38e 100644
--- a/src/llama_stack_client/types/response_object_stream.py
+++ b/src/llama_stack_client/types/response_object_stream.py
@@ -10,6 +10,7 @@
 __all__ = [
     "ResponseObjectStream",
     "OpenAIResponseObjectStreamResponseCreated",
+    "OpenAIResponseObjectStreamResponseInProgress",
     "OpenAIResponseObjectStreamResponseOutputItemAdded",
     "OpenAIResponseObjectStreamResponseOutputItemAddedItem",
     "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage",
@@ -68,23 +69,48 @@
     "OpenAIResponseObjectStreamResponseContentPartAdded",
     "OpenAIResponseObjectStreamResponseContentPartAddedPart",
     "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputText",
+    "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotation",
+    "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFileCitation",
+    "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationCitation",
+    "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath",
     "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartRefusal",
+    "OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartReasoningText",
     "OpenAIResponseObjectStreamResponseContentPartDone",
     "OpenAIResponseObjectStreamResponseContentPartDonePart",
     "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputText",
+    "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotation",
+    "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFileCitation",
+    "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationCitation",
+    "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation",
+    "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath",
     "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartRefusal",
+    "OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartReasoningText",
+    "OpenAIResponseObjectStreamResponseIncomplete",
+    "OpenAIResponseObjectStreamResponseFailed",
     "OpenAIResponseObjectStreamResponseCompleted",
 ]
 
 
 class OpenAIResponseObjectStreamResponseCreated(BaseModel):
     response: ResponseObject
-    """The newly created response object"""
+    """The response object that was created"""
 
     type: Literal["response.created"]
     """Event type identifier, always "response.created" """
 
 
+class OpenAIResponseObjectStreamResponseInProgress(BaseModel):
+    response: ResponseObject
+    """Current response state while in progress"""
+
+    sequence_number: int
+    """Sequential number for ordering streaming events"""
+
+    type: Literal["response.in_progress"]
+    """Event type identifier, always "response.in_progress" """
+
+
 class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(
     BaseModel
 ):
@@ -849,31 +875,130 @@ class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel):
     """Event type identifier, always "response.mcp_call.completed" """
 
 
+class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFileCitation(
+    BaseModel
+):
+    file_id: str
+    """Unique identifier of the referenced file"""
+
+    filename: str
+    """Name of the referenced file"""
+
+    index: int
+    """Position index of the citation within the content"""
+
+    type: Literal["file_citation"]
+    """Annotation type identifier, always "file_citation" """
+
+
+class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationCitation(
+    BaseModel
+):
+    end_index: int
+    """End position of the citation span in the content"""
+
+    start_index: int
+    """Start position of the citation span in the content"""
+
+    title: str
+    """Title of the referenced web resource"""
+
+    type: Literal["url_citation"]
+    """Annotation type identifier, always "url_citation" """
+
+    url: str
+    """URL of the referenced web resource"""
+
+
+class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
+    BaseModel
+):
+    container_id: str
+
+    end_index: int
+
+    file_id: str
+
+    filename: str
+
+    start_index: int
+
+    type: Literal["container_file_citation"]
+
+
+class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath(
+    BaseModel
+):
+    file_id: str
+
+    index: int
+
+    type: Literal["file_path"]
+
+
+OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotation: TypeAlias = Annotated[
+    Union[
+        OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFileCitation,
+        OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationCitation,
+        OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation,
+        OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
 class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputText(BaseModel):
+    annotations: List[
+        OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotation
+    ]
+    """Structured annotations associated with the text"""
+
     text: str
+    """Text emitted for this content part"""
 
     type: Literal["output_text"]
+    """Content part type identifier, always "output_text" """
+
+    logprobs: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None
+    """(Optional) Token log probability details"""
 
 
 class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartRefusal(BaseModel):
     refusal: str
+    """Refusal text supplied by the model"""
 
     type: Literal["refusal"]
+    """Content part type identifier, always "refusal" """
+
+
+class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartReasoningText(BaseModel):
+    text: str
+    """Reasoning text supplied by the model"""
+
+    type: Literal["reasoning_text"]
+    """Content part type identifier, always "reasoning_text" """
 
 
 OpenAIResponseObjectStreamResponseContentPartAddedPart: TypeAlias = Annotated[
     Union[
         OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputText,
         OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartRefusal,
+        OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartReasoningText,
     ],
     PropertyInfo(discriminator="type"),
 ]
 
 
 class OpenAIResponseObjectStreamResponseContentPartAdded(BaseModel):
+    content_index: int
+    """Index position of the part within the content array"""
+
     item_id: str
     """Unique identifier of the output item containing this content part"""
 
+    output_index: int
+    """Index position of the output item in the response"""
+
     part: OpenAIResponseObjectStreamResponseContentPartAddedPart
     """The content part that was added"""
 
@@ -887,31 +1012,130 @@ class OpenAIResponseObjectStreamResponseContentPartAdded(BaseModel):
     """Event type identifier, always "response.content_part.added" """
 
 
+class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFileCitation(
+    BaseModel
+):
+    file_id: str
+    """Unique identifier of the referenced file"""
+
+    filename: str
+    """Name of the referenced file"""
+
+    index: int
+    """Position index of the citation within the content"""
+
+    type: Literal["file_citation"]
+    """Annotation type identifier, always "file_citation" """
+
+
+class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationCitation(
+    BaseModel
+):
+    end_index: int
+    """End position of the citation span in the content"""
+
+    start_index: int
+    """Start position of the citation span in the content"""
+
+    title: str
+    """Title of the referenced web resource"""
+
+    type: Literal["url_citation"]
+    """Annotation type identifier, always "url_citation" """
+
+    url: str
+    """URL of the referenced web resource"""
+
+
+class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
+    BaseModel
+):
+    container_id: str
+
+    end_index: int
+
+    file_id: str
+
+    filename: str
+
+    start_index: int
+
+    type: Literal["container_file_citation"]
+
+
+class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath(
+    BaseModel
+):
+    file_id: str
+
+    index: int
+
+    type: Literal["file_path"]
+
+
+OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotation: TypeAlias = Annotated[
+    Union[
+        OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFileCitation,
+        OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationCitation,
+        OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation,
+        OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
 class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputText(BaseModel):
+    annotations: List[
+        OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotation
+    ]
+    """Structured annotations associated with the text"""
+
     text: str
+    """Text emitted for this content part"""
 
     type: Literal["output_text"]
+    """Content part type identifier, always "output_text" """
+
+    logprobs: Optional[List[Dict[str, Union[bool, float, str, List[object], object, None]]]] = None
+    """(Optional) Token log probability details"""
 
 
 class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartRefusal(BaseModel):
     refusal: str
+    """Refusal text supplied by the model"""
 
     type: Literal["refusal"]
+    """Content part type identifier, always "refusal" """
+
+
+class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartReasoningText(BaseModel):
+    text: str
+    """Reasoning text supplied by the model"""
+
+    type: Literal["reasoning_text"]
+    """Content part type identifier, always "reasoning_text" """
 
 
 OpenAIResponseObjectStreamResponseContentPartDonePart: TypeAlias = Annotated[
     Union[
         OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputText,
         OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartRefusal,
+        OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartReasoningText,
     ],
     PropertyInfo(discriminator="type"),
 ]
 
 
 class OpenAIResponseObjectStreamResponseContentPartDone(BaseModel):
+    content_index: int
+    """Index position of the part within the content array"""
+
     item_id: str
     """Unique identifier of the output item containing this content part"""
 
+    output_index: int
+    """Index position of the output item in the response"""
+
     part: OpenAIResponseObjectStreamResponseContentPartDonePart
     """The completed content part"""
 
@@ -925,9 +1149,31 @@ class OpenAIResponseObjectStreamResponseContentPartDone(BaseModel):
     """Event type identifier, always "response.content_part.done" """
 
 
+class OpenAIResponseObjectStreamResponseIncomplete(BaseModel):
+    response: ResponseObject
+    """Response object describing the incomplete state"""
+
+    sequence_number: int
+    """Sequential number for ordering streaming events"""
+
+    type: Literal["response.incomplete"]
+    """Event type identifier, always "response.incomplete" """
+
+
+class OpenAIResponseObjectStreamResponseFailed(BaseModel):
+    response: ResponseObject
+    """Response object describing the failure"""
+
+    sequence_number: int
+    """Sequential number for ordering streaming events"""
+
+    type: Literal["response.failed"]
+    """Event type identifier, always "response.failed" """
+
+
 class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
     response: ResponseObject
-    """The completed response object"""
+    """Completed response object"""
 
     type: Literal["response.completed"]
     """Event type identifier, always "response.completed" """
@@ -936,6 +1182,7 @@ class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
 ResponseObjectStream: TypeAlias = Annotated[
     Union[
         OpenAIResponseObjectStreamResponseCreated,
+        OpenAIResponseObjectStreamResponseInProgress,
         OpenAIResponseObjectStreamResponseOutputItemAdded,
         OpenAIResponseObjectStreamResponseOutputItemDone,
         OpenAIResponseObjectStreamResponseOutputTextDelta,
@@ -955,6 +1202,8 @@ class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
         OpenAIResponseObjectStreamResponseMcpCallCompleted,
         OpenAIResponseObjectStreamResponseContentPartAdded,
         OpenAIResponseObjectStreamResponseContentPartDone,
+        OpenAIResponseObjectStreamResponseIncomplete,
+        OpenAIResponseObjectStreamResponseFailed,
         OpenAIResponseObjectStreamResponseCompleted,
     ],
     PropertyInfo(discriminator="type"),
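
For reference, a minimal sketch of consuming the response stream and branching on the terminal event types defined above (the base URL and model id are placeholders, and the handling shown is illustrative only):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder base URL

# stream=True yields ResponseObjectStream events, discriminated by `type`
stream = client.responses.create(
    input="Hello",
    model="my-model",  # placeholder model id
    stream=True,
)

for event in stream:
    if event.type == "response.completed":
        print("completed:", event.response)
    elif event.type == "response.incomplete":
        print("ended early:", event.response)
    elif event.type == "response.failed":
        print("failed:", event.response)
    # other event types (content part deltas, tool calls, ...) can be handled as needed
```
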
diff --git a/tests/api_resources/alpha/post_training/test_job.py b/tests/api_resources/alpha/post_training/test_job.py
index 3b47132f..bec18796 100644
--- a/tests/api_resources/alpha/post_training/test_job.py
+++ b/tests/api_resources/alpha/post_training/test_job.py
@@ -3,17 +3,17 @@
 from __future__ import annotations
 
 import os
-from typing import Any, List, cast
+from typing import Any, cast
 
 import pytest
 
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
 from llama_stack_client.types.alpha.post_training import (
+    JobListResponse,
     JobStatusResponse,
     JobArtifactsResponse,
 )
-from llama_stack_client.types.alpha.list_post_training_jobs_response import Data
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -24,7 +24,7 @@ class TestJob:
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         job = client.alpha.post_training.job.list()
-        assert_matches_type(List[Data], job, path=["response"])
+        assert_matches_type(JobListResponse, job, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
@@ -33,7 +33,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         job = response.parse()
-        assert_matches_type(List[Data], job, path=["response"])
+        assert_matches_type(JobListResponse, job, path=["response"])
 
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
@@ -42,7 +42,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             job = response.parse()
-            assert_matches_type(List[Data], job, path=["response"])
+            assert_matches_type(JobListResponse, job, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
@@ -148,7 +148,7 @@ class TestAsyncJob:
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         job = await async_client.alpha.post_training.job.list()
-        assert_matches_type(List[Data], job, path=["response"])
+        assert_matches_type(JobListResponse, job, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
@@ -157,7 +157,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         job = await response.parse()
-        assert_matches_type(List[Data], job, path=["response"])
+        assert_matches_type(JobListResponse, job, path=["response"])
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
@@ -166,7 +166,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             job = await response.parse()
-            assert_matches_type(List[Data], job, path=["response"])
+            assert_matches_type(JobListResponse, job, path=["response"])
 
         assert cast(Any, response.is_closed) is True
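
The list call itself is unchanged at the call site; only the parsed return type differs. A quick sketch under that assumption (base URL is a placeholder):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder base URL

# Parsed as JobListResponse rather than List[Data]; the call is otherwise identical
jobs = client.alpha.post_training.job.list()
print(jobs)
```
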
 
diff --git a/tests/api_resources/conversations/__init__.py b/tests/api_resources/conversations/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/conversations/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/conversations/test_items.py b/tests/api_resources/conversations/test_items.py
new file mode 100644
index 00000000..a0d58fa2
--- /dev/null
+++ b/tests/api_resources/conversations/test_items.py
@@ -0,0 +1,364 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
+from llama_stack_client.types.conversations import (
+    ItemGetResponse,
+    ItemListResponse,
+    ItemCreateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestItems:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: LlamaStackClient) -> None:
+        item = client.conversations.items.create(
+            conversation_id="conversation_id",
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                }
+            ],
+        )
+        assert_matches_type(ItemCreateResponse, item, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: LlamaStackClient) -> None:
+        response = client.conversations.items.with_raw_response.create(
+            conversation_id="conversation_id",
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                }
+            ],
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ItemCreateResponse, item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+        with client.conversations.items.with_streaming_response.create(
+            conversation_id="conversation_id",
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                }
+            ],
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(ItemCreateResponse, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_create(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.create(
+                conversation_id="",
+                items=[
+                    {
+                        "content": "string",
+                        "role": "system",
+                        "type": "message",
+                    }
+                ],
+            )
+
+    @parametrize
+    def test_method_list(self, client: LlamaStackClient) -> None:
+        item = client.conversations.items.list(
+            conversation_id="conversation_id",
+            after="string",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(ItemListResponse, item, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: LlamaStackClient) -> None:
+        response = client.conversations.items.with_raw_response.list(
+            conversation_id="conversation_id",
+            after="string",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ItemListResponse, item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+        with client.conversations.items.with_streaming_response.list(
+            conversation_id="conversation_id",
+            after="string",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(ItemListResponse, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_list(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.list(
+                conversation_id="",
+                after="string",
+                include=["code_interpreter_call.outputs"],
+                limit=0,
+                order="asc",
+            )
+
+    @parametrize
+    def test_method_get(self, client: LlamaStackClient) -> None:
+        item = client.conversations.items.get(
+            item_id="item_id",
+            conversation_id="conversation_id",
+        )
+        assert_matches_type(ItemGetResponse, item, path=["response"])
+
+    @parametrize
+    def test_raw_response_get(self, client: LlamaStackClient) -> None:
+        response = client.conversations.items.with_raw_response.get(
+            item_id="item_id",
+            conversation_id="conversation_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ItemGetResponse, item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_get(self, client: LlamaStackClient) -> None:
+        with client.conversations.items.with_streaming_response.get(
+            item_id="item_id",
+            conversation_id="conversation_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(ItemGetResponse, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_get(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.get(
+                item_id="item_id",
+                conversation_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+            client.conversations.items.with_raw_response.get(
+                item_id="",
+                conversation_id="conversation_id",
+            )
+
+
+class TestAsyncItems:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+        item = await async_client.conversations.items.create(
+            conversation_id="conversation_id",
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                }
+            ],
+        )
+        assert_matches_type(ItemCreateResponse, item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.items.with_raw_response.create(
+            conversation_id="conversation_id",
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                }
+            ],
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = await response.parse()
+        assert_matches_type(ItemCreateResponse, item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.items.with_streaming_response.create(
+            conversation_id="conversation_id",
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                }
+            ],
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(ItemCreateResponse, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_create(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.create(
+                conversation_id="",
+                items=[
+                    {
+                        "content": "string",
+                        "role": "system",
+                        "type": "message",
+                    }
+                ],
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+        item = await async_client.conversations.items.list(
+            conversation_id="conversation_id",
+            after="string",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(ItemListResponse, item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.items.with_raw_response.list(
+            conversation_id="conversation_id",
+            after="string",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = await response.parse()
+        assert_matches_type(ItemListResponse, item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.items.with_streaming_response.list(
+            conversation_id="conversation_id",
+            after="string",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(ItemListResponse, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.list(
+                conversation_id="",
+                after="string",
+                include=["code_interpreter_call.outputs"],
+                limit=0,
+                order="asc",
+            )
+
+    @parametrize
+    async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
+        item = await async_client.conversations.items.get(
+            item_id="item_id",
+            conversation_id="conversation_id",
+        )
+        assert_matches_type(ItemGetResponse, item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.items.with_raw_response.get(
+            item_id="item_id",
+            conversation_id="conversation_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = await response.parse()
+        assert_matches_type(ItemGetResponse, item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.items.with_streaming_response.get(
+            item_id="item_id",
+            conversation_id="conversation_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(ItemGetResponse, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_get(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.get(
+                item_id="item_id",
+                conversation_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.get(
+                item_id="",
+                conversation_id="conversation_id",
+            )
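
A short sketch of the item operations exercised in the tests above (the conversation id, cursor, and message values are placeholders; it is not shown here whether the list filters are optional):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder base URL

conversation_id = "conv_123"  # hypothetical id; normally taken from conversations.create()

# Append a message item to the conversation (role/content values are illustrative)
client.conversations.items.create(
    conversation_id=conversation_id,
    items=[{"type": "message", "role": "user", "content": "What is Llama Stack?"}],
)

# List items, mirroring the parameters used in the generated tests
items = client.conversations.items.list(
    conversation_id=conversation_id,
    after="item_0",  # illustrative cursor value
    include=["code_interpreter_call.outputs"],
    limit=10,
    order="asc",
)
print(items)
```
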
diff --git a/tests/api_resources/test_conversations.py b/tests/api_resources/test_conversations.py
new file mode 100644
index 00000000..a499ac91
--- /dev/null
+++ b/tests/api_resources/test_conversations.py
@@ -0,0 +1,345 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
+from llama_stack_client.types import (
+    ConversationObject,
+    ConversationDeleteResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestConversations:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: LlamaStackClient) -> None:
+        conversation = client.conversations.create()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+        conversation = client.conversations.create(
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                    "id": "id",
+                    "status": "status",
+                }
+            ],
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: LlamaStackClient) -> None:
+        response = client.conversations.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+        with client.conversations.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(ConversationObject, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_retrieve(self, client: LlamaStackClient) -> None:
+        conversation = client.conversations.retrieve(
+            "conversation_id",
+        )
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+        response = client.conversations.with_raw_response.retrieve(
+            "conversation_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+        with client.conversations.with_streaming_response.retrieve(
+            "conversation_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(ConversationObject, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    def test_method_update(self, client: LlamaStackClient) -> None:
+        conversation = client.conversations.update(
+            conversation_id="conversation_id",
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_update(self, client: LlamaStackClient) -> None:
+        response = client.conversations.with_raw_response.update(
+            conversation_id="conversation_id",
+            metadata={"foo": "string"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_update(self, client: LlamaStackClient) -> None:
+        with client.conversations.with_streaming_response.update(
+            conversation_id="conversation_id",
+            metadata={"foo": "string"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(ConversationObject, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_update(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.with_raw_response.update(
+                conversation_id="",
+                metadata={"foo": "string"},
+            )
+
+    @parametrize
+    def test_method_delete(self, client: LlamaStackClient) -> None:
+        conversation = client.conversations.delete(
+            "conversation_id",
+        )
+        assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+        response = client.conversations.with_raw_response.delete(
+            "conversation_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+        with client.conversations.with_streaming_response.delete(
+            "conversation_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.with_raw_response.delete(
+                "",
+            )
+
+
+class TestAsyncConversations:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+        conversation = await async_client.conversations.create()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+        conversation = await async_client.conversations.create(
+            items=[
+                {
+                    "content": "string",
+                    "role": "system",
+                    "type": "message",
+                    "id": "id",
+                    "status": "status",
+                }
+            ],
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = await response.parse()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(ConversationObject, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        conversation = await async_client.conversations.retrieve(
+            "conversation_id",
+        )
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.with_raw_response.retrieve(
+            "conversation_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = await response.parse()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.with_streaming_response.retrieve(
+            "conversation_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(ConversationObject, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
+        conversation = await async_client.conversations.update(
+            conversation_id="conversation_id",
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.with_raw_response.update(
+            conversation_id="conversation_id",
+            metadata={"foo": "string"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = await response.parse()
+        assert_matches_type(ConversationObject, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.with_streaming_response.update(
+            conversation_id="conversation_id",
+            metadata={"foo": "string"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(ConversationObject, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.with_raw_response.update(
+                conversation_id="",
+                metadata={"foo": "string"},
+            )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+        conversation = await async_client.conversations.delete(
+            "conversation_id",
+        )
+        assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+        response = await async_client.conversations.with_raw_response.delete(
+            "conversation_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = await response.parse()
+        assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+        async with async_client.conversations.with_streaming_response.delete(
+            "conversation_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.with_raw_response.delete(
+                "",
+            )
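
An end-to-end sketch of the conversation lifecycle covered by these tests (the `id` attribute on the returned object and the metadata values are assumptions):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder base URL

# Create a conversation seeded with an initial message and some metadata
conversation = client.conversations.create(
    items=[{"type": "message", "role": "system", "content": "You are a helpful assistant."}],
    metadata={"project": "demo"},
)

# `conversation.id` is assumed to be the identifier field on ConversationObject
client.conversations.update(
    conversation_id=conversation.id,
    metadata={"project": "demo", "stage": "review"},
)
retrieved = client.conversations.retrieve(conversation.id)
deleted = client.conversations.delete(conversation.id)
print(retrieved, deleted)
```
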
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index ad2ab3be..9e862716 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -35,6 +35,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
         response = client.responses.create(
             input="string",
             model="model",
+            conversation="conversation",
             include=["string"],
             instructions="instructions",
             max_infer_iters=0,
@@ -101,6 +102,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
             input="string",
             model="model",
             stream=True,
+            conversation="conversation",
             include=["string"],
             instructions="instructions",
             max_infer_iters=0,
@@ -282,6 +284,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         response = await async_client.responses.create(
             input="string",
             model="model",
+            conversation="conversation",
             include=["string"],
             instructions="instructions",
             max_infer_iters=0,
@@ -348,6 +351,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             input="string",
             model="model",
             stream=True,
+            conversation="conversation",
             include=["string"],
             instructions="instructions",
             max_infer_iters=0,
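
Finally, a sketch tying the two features together: creating a conversation and threading it through `responses.create` via the new `conversation` parameter (the `id` attribute and model id are assumptions/placeholders):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder base URL

conversation = client.conversations.create()

# `conversation.id` is assumed to be the conversation identifier
response = client.responses.create(
    input="Summarize our discussion so far.",
    model="my-model",  # placeholder model id
    conversation=conversation.id,
)
print(response)
```
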