diff --git a/docs/agents.md b/docs/agents.md
index fb40341484..0633fb88ba 100644
--- a/docs/agents.md
+++ b/docs/agents.md
@@ -320,7 +320,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -329,6 +330,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-5',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
@@ -382,7 +384,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -391,6 +394,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-5',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
@@ -1044,7 +1048,8 @@ with capture_run_messages() as messages:  # (2)!
                 content='Please get me the volume of a box with size 6.',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -1057,6 +1062,7 @@ with capture_run_messages() as messages:  # (2)!
         usage=RequestUsage(input_tokens=62, output_tokens=4),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -1066,7 +1072,8 @@ with capture_run_messages() as messages:  # (2)!
                 tool_call_id='pyd_ai_tool_call_id',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -1079,6 +1086,7 @@ with capture_run_messages() as messages:  # (2)!
         usage=RequestUsage(input_tokens=72, output_tokens=8),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
diff --git a/docs/api/models/function.md b/docs/api/models/function.md
index 4049d757c6..4cdceb449f 100644
--- a/docs/api/models/function.md
+++ b/docs/api/models/function.md
@@ -29,7 +29,8 @@ async def model_function(
                 content='Testing my agent...',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     )
 ]
 """
diff --git a/docs/deferred-tools.md b/docs/deferred-tools.md
index e5e5201163..b5dcb38685 100644
--- a/docs/deferred-tools.md
+++ b/docs/deferred-tools.md
@@ -106,7 +106,8 @@ print(result.all_messages())
                 content='Delete `__init__.py`, write `Hello, world!` to `README.md`, and clear `.env`',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -129,6 +130,7 @@ print(result.all_messages())
         usage=RequestUsage(input_tokens=63, output_tokens=21),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -138,7 +140,8 @@ print(result.all_messages())
                 tool_call_id='update_file_readme',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -154,7 +157,8 @@ print(result.all_messages())
                 tool_call_id='delete_file',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -165,6 +169,7 @@ print(result.all_messages())
         usage=RequestUsage(input_tokens=79, output_tokens=39),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
@@ -275,7 +280,8 @@ async def main():
                 content='Calculate the answer to the ultimate question of life, the universe, and everything',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -290,6 +296,7 @@ async def main():
         usage=RequestUsage(input_tokens=63, output_tokens=13),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -299,7 +306,8 @@ async def main():
                 tool_call_id='pyd_ai_tool_call_id',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -310,6 +318,7 @@ async def main():
         usage=RequestUsage(input_tokens=64, output_tokens=28),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
diff --git a/docs/durable_execution/temporal.md b/docs/durable_execution/temporal.md
index d39f17f055..01872b9186 100644
--- a/docs/durable_execution/temporal.md
+++ b/docs/durable_execution/temporal.md
@@ -172,7 +172,7 @@ As workflows and activities run in separate processes, any values passed between
 To account for these limitations, tool functions and the [event stream handler](#streaming) running inside activities receive a limited version of the agent's [`RunContext`][pydantic_ai.tools.RunContext], and it's your responsibility to make sure that the [dependencies](../dependencies.md) object provided to [`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] can be serialized using Pydantic.
 
-Specifically, only the `deps`, `retries`, `tool_call_id`, `tool_name`, `tool_call_approved`, `retry`, `max_retries`, `run_step` and `partial_output` fields are available by default, and trying to access `model`, `usage`, `prompt`, `messages`, or `tracer` will raise an error.
+Specifically, only the `deps`, `run_id`, `retries`, `tool_call_id`, `tool_name`, `tool_call_approved`, `retry`, `max_retries`, `run_step` and `partial_output` fields are available by default, and trying to access `model`, `usage`, `prompt`, `messages`, or `tracer` will raise an error.
 
 If you need one or more of these attributes to be available inside activities, you can create a [`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass with custom `serialize_run_context` and `deserialize_run_context` class methods and pass it to [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent] as `run_context_type`.
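For illustration, a minimal sketch of such a subclass, assuming an existing agent instance and using `prompt` as the example extra attribute (per the `TemporalRunContext` docstring, overriding `serialize_run_context` alone is enough to expose an additional field):

```python
from typing import Any

from pydantic_ai.durable_exec.temporal import TemporalAgent, TemporalRunContext
from pydantic_ai.tools import RunContext


class RunContextWithPrompt(TemporalRunContext[Any]):
    @classmethod
    def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]:
        # Keep the default fields (deps, run_id, retries, ...) and also
        # carry `prompt` into the activity.
        return {**super().serialize_run_context(ctx), 'prompt': ctx.prompt}


# Passed when wrapping an existing agent, e.g.:
# temporal_agent = TemporalAgent(agent, run_context_type=RunContextWithPrompt)
```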
 
 ### Streaming
diff --git a/docs/message-history.md b/docs/message-history.md
index a022d1575f..3363312fed 100644
--- a/docs/message-history.md
+++ b/docs/message-history.md
@@ -50,7 +50,8 @@ print(result.all_messages())
                 content='Tell me a joke.',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -61,6 +62,7 @@ print(result.all_messages())
         usage=RequestUsage(input_tokens=60, output_tokens=12),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
@@ -92,7 +94,8 @@ async def main():
                 content='Tell me a joke.',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     )
 ]
 """
@@ -118,7 +121,8 @@ async def main():
                 content='Tell me a joke.',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -129,6 +133,7 @@ async def main():
         usage=RequestUsage(input_tokens=50, output_tokens=12),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
@@ -172,7 +177,8 @@ print(result2.all_messages())
                 content='Tell me a joke.',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -183,6 +189,7 @@ print(result2.all_messages())
         usage=RequestUsage(input_tokens=60, output_tokens=12),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -190,7 +197,8 @@ print(result2.all_messages())
                 content='Explain?',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -201,6 +209,7 @@ print(result2.all_messages())
         usage=RequestUsage(input_tokens=61, output_tokens=26),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
@@ -293,7 +302,8 @@ print(result2.all_messages())
                 content='Tell me a joke.',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -304,6 +314,7 @@ print(result2.all_messages())
         usage=RequestUsage(input_tokens=60, output_tokens=12),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -311,7 +322,8 @@ print(result2.all_messages())
                 content='Explain?',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -322,6 +334,7 @@ print(result2.all_messages())
         usage=RequestUsage(input_tokens=61, output_tokens=26),
         model_name='gemini-2.5-pro',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
diff --git a/docs/testing.md b/docs/testing.md
index 66a313f873..3089585ab0 100644
--- a/docs/testing.md
+++ b/docs/testing.md
@@ -127,7 +127,8 @@ async def test_forecast():
                 content='What will the weather be like in London on 2024-11-28?',
                 timestamp=IsNow(tz=timezone.utc),  # (7)!
             ),
-        ]
+        ],
+        run_id=IsStr(),
     ),
     ModelResponse(
         parts=[
@@ -146,6 +147,7 @@ async def test_forecast():
         ),
         model_name='test',
         timestamp=IsNow(tz=timezone.utc),
+        run_id=IsStr(),
     ),
     ModelRequest(
         parts=[
@@ -156,6 +158,7 @@ async def test_forecast():
                 timestamp=IsNow(tz=timezone.utc),
             ),
         ],
+        run_id=IsStr(),
     ),
     ModelResponse(
         parts=[
@@ -169,6 +172,7 @@ async def test_forecast():
         ),
         model_name='test',
         timestamp=IsNow(tz=timezone.utc),
+        run_id=IsStr(),
     ),
 ]
 ```
diff --git a/docs/tools.md b/docs/tools.md
index 6ca0af7cad..40dcf5c810 100644
--- a/docs/tools.md
+++ b/docs/tools.md
@@ -87,7 +87,8 @@ print(dice_result.all_messages())
                 content='My guess is 4',
                 timestamp=datetime.datetime(...),
             ),
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -98,6 +99,7 @@ print(dice_result.all_messages())
         usage=RequestUsage(input_tokens=90, output_tokens=2),
         model_name='gemini-2.5-flash',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -107,7 +109,8 @@ print(dice_result.all_messages())
                 tool_call_id='pyd_ai_tool_call_id',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -118,6 +121,7 @@ print(dice_result.all_messages())
         usage=RequestUsage(input_tokens=91, output_tokens=4),
         model_name='gemini-2.5-flash',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
     ModelRequest(
         parts=[
@@ -127,7 +131,8 @@ print(dice_result.all_messages())
                 tool_call_id='pyd_ai_tool_call_id',
                 timestamp=datetime.datetime(...),
             )
-        ]
+        ],
+        run_id='...',
     ),
     ModelResponse(
         parts=[
@@ -138,6 +143,7 @@ print(dice_result.all_messages())
         usage=RequestUsage(input_tokens=92, output_tokens=12),
         model_name='gemini-2.5-flash',
         timestamp=datetime.datetime(...),
+        run_id='...',
     ),
 ]
 """
diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index c167521079..f7f6db0933 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -92,6 +92,7 @@ class GraphAgentState:
     usage: _usage.RunUsage = dataclasses.field(default_factory=_usage.RunUsage)
     retries: int = 0
     run_step: int = 0
+    run_id: str | None = None
 
     def increment_retries(
         self,
@@ -469,6 +470,7 @@ async def _make_request(
     async def _prepare_request(
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
     ) -> tuple[ModelSettings | None, models.ModelRequestParameters, list[_messages.ModelMessage], RunContext[DepsT]]:
+        self.request.run_id = self.request.run_id or ctx.state.run_id
         ctx.state.message_history.append(self.request)
 
         ctx.state.run_step += 1
@@ -510,6 +512,7 @@ def _finish_handling(
         ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
         response: _messages.ModelResponse,
     ) -> CallToolsNode[DepsT, NodeRunEndT]:
+        response.run_id = response.run_id or ctx.state.run_id
         # Update usage
         ctx.state.usage.incr(response.usage)
         if ctx.deps.usage_limits:  # pragma: no branch
@@ -741,7 +744,7 @@ def _handle_final_result(
 
     # For backwards compatibility, append a new ModelRequest using the tool returns and retries
     if tool_responses:
-        messages.append(_messages.ModelRequest(parts=tool_responses))
+        messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id))
 
     return End(final_result)
 
@@ -775,6 +778,7 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT
         if ctx.deps.instrumentation_settings
         else DEFAULT_INSTRUMENTATION_VERSION,
         run_step=ctx.state.run_step,
+        run_id=ctx.state.run_id,
     )
diff --git a/pydantic_ai_slim/pydantic_ai/_run_context.py b/pydantic_ai_slim/pydantic_ai/_run_context.py
index 1848c42eb1..4f9b253767 100644
--- a/pydantic_ai_slim/pydantic_ai/_run_context.py
+++ b/pydantic_ai_slim/pydantic_ai/_run_context.py
@@ -60,6 +60,8 @@ class RunContext(Generic[RunContextAgentDepsT]):
     """Whether a tool call that required approval has now been approved."""
     partial_output: bool = False
     """Whether the output passed to an output validator is partial."""
+    run_id: str | None = None
+    """Unique identifier for the agent run."""
 
     @property
    def last_attempt(self) -> bool:
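Since `build_run_context` above stamps `RunContext.run_id` from `GraphAgentState.run_id`, a tool function can read the ID directly, e.g. for log correlation. A minimal sketch (the `echo_run_id` tool and model string are illustrative, not part of this diff):

```python
from pydantic_ai import Agent, RunContext

agent = Agent('openai:gpt-4o')


@agent.tool
async def echo_run_id(ctx: RunContext[None]) -> str:
    """Return the ID of the agent run this tool call belongs to."""
    return f'current run: {ctx.run_id}'
```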
diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
index 5bcfa6baae..2744f62a42 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
@@ -3,6 +3,7 @@
 import dataclasses
 import inspect
 import json
+import uuid
 import warnings
 from asyncio import Lock
 from collections.abc import AsyncIterator, Awaitable, Callable, Iterator, Sequence
@@ -500,7 +501,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -509,6 +511,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
@@ -572,6 +575,7 @@ async def main():
             usage=usage,
             retries=0,
             run_step=0,
+            run_id=str(uuid.uuid4()),
         )
 
         # Merge model settings in order of precedence: run > agent > model
diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py
index fa5846a31d..c7c1cb2b5c 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py
@@ -559,7 +559,7 @@ async def on_complete() -> None:
 
             # For backwards compatibility, append a new ModelRequest using the tool returns and retries
             if parts:
-                messages.append(_messages.ModelRequest(parts))
+                messages.append(_messages.ModelRequest(parts, run_id=graph_ctx.state.run_id))
 
             await agent_run.next(_agent_graph.SetFinalResult(final_result))
@@ -1003,7 +1004,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -1012,6 +1013,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py
index fcf7826f13..38e832fa2b 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py
@@ -163,7 +163,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -172,6 +173,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py
index 42aec0bd83..9e1c8ee3c0 100644
--- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py
+++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py
@@ -799,7 +800,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -808,6 +809,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py
index 0867a60e36..8b1b6af44a 100644
--- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py
+++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py
@@ -767,7 +768,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -776,6 +777,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py
index b6be1e7b9d..82ec0e76fa 100644
--- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py
+++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py
@@ -841,7 +842,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -850,6 +851,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py
index c24587553d..aab04ea093 100644
--- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py
+++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_run_context.py
@@ -14,7 +14,7 @@ class TemporalRunContext(RunContext[AgentDepsT]):
     """The [`RunContext`][pydantic_ai.tools.RunContext] subclass to use to serialize and deserialize the run context for use inside a Temporal activity.
 
-    By default, only the `deps`, `retries`, `tool_call_id`, `tool_name`, `tool_call_approved`, `retry`, `max_retries`, `run_step` and `partial_output` attributes will be available.
+    By default, only the `deps`, `run_id`, `retries`, `tool_call_id`, `tool_name`, `tool_call_approved`, `retry`, `max_retries`, `run_step` and `partial_output` attributes will be available.
     To make another attribute available, create a `TemporalRunContext` subclass with a custom `serialize_run_context` class method that returns a dictionary that includes the attribute and pass it to [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent].
     """
@@ -42,6 +42,7 @@ def __getattribute__(self, name: str) -> Any:
     def serialize_run_context(cls, ctx: RunContext[Any]) -> dict[str, Any]:
         """Serialize the run context to a `dict[str, Any]`."""
         return {
+            'run_id': ctx.run_id,
             'retries': ctx.retries,
             'tool_call_id': ctx.tool_call_id,
             'tool_name': ctx.tool_name,
diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py
index d5aaa5e791..c0b5e4c532 100644
--- a/pydantic_ai_slim/pydantic_ai/messages.py
+++ b/pydantic_ai_slim/pydantic_ai/messages.py
@@ -947,6 +947,9 @@ class ModelRequest:
     kind: Literal['request'] = 'request'
     """Message type identifier, this is available on all parts as a discriminator."""
 
+    run_id: str | None = None
+    """The unique identifier for the agent run that this request was created in."""
+
     @classmethod
     def user_text_prompt(cls, user_prompt: str, *, instructions: str | None = None) -> ModelRequest:
         """Create a `ModelRequest` with a single user prompt as text."""
@@ -1188,6 +1191,9 @@ class ModelResponse:
     finish_reason: FinishReason | None = None
     """Reason the model finished generating the response, normalized to OpenTelemetry values."""
 
+    run_id: str | None = None
+    """The unique identifier for the agent run that this response was created in."""
+
     @property
     def text(self) -> str | None:
         """Get the text in the response."""
diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py
index a9c92c9ce9..58f4cb3b98 100644
--- a/pydantic_ai_slim/pydantic_ai/result.py
+++ b/pydantic_ai_slim/pydantic_ai/result.py
@@ -120,6 +120,12 @@ async def stream_text(self, *, delta: bool = False, debounce_by: float | None =
             text = await validator.validate(text, replace(self._run_ctx, partial_output=True))
             yield text
 
+    @property
+    def run_id(self) -> str:
+        """The unique identifier for the agent run."""
+        assert self._run_ctx.run_id is not None
+        return self._run_ctx.run_id
+
     # TODO (v2): Drop in favor of `response` property
     def get(self) -> _messages.ModelResponse:
         """Get the current state of the response."""
@@ -533,6 +539,18 @@ def timestamp(self) -> datetime:
         else:
             raise ValueError('No stream response or run result provided')  # pragma: no cover
 
+    @property
+    def run_id(self) -> str:
+        """The unique identifier for the agent run."""
+        if self._run_result is not None:
+            assert self._run_result.run_id is not None
+            return self._run_result.run_id
+        elif self._stream_response is not None:
+            assert self._stream_response.run_id is not None
+            return self._stream_response.run_id
+        else:
+            raise ValueError('No stream response or run result provided')  # pragma: no cover
+
     @deprecated('`validate_structured_output` is deprecated, use `validate_response_output` instead.')
     async def validate_structured_output(
         self, message: _messages.ModelResponse, *, allow_partial: bool = False
@@ -553,6 +571,8 @@ async def validate_response_output(
     async def _marked_completed(self, message: _messages.ModelResponse | None = None) -> None:
         self.is_complete = True
         if message is not None:
+            if self._stream_response:  # pragma: no branch
+                message.run_id = self._stream_response.run_id
             self._all_messages.append(message)
         if self._on_complete is not None:
             await self._on_complete()
@@ -691,6 +711,11 @@ def timestamp(self) -> datetime:
         """Get the timestamp of the response."""
         return self._streamed_run_result.timestamp()
 
+    @property
+    def run_id(self) -> str:
+        """The unique identifier for the agent run."""
+        return self._streamed_run_result.run_id
+
     def validate_response_output(self, message: _messages.ModelResponse, *, allow_partial: bool = False) -> OutputDataT:
         """Validate a structured result message."""
         return _utils.get_event_loop().run_until_complete(
diff --git a/pydantic_ai_slim/pydantic_ai/run.py b/pydantic_ai_slim/pydantic_ai/run.py
index f22ec93f22..61dda9a6cc 100644
--- a/pydantic_ai_slim/pydantic_ai/run.py
+++ b/pydantic_ai_slim/pydantic_ai/run.py
@@ -63,7 +63,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -72,6 +73,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
@@ -240,7 +242,8 @@ async def main():
                     content='What is the capital of France?',
                     timestamp=datetime.datetime(...),
                 )
-            ]
+            ],
+            run_id='...',
         )
     ),
     CallToolsNode(
@@ -249,6 +252,7 @@ async def main():
             usage=RequestUsage(input_tokens=56, output_tokens=7),
             model_name='gpt-4o',
             timestamp=datetime.datetime(...),
+            run_id='...',
         )
     ),
     End(data=FinalResult(output='The capital of France is Paris.')),
@@ -279,6 +283,12 @@ def usage(self) -> _usage.RunUsage:
         """Get usage statistics for the run so far, including token usage, model requests, and so on."""
         return self._graph_run.state.usage
 
+    @property
+    def run_id(self) -> str:
+        """The unique identifier for the agent run."""
+        assert self._graph_run.state.run_id is not None
+        return self._graph_run.state.run_id
+
     def __repr__(self) -> str:  # pragma: no cover
         result = self._graph_run.output
         result_repr = '' if result is None else repr(result.output)
@@ -413,6 +423,12 @@ def timestamp(self) -> datetime:
         """Return the timestamp of last response."""
         return self.response.timestamp
 
+    @property
+    def run_id(self) -> str:
+        """The unique identifier for the agent run."""
+        assert self._state.run_id is not None
+        return self._state.run_id
+
 
 @dataclasses.dataclass(repr=False)
 class AgentRunResultEvent(Generic[OutputDataT]):
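On the result side, the new property makes the ID easy to read back after a run. A usage sketch (model string and prompt are placeholders), assuming the stamping in `_agent_graph.py` above applies to every message created during the run:

```python
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

result = agent.run_sync('What is the capital of France?')
print(result.run_id)  # a UUID string generated when the run started
# the messages recorded during this run carry the same ID:
assert all(m.run_id == result.run_id for m in result.new_messages())
```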
diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py
index 130206e3aa..b7b66404e0 100644
--- a/tests/models/test_anthropic.py
+++ b/tests/models/test_anthropic.py
@@ -222,7 +222,10 @@ async def test_sync_request_text_response(allow_model_requests: None):
     )
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 usage=RequestUsage(input_tokens=5, output_tokens=10, details={'input_tokens': 5, 'output_tokens': 10}),
@@ -232,8 +235,12 @@ async def test_sync_request_text_response(allow_model_requests: None):
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 usage=RequestUsage(input_tokens=5, output_tokens=10, details={'input_tokens': 5, 'output_tokens': 10}),
@@ -243,6 +250,7 @@ async def test_sync_request_text_response(allow_model_requests: None):
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -318,7 +326,10 @@ async def test_request_structured_response(allow_model_requests: None):
     assert result.output == [1, 2, 3]
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(
@@ -334,6 +345,7 @@ async def test_request_structured_response(allow_model_requests: None):
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -343,7 +355,8 @@ async def test_request_structured_response(allow_model_requests: None):
                         tool_call_id='123',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -384,7 +397,8 @@ async def get_location(loc_name: str) -> str:
                 parts=[
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -401,6 +415,7 @@ async def get_location(loc_name: str) -> str:
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -410,7 +425,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='1',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -427,6 +443,7 @@ async def get_location(loc_name: str) -> str:
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -436,7 +453,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='2',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
@@ -447,6 +465,7 @@ async def get_location(loc_name: str) -> str:
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -788,7 +807,8 @@ async def get_image() -> BinaryContent:
                         content=['What fruit is in the image you can get from the get_image tool?'],
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -811,6 +831,7 @@ async def get_image() -> BinaryContent:
                 provider_details={'finish_reason': 'tool_use'},
                 provider_response_id='msg_01Kwjzggomz7bv9og51qGFuH',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -827,7 +848,8 @@ async def get_image() -> BinaryContent:
                        ],
                        timestamp=IsDatetime(),
                    ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -851,6 +873,7 @@ async def get_image() -> BinaryContent:
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_015btMBYLTuDnMP7zAeuHQGi',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -956,6 +979,7 @@ def simple_instructions():
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                 instructions='You are a helpful assistant.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The capital of France is Paris.')],
@@ -975,6 +999,7 @@ def simple_instructions():
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01Fg1JVgvCYUHWsxrj9GkpEv',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -988,7 +1013,10 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
     result = await agent.run('How do I cross the street?')
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ThinkingPart(
@@ -1026,6 +1054,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01BnZvs3naGorn93wjjCDwbd',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1042,7 +1071,8 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
                         content='Considering the way to cross the street, analogously, how do I cross the river?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1081,6 +1111,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1102,7 +1133,8 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None
                        content='ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1130,6 +1162,7 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01TbZ1ZKNMPq28AgBLyLX3c4',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1146,7 +1179,8 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None
                        content='What was that?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1174,6 +1208,7 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_012oSSVsQdwoGH6b2fryM4fF',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1203,7 +1238,8 @@ async def test_anthropic_model_thinking_part_redacted_stream(allow_model_request
                        content='ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1237,6 +1273,7 @@ async def test_anthropic_model_thinking_part_redacted_stream(allow_model_request
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_018XZkwvj9asBiffg3fXt88s',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1344,7 +1381,8 @@ async def test_anthropic_model_thinking_part_from_other_model(
                        content='How do I cross the street?',
                        timestamp=IsDatetime(),
                    ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1383,6 +1421,7 @@ async def test_anthropic_model_thinking_part_from_other_model(
                 provider_details={'finish_reason': 'completed'},
                 provider_response_id='resp_68c1fda6f11081a1b9fa80ae9122743506da9901a3d98ab7',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1404,7 +1443,8 @@ async def test_anthropic_model_thinking_part_from_other_model(
                        content='Considering the way to cross the street, analogously, how do I cross the river?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1431,6 +1471,7 @@ async def test_anthropic_model_thinking_part_from_other_model(
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_016e2w8nkCuArd5HFSfEwke7',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1458,7 +1499,8 @@ async def test_anthropic_model_thinking_part_stream(allow_model_requests: None,
                        content='How do I cross the street?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1485,6 +1527,7 @@ async def test_anthropic_model_thinking_part_stream(allow_model_requests: None,
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01PiJ6i3vjEZjHxojahi2YNc',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1866,7 +1909,8 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a
     assert result.all_messages() == snapshot(
         [
             ModelRequest(
-                parts=[UserPromptPart(content='What is the weather in San Francisco today?', timestamp=IsDatetime())]
+                parts=[UserPromptPart(content='What is the weather in San Francisco today?', timestamp=IsDatetime())],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2058,6 +2102,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_0119wM5YxCLg3hwUWrxEQ9Y8',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2072,7 +2117,8 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a
                        content='how about Mexico City?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2255,6 +2301,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01Vatv9GeGaeqVHfSGhkU7mo',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2283,7 +2330,8 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None
                        content='What is the weather in San Francisco today?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2536,6 +2584,7 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01QmxBSdEbD9ZeBWDVgFDoQ5',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3329,7 +3378,8 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k
                        content='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -3397,6 +3447,7 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01MYDjkvBDRaKsY6PDwQz3n6',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3411,7 +3462,8 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k
                        content='How about the pydantic repo in the same org?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -3530,6 +3582,7 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01DSGib8F7nNoYprfYSGp1sd',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3574,7 +3627,8 @@ async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropi
                        content='Can you tell me more about the pydantic/pydantic-ai repo? Keep your answer short',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -3637,6 +3691,7 @@ async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropi
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01Xf6SmUVY1mDrSwFc5RsY3n',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3836,6 +3891,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop
             ModelRequest(
                 parts=[UserPromptPart(content='How much is 3 * 12390?', timestamp=IsDatetime())],
                 instructions='Always use the code execution tool for math.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -3886,6 +3942,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_018bVTPr9khzuds31rFDuqW4',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3901,6 +3958,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop
                    )
                ],
                instructions='Always use the code execution tool for math.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -3951,6 +4009,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01VngRFBcNddwrYQoKUmdePY',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3978,7 +4037,8 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None,
                        content='what is 65465-6544 * 65464-6+1.02255',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4058,6 +4118,7 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None,
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01TaPV5KLA8MsCPDuJNKPLF4',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -4573,7 +4634,10 @@ async def test_anthropic_server_tool_pass_history_to_another_provider(
     result = await agent.run('What day is tomorrow?', model=openai_model, message_history=result.all_messages())
     assert result.new_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     TextPart(
@@ -4588,6 +4652,7 @@ async def test_anthropic_server_tool_pass_history_to_another_provider(
                 provider_details={'finish_reason': 'completed'},
                 provider_response_id='resp_689dc4abe31c81968ed493d15d8810fe0afe80ec3d42722e',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -4684,7 +4749,8 @@ async def get_user_country() -> str:
                        content='What is the largest city in the user country?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4706,6 +4772,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'tool_use'},
                 provider_response_id='msg_012TXW181edhmR5JCsQRsBKx',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -4715,7 +4782,8 @@ async def get_user_country() -> str:
                        tool_call_id='toolu_01X9wcHKKAZD9tBC711xipPa',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4741,6 +4809,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'tool_use'},
                 provider_response_id='msg_01K4Fzcf1bhiyLzHpwLdrefj',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -4750,7 +4819,8 @@
                        tool_call_id='toolu_01LZABsgreMefH2Go8D5PQbW',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -4783,7 +4853,8 @@ async def get_user_country() -> str:
                        content='What is the largest city in the user country? Use the get_user_country tool and then your own world knowledge.',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4808,6 +4879,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'tool_use'},
                 provider_response_id='msg_01MsqUB7ZyhjGkvepS1tCXp3',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -4817,7 +4889,8 @@ async def get_user_country() -> str:
                        tool_call_id='toolu_01JJ8TequDsrEU2pv1QFRWAK',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4841,6 +4914,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_0142umg4diSckrDtV9vAmmPL',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -4872,7 +4946,8 @@ async def get_user_country() -> str:
                        content='What is the largest city in the user country? Use the get_user_country tool and then your own world knowledge.',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4894,6 +4969,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'tool_use'},
                 provider_response_id='msg_018YiNXULHGpoKoHkTt6GivG',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -4903,7 +4979,8 @@ async def get_user_country() -> str:
                        tool_call_id='toolu_01ArHq5f2wxRpRF2PVQcKExM',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')],
@@ -4923,6 +5000,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01WiRVmLhCrJbJZRqmAWKv3X',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -4952,7 +5030,8 @@ class CountryLanguage(BaseModel):
                        content='What is the largest city in Mexico?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -4976,6 +5055,7 @@ class CountryLanguage(BaseModel):
                 provider_details={'finish_reason': 'end_turn'},
                 provider_response_id='msg_01N2PwwVQo2aBtt6UFhMDtEX',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py
index 3246c495e0..b374bd020a 100644
--- a/tests/models/test_bedrock.py
+++ b/tests/models/test_bedrock.py
@@ -76,7 +76,8 @@ async def test_bedrock_model(allow_model_requests: None, bedrock_provider: Bedro
                        content='Hello!',
                        timestamp=IsDatetime(),
                    ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -90,6 +91,7 @@ async def test_bedrock_model(allow_model_requests: None, bedrock_provider: Bedro
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -132,7 +134,8 @@ async def temperature(city: str, date: datetime.date) -> str:
                        content='What was the temperature in London 1st January 2022?',
                        timestamp=IsDatetime(),
                    ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -151,6 +154,7 @@ async def temperature(city: str, date: datetime.date) -> str:
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'tool_use'},
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -160,7 +164,8 @@ async def temperature(city: str, date: datetime.date) -> str:
                        tool_call_id='tooluse_5WEci1UmQ8ifMFkUcy2gHQ',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -179,6 +184,7 @@ async def temperature(city: str, date: datetime.date) -> str:
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'tool_use'},
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -188,7 +194,8 @@ async def temperature(city: str, date: datetime.date) -> str:
                        tool_call_id='tooluse_9AjloJSaQDKmpPFff-2Clg',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -263,7 +270,8 @@ async def get_capital(country: str) -> str:
                        content='What is the capital of France?',
                        timestamp=IsDatetime(),
                    ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -282,6 +290,7 @@ async def get_capital(country: str) -> str:
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'tool_use'},
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -291,7 +300,8 @@ async def get_capital(country: str) -> str:
                        tool_call_id='tooluse_F8LnaCMtQ0-chKTnPhNH2g',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -309,6 +319,7 @@ async def get_capital(country: str) -> str:
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -584,6 +595,7 @@ def instructions() -> str:
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                 instructions='You are a helpful assistant.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -597,6 +609,7 @@ def instructions() -> str:
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -641,7 +654,10 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None,
     result = await agent.run('How do I cross the street?')
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content=IsStr()), ThinkingPart(content=IsStr())],
                 usage=RequestUsage(input_tokens=12, output_tokens=693),
@@ -650,6 +666,7 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None,
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -666,7 +683,8 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None,
                        content='Considering the way to cross the street, analogously, how do I cross the river?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content=IsStr()), ThinkingPart(content=IsStr())],
@@ -676,6 +694,7 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None,
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -696,7 +715,10 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None,
     result = await agent.run('How do I cross the street?')
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ThinkingPart(
@@ -712,6 +734,7 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None,
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -728,7 +751,8 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None,
                        content='Considering the way to cross the street, analogously, how do I cross the river?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -745,6 +769,7 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None,
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -773,7 +798,8 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None,
                        content='ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -791,6 +817,7 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None,
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -807,7 +834,8 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None,
                        content='What was that?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -825,6 +853,7 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None,
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -863,7 +892,8 @@ async def test_bedrock_model_thinking_part_redacted_stream(
                        content='ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -887,6 +917,7 @@ async def test_bedrock_model_thinking_part_redacted_stream(
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -995,7 +1026,8 @@ async def test_bedrock_model_thinking_part_from_other_model(
                        content='How do I cross the street?',
                        timestamp=IsDatetime(),
                    ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1030,6 +1062,7 @@ async def test_bedrock_model_thinking_part_from_other_model(
                 provider_details={'finish_reason': 'completed'},
                 provider_response_id='resp_68c1ffe0f9a48191894c46b63c1a4f440003919771fccd27',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1053,7 +1086,8 @@ async def test_bedrock_model_thinking_part_from_other_model(
                        content='Considering the way to cross the street, analogously, how do I cross the river?',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1070,6 +1104,7 @@ async def test_bedrock_model_thinking_part_from_other_model(
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1204,7 +1239,8 @@ async def test_bedrock_model_thinking_part_stream(allow_model_requests: None, be
                        content='Hello',
                        timestamp=IsDatetime(),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1221,6 +1257,7 @@ async def test_bedrock_model_thinking_part_stream(allow_model_requests: None, be
                 provider_name='bedrock',
                 provider_details={'finish_reason': 'end_turn'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py
index cceaf7111a..b3560a8da1 100644
--- a/tests/models/test_cohere.py
+++ b/tests/models/test_cohere.py
@@ -29,7 +29,7 @@ from pydantic_ai.tools import RunContext
 from pydantic_ai.usage import RequestUsage, RunUsage
 
-from ..conftest import IsDatetime, IsInstance, IsNow, raise_if_exception, try_import
+from ..conftest import IsDatetime, IsInstance, IsNow, IsStr, raise_if_exception, try_import
 
 with try_import() as imports_successful:
     import cohere
@@ -115,7 +115,10 @@ async def test_request_simple_success(allow_model_requests: None):
     assert result.usage() == snapshot(RunUsage(requests=1))
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 model_name='command-r7b-12-2024',
@@ -123,8 +126,12 @@ async def test_request_simple_success(allow_model_requests: None):
                 provider_details={'finish_reason': 'COMPLETE'},
                 finish_reason='stop',
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 model_name='command-r7b-12-2024',
@@ -132,6 +139,7 @@ async def test_request_simple_success(allow_model_requests: None):
                 provider_name='cohere',
                 provider_details={'finish_reason': 'COMPLETE'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -189,7 +197,10 @@ async def test_request_structured_response(allow_model_requests: None):
     assert result.output == [1, 2, 123]
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(
@@ -203,6 +214,7 @@ async def test_request_structured_response(allow_model_requests: None):
                 provider_name='cohere',
                 provider_details={'finish_reason': 'COMPLETE'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -212,7 +224,8 @@ async def test_request_structured_response(allow_model_requests: None):
                        tool_call_id='123',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -277,7 +290,8 @@ async def get_location(loc_name: str) -> str:
                 parts=[
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -292,6 +306,7 @@ async def get_location(loc_name: str) -> str:
                 provider_name='cohere',
                 provider_details={'finish_reason': 'COMPLETE'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -301,7 +316,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -317,6 +333,7 @@ async def get_location(loc_name: str) -> str:
                 provider_name='cohere',
                 provider_details={'finish_reason': 'COMPLETE'},
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -326,7 +343,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='2',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -403,6 +422,7 @@ def simple_instructions(ctx: RunContext): ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], instructions='You are a helpful assistant.', + run_id=IsStr(), ), ModelResponse( parts=[ @@ -418,6 +438,7 @@ def simple_instructions(ctx: RunContext): provider_name='cohere', provider_details={'finish_reason': 'COMPLETE'}, finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -446,7 +467,10 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key ) assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]), + ModelRequest( + parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + run_id=IsStr(), + ), ModelResponse( parts=[ IsInstance(ThinkingPart), @@ -461,6 +485,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key provider_details={'finish_reason': 'completed'}, provider_response_id='resp_68bb5f153efc81a2b3958ddb1f257ff30886f4f20524f3b9', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -478,7 +503,8 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key content='Considering the way to cross the street, analogously, how do I cross the river?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -493,6 +519,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key provider_name='cohere', provider_details={'finish_reason': 'COMPLETE'}, finish_reason='stop', + run_id=IsStr(), ), ] ) diff --git a/tests/models/test_deepseek.py b/tests/models/test_deepseek.py index 6d6c93a930..a8c0d8ceaa 100644 --- a/tests/models/test_deepseek.py +++ b/tests/models/test_deepseek.py @@ -34,7 +34,10 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek result = await agent.run('How do I cross the street?') assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]), + ModelRequest( + parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + run_id=IsStr(), + ), ModelResponse( parts=[ ThinkingPart(content=IsStr(), id='reasoning_content', provider_name='deepseek'), @@ -55,6 +58,7 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek provider_details={'finish_reason': 'stop'}, provider_response_id='181d9669-2b3a-445e-bd13-2ebff2c378f6', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -78,7 +82,8 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse content='How do I cross the street?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -100,6 +105,7 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse provider_details={'finish_reason': 'stop'}, provider_response_id='33be18fc-3842-486c-8c29-dd8e578f7f20', finish_reason='stop', + run_id=IsStr(), ), ] ) diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py index d7ee01f481..d03726330a 100644 --- a/tests/models/test_fallback.py +++ b/tests/models/test_fallback.py @@ -32,7 +32,7 @@ from pydantic_ai.settings import ModelSettings from pydantic_ai.usage import RequestUsage -from ..conftest import IsDatetime, IsNow, try_import +from ..conftest import IsDatetime, IsNow, IsStr, try_import if sys.version_info < (3, 11): from 
     from exceptiongroup import ExceptionGroup as ExceptionGroup  # pragma: lax no cover
@@ -75,13 +75,15 @@ def test_first_successful() -> None:
             ModelRequest(
                 parts=[
                     UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='success')],
                 usage=RequestUsage(input_tokens=51, output_tokens=1),
                 model_name='function:success_response:',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -100,13 +102,15 @@ def test_first_failed() -> None:
                         content='hello',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='success')],
                 usage=RequestUsage(input_tokens=51, output_tokens=1),
                 model_name='function:success_response:',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -126,13 +130,15 @@ def test_first_failed_instrumented(capfire: CaptureLogfire) -> None:
                         content='hello',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='success')],
                 usage=RequestUsage(input_tokens=51, output_tokens=1),
                 model_name='function:success_response:',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -239,6 +245,7 @@ async def test_first_failed_instrumented_stream(capfire: CaptureLogfire) -> None
                 usage=RequestUsage(input_tokens=50, output_tokens=2),
                 model_name='function::success_response_stream',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -479,6 +486,7 @@ async def test_first_success_streaming() -> None:
                 usage=RequestUsage(input_tokens=50, output_tokens=2),
                 model_name='function::success_response_stream',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -514,6 +522,7 @@ async def test_first_failed_streaming() -> None:
                 usage=RequestUsage(input_tokens=50, output_tokens=2),
                 model_name='function::success_response_stream',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -794,12 +803,14 @@ def prompted_output_func(_: list[ModelMessage], info: AgentInfo) -> ModelRespons
                     )
                 ],
                 instructions='Be kind',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"bar":"baz"}')],
                 usage=RequestUsage(input_tokens=51, output_tokens=4),
                 model_name='function:prompted_output_func:',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py
index d24189c90b..d07e23452e 100644
--- a/tests/models/test_gemini.py
+++ b/tests/models/test_gemini.py
@@ -622,13 +622,17 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
     assert result.output == 'Hello world'
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Hello world')],
                 usage=RequestUsage(input_tokens=1, output_tokens=2),
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -638,21 +642,29 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
     assert result.output == 'Hello world'
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Hello world')],
                 usage=RequestUsage(input_tokens=1, output_tokens=2),
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='Hello world')],
                 usage=RequestUsage(input_tokens=1, output_tokens=2),
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -670,13 +682,17 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient):
     assert result.output == [1, 2, 123]
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='final_result', args={'response': [1, 2, 123]}, tool_call_id=IsStr())],
                 usage=RequestUsage(input_tokens=1, output_tokens=2),
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -686,7 +702,8 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient):
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -730,7 +747,8 @@ async def get_location(loc_name: str) -> str:
                 parts=[
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -740,6 +758,7 @@ async def get_location(loc_name: str) -> str:
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -749,7 +768,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -760,6 +780,7 @@ async def get_location(loc_name: str) -> str:
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -775,7 +796,8 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
@@ -783,6 +805,7 @@ async def get_location(loc_name: str) -> str:
                 model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -930,7 +953,10 @@ async def bar(y: str) -> str:
     assert result.usage() == snapshot(RunUsage(requests=2, input_tokens=2, output_tokens=4, tool_calls=2))
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(tool_name='foo', args={'x': 'a'}, tool_call_id=IsStr()),
@@ -940,6 +966,7 @@ async def bar(y: str) -> str:
                 model_name='gemini-1.5-flash',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_name='google-gla',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -949,7 +976,8 @@
                     ToolReturnPart(
                         tool_name='bar', content='b', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='final_result', args={'response': [1, 2]}, tool_call_id=IsStr())],
@@ -957,6 +985,7 @@ async def bar(y: str) -> str:
                 model_name='gemini-1.5-flash',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_name='google-gla',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -966,7 +995,8 @@ async def bar(y: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1011,7 +1041,8 @@ def get_location(loc_name: str) -> str:
                         content='Hello',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1026,6 +1057,7 @@ def get_location(loc_name: str) -> str:
                 model_name='gemini-1.5-flash',
                 timestamp=IsDatetime(),
                 provider_name='google-gla',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1035,7 +1067,8 @@ def get_location(loc_name: str) -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1212,7 +1245,8 @@ async def get_image() -> BinaryContent:
                         content=['What fruit is in the image you can get from the get_image tool?'],
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1230,6 +1264,7 @@ async def get_image() -> BinaryContent:
                 model_name='gemini-2.5-pro-preview-03-25',
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1246,7 +1281,8 @@ async def get_image() -> BinaryContent:
                         ],
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The image shows a kiwi fruit, sliced in half.')],
@@ -1258,6 +1294,7 @@ async def get_image() -> BinaryContent:
                 model_name='gemini-2.5-pro-preview-03-25',
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1374,6 +1411,7 @@ async def test_gemini_model_instructions(allow_model_requests: None, gemini_api_
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                 instructions='You are a helpful assistant.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The capital of France is Paris.\n')],
@@ -1383,6 +1421,7 @@ async def test_gemini_model_instructions(allow_model_requests: None, gemini_api_
                 model_name='gemini-1.5-flash',
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1448,7 +1487,10 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
     )
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     IsInstance(ThinkingPart),
@@ -1490,6 +1532,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                 provider_details={'finish_reason': 'completed'},
                 provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1504,7 +1547,10 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
     )
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     IsInstance(ThinkingPart),
@@ -1519,6 +1565,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                 provider_details={'finish_reason': 'completed'},
                 provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1526,7 +1573,8 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                         content='Considering the way to cross the street, analogously, how do I cross the river?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1575,6 +1623,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                 model_name='gemini-2.5-flash-preview-04-17',
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1597,6 +1646,7 @@ async def test_gemini_youtube_video_url_input(allow_model_requests: None, gemini
             ModelRequest(
                 parts=[
                     UserPromptPart(content=['What is the main content of this URL?', url], timestamp=IsDatetime()),
                 ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1617,6 +1667,7 @@
                 model_name='gemini-2.0-flash',
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1678,7 +1729,8 @@ async def bar() -> str:
                         content='run bar for me please',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='bar', args={}, tool_call_id=IsStr())],
@@ -1689,6 +1741,7 @@ async def bar() -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1698,7 +1751,8 @@ async def bar() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1715,6 +1769,7 @@ async def bar() -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1724,7 +1779,8 @@ async def bar() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1755,7 +1811,8 @@ async def get_user_country() -> str:
                         content='What is the largest city in the user country?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_user_country', args={}, tool_call_id=IsStr())],
@@ -1766,6 +1823,7 @@ async def get_user_country() -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1775,7 +1833,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1792,6 +1851,7 @@ async def get_user_country() -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1801,7 +1861,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1831,7 +1892,8 @@ def upcase(text: str) -> str:
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1850,6 +1912,7 @@ def upcase(text: str) -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='TT9IaNfGN_DmqtsPzKnE4AE',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1901,7 +1964,8 @@ class CityLocation(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1921,6 +1985,7 @@ class CityLocation(BaseModel):
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1951,7 +2016,8 @@ class CountryLanguage(BaseModel):
                         content='What is the primarily language spoken in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1976,6 +2042,7 @@ class CountryLanguage(BaseModel):
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2002,7 +2069,8 @@ class CityLocation(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2017,6 +2085,7 @@ class CityLocation(BaseModel):
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2049,7 +2118,8 @@ async def get_user_country() -> str:
                         content='What is the largest city in the user country? Use the get_user_country tool and then your own world knowledge.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_user_country', args={}, tool_call_id=IsStr())],
@@ -2060,6 +2130,7 @@ async def get_user_country() -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2069,7 +2140,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')],
@@ -2080,6 +2152,7 @@ async def get_user_country() -> str:
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2110,7 +2183,8 @@ class CountryLanguage(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2127,6 +2201,7 @@ class CountryLanguage(BaseModel):
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_gemini_vertex.py b/tests/models/test_gemini_vertex.py
index df9b606cf8..20e2e5e0dc 100644
--- a/tests/models/test_gemini_vertex.py
+++ b/tests/models/test_gemini_vertex.py
@@ -140,7 +140,8 @@ async def test_url_input(
                         content=['What is the main content of this URL?', Is(url)],
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content=Is(expected_output))],
@@ -149,6 +150,7 @@ async def test_url_input(
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -177,7 +179,8 @@ async def test_url_input_force_download(allow_model_requests: None) -> None:  #
                         content=['What is the main content of this URL?', Is(video_url)],
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content=Is(output))],
@@ -186,6 +189,7 @@ async def test_url_input_force_download(allow_model_requests: None) -> None:  #
                 timestamp=IsDatetime(),
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_google.py b/tests/models/test_google.py
index 6866ca9b21..82332f38ef 100644
--- a/tests/models/test_google.py
+++ b/tests/models/test_google.py
@@ -123,7 +123,8 @@ async def test_google_model(allow_model_requests: None, google_provider: GoogleP
                         content='Hello!',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Hello there! How can I help you today?\n')],
@@ -136,6 +137,7 @@ async def test_google_model(allow_model_requests: None, google_provider: GoogleP
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -186,7 +188,8 @@ async def temperature(city: str, date: datetime.date) -> str:
                         content='What was the temperature in London 1st January 2022?',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -205,13 +208,15 @@ async def temperature(city: str, date: datetime.date) -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
                     ToolReturnPart(
                         tool_name='temperature', content='30°C', tool_call_id=IsStr(), timestamp=IsDatetime()
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -232,6 +237,7 @@ async def temperature(city: str, date: datetime.date) -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -241,7 +247,8 @@ async def temperature(city: str, date: datetime.date) -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -306,7 +313,8 @@ async def test_google_model_builtin_code_execution_stream(
                         content='what is 65465-6544 * 65464-6+1.02255',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -368,6 +376,7 @@ async def test_google_model_builtin_code_execution_stream(
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='1NjJaIDxJcL7qtsP5aPfqQs',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -541,7 +550,8 @@ async def get_capital(country: str) -> str:
                 parts=[
                     SystemPromptPart(content='You are a helpful chatbot.', timestamp=IsDatetime()),
                     UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime()),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_capital', args={'country': 'France'}, tool_call_id=IsStr())],
@@ -554,6 +564,7 @@ async def get_capital(country: str) -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -563,7 +574,8 @@ async def get_capital(country: str) -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -580,6 +592,7 @@ async def get_capital(country: str) -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -867,6 +880,7 @@ def instructions() -> str:
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                 instructions='You are a helpful assistant.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The capital of France is Paris.\n')],
@@ -879,6 +893,7 @@ def instructions() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -936,7 +951,8 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p
                         content='What is the weather in San Francisco today?',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -999,6 +1015,7 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='btnJaOrqE4_6qtsP7bOboQs',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1013,7 +1030,8 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p
                         content='how about Mexico City?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1074,6 +1092,7 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='dtnJaKyTAri3qtsPu4imqQs',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1105,7 +1124,8 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g
                         content='What is the weather in San Francisco today?',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1139,6 +1159,7 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='ftnJaMmAMcm-qtsPwvCCoAo',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1248,7 +1269,8 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g
                         content='how about Mexico City?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1314,6 +1336,7 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='itnJaJK1BsGxqtsPrIeb6Ao',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1343,7 +1366,8 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog
                 parts=[
                     SystemPromptPart(content='You are a helpful chatbot.', timestamp=IsDatetime()),
                     UserPromptPart(content='What day is today in Utrecht?', timestamp=IsDatetime()),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1401,6 +1425,7 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1408,7 +1433,10 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog
     result = await agent.run('What day is tomorrow?', message_history=result.all_messages())
     assert result.new_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     BuiltinToolCallPart(
@@ -1453,6 +1481,7 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1619,7 +1648,8 @@ def dummy() -> None: ...  # pragma: no cover
                         content='How do I cross the street?',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1639,6 +1669,7 @@ def dummy() -> None: ...  # pragma: no cover
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='sebBaN7rGrSsqtsPhf3J0Q4',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1655,7 +1686,8 @@ def dummy() -> None: ...  # pragma: no cover
                         content='Considering the way to cross the street, analogously, how do I cross the river?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1675,6 +1707,7 @@ def dummy() -> None: ...  # pragma: no cover
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='zObBaKreOqSIqtsP7uur4A0',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1705,7 +1738,8 @@ def dummy() -> None: ...  # pragma: no cover
                         content='How do I cross the street?',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1740,6 +1774,7 @@ def dummy() -> None: ...  # pragma: no cover
                 provider_details={'finish_reason': 'completed'},
                 provider_response_id='resp_68c1fb6b6a248196a6216e80fc2ace380c14a8a9087e8689',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1761,7 +1796,8 @@ def dummy() -> None: ...  # pragma: no cover
                         content='Considering the way to cross the street, analogously, how do I cross the river?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1781,6 +1817,7 @@ def dummy() -> None: ...  # pragma: no cover
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='mPvBaJmNOMywqtsPsb_l2A4',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1816,7 +1853,8 @@ def dummy() -> None: ...  # pragma: no cover
                         content='How do I cross the street?',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1836,6 +1874,7 @@ def dummy() -> None: ...  # pragma: no cover
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id='beHBaJfEMIi-qtsP3769-Q8',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2015,7 +2054,8 @@ async def test_google_url_input(
                         content=['What is the main content of this URL?', Is(url)],
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content=Is(expected_output))],
@@ -2026,6 +2066,7 @@ async def test_google_url_input(
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2055,7 +2096,8 @@ async def test_google_url_input_force_download(
                         content=['What is the main content of this URL?', Is(video_url)],
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content=Is(output))],
@@ -2066,6 +2108,7 @@ async def test_google_url_input_force_download(
                 provider_response_id=IsStr(),
                 provider_name='google-vertex',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2103,7 +2146,8 @@ async def bar() -> str:
                         content='run bar for me please',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='bar', args={}, tool_call_id=IsStr())],
@@ -2116,6 +2160,7 @@ async def bar() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2125,7 +2170,8 @@ async def bar() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2144,6 +2190,7 @@ async def bar() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2153,7 +2200,8 @@ async def bar() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2194,7 +2242,8 @@ async def get_user_country() -> str:
                         content='What is the largest city in the user country?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_user_country', args={}, tool_call_id=IsStr())],
@@ -2207,6 +2256,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2216,7 +2266,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2235,6 +2286,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2244,7 +2296,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2275,7 +2328,8 @@ async def get_user_country() -> str:
                         content='What is the largest city in the user country? Use the get_user_country tool and then your own world knowledge.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_user_country', args={}, tool_call_id=IsStr())],
@@ -2288,6 +2342,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2297,7 +2352,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The largest city in Mexico is Mexico City.')],
@@ -2310,6 +2366,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2359,7 +2416,8 @@ class CityLocation(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2381,6 +2439,7 @@ class CityLocation(BaseModel):
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2410,7 +2469,8 @@ class CountryLanguage(BaseModel):
                         content='What is the primarily language spoken in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2437,6 +2497,7 @@ class CountryLanguage(BaseModel):
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2462,7 +2523,8 @@ class CityLocation(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')],
@@ -2475,6 +2537,7 @@ class CityLocation(BaseModel):
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2506,7 +2569,8 @@ async def get_user_country() -> str:
                         content='What is the largest city in the user country? Use the get_user_country tool and then your own world knowledge.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_user_country', args={}, tool_call_id=IsStr())],
@@ -2519,6 +2583,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -2528,7 +2593,8 @@ async def get_user_country() -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')],
@@ -2541,6 +2607,7 @@ async def get_user_country() -> str:
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2570,7 +2637,8 @@ class CountryLanguage(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2589,6 +2657,7 @@ class CountryLanguage(BaseModel):
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2741,7 +2810,8 @@ async def test_google_image_generation(allow_model_requests: None, google_provid
                         content='Generate an image of an axolotl.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2766,6 +2836,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2787,7 +2858,8 @@ async def test_google_image_generation(allow_model_requests: None, google_provid
                         content='Now give it a sombrero.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2812,6 +2884,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2826,6 +2899,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google
         BinaryImage(
             data=IsBytes(),
             media_type='image/png',
+            _identifier='9ff9cc',
             identifier='9ff9cc',
         )
     )
@@ -2855,7 +2929,8 @@ async def test_google_image_generation_stream(allow_model_requests: None, google
                         content='Generate an image of an axolotl.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2880,6 +2955,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2922,7 +2998,8 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo
                         content='Generate an illustrated two-sentence story about an axolotl.',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2933,6 +3010,7 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo
                         content=BinaryImage(
                             data=IsBytes(),
                             media_type='image/png',
+                            _identifier='00f2af',
                             identifier=IsStr(),
                         )
                     ),
@@ -2948,6 +3026,7 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -2968,6 +3047,7 @@ async def test_google_image_or_text_output(allow_model_requests: None, google_pr
         BinaryImage(
             data=IsBytes(),
             media_type='image/png',
+            _identifier='f82faf',
             identifier='f82faf',
         )
     )
@@ -2986,6 +3066,7 @@ async def test_google_image_and_text_output(allow_model_requests: None, google_p
         BinaryImage(
             data=IsBytes(),
             media_type='image/png',
+            _identifier='67b12f',
             identifier='67b12f',
         )
     ]
diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py
index 928ebd8907..515892d58c 100644
--- a/tests/models/test_groq.py
+++ b/tests/models/test_groq.py
@@ -165,7 +165,10 @@ async def test_request_simple_success(allow_model_requests: None):
     assert result.usage() == snapshot(RunUsage(requests=1))
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 model_name='llama-3.3-70b-versatile-123',
@@ -174,8 +177,12 @@ async def test_request_simple_success(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 model_name='llama-3.3-70b-versatile-123',
@@ -184,6 +191,7 @@ async def test_request_simple_success(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -224,7 +232,10 @@ async def test_request_structured_response(allow_model_requests: None):
     assert result.output == [1, 2, 123]
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(
@@ -239,6 +250,7 @@ async def test_request_structured_response(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -248,7 +260,8 @@ async def test_request_structured_response(allow_model_requests: None):
                         tool_call_id='123',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -313,7 +326,8 @@ async def get_location(loc_name: str) -> str:
                 parts=[
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -330,6 +344,7 @@ async def get_location(loc_name: str) -> str:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -339,7 +354,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='1',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -356,6 +372,7 @@ async def get_location(loc_name: str) -> str:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -365,7 +382,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='2',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
@@ -375,6 +393,7 @@ async def get_location(loc_name: str) -> str:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -476,7 +495,10 @@ async def test_stream_structured(allow_model_requests: None):
     assert result.usage() == snapshot(RunUsage(requests=1))
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(
@@ -489,6 +511,7 @@ async def test_stream_structured(allow_model_requests: None):
                 timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
                 provider_name='groq',
                 provider_response_id='x',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -498,7 +521,8 @@ async def test_stream_structured(allow_model_requests: None):
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -581,7 +605,8 @@ async def get_image() -> BinaryContent:
                         ],
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_image', args='{}', tool_call_id='call_wkpd')],
@@ -592,6 +617,7 @@ async def get_image() -> BinaryContent:
                 provider_details={'finish_reason': 'tool_calls'},
                 provider_response_id='chatcmpl-3c327c89-e9f5-4aac-a5d5-190e6f6f25c9',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -608,7 +634,8 @@ async def get_image() -> BinaryContent:
                         ],
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The fruit in the image is a kiwi.')],
@@ -619,6 +646,7 @@ async def get_image() -> BinaryContent:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-82dfad42-6a28-4089-82c3-c8633f626c0d',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -690,6 +718,7 @@ async def test_groq_model_instructions(allow_model_requests: None, groq_api_key:
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                 instructions='You are a helpful assistant.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The capital of France is Paris.')],
@@ -700,6 +729,7 @@ async def test_groq_model_instructions(allow_model_requests: None, groq_api_key:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-7586b6a9-fb4b-4ec7-86a0-59f0a77844cf',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -725,7 +755,8 @@ async def test_groq_model_web_search_tool(allow_model_requests: None, groq_api_k
                         content='What is the weather in San Francisco today?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -966,6 +997,7 @@ async def test_groq_model_web_search_tool(allow_model_requests: None, groq_api_k
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='stub',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -993,7 +1025,8 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro
                         content='What is the weather in San Francisco today?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -1131,6 +1164,7 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='stub',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1884,6 +1918,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
             ModelRequest(
                 parts=[UserPromptPart(content='I want a recipe to cook Uruguayan alfajores.', timestamp=IsDatetime())],
                 instructions='You are a chef.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[IsInstance(ThinkingPart), IsInstance(TextPart)],
@@ -1894,6 +1929,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1908,6 +1944,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
             ModelRequest(
                 parts=[UserPromptPart(content='I want a recipe to cook Uruguayan alfajores.', timestamp=IsDatetime())],
                 instructions='You are a chef.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[IsInstance(ThinkingPart), IsInstance(TextPart)],
@@ -1918,6 +1955,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-9748c1af-1065-410a-969a-d7fb48039fbb',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -1927,6 +1965,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                     )
                 ],
                 instructions='You are a chef.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[IsInstance(ThinkingPart), IsInstance(TextPart)],
@@ -1937,6 +1976,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-994aa228-883a-498c-8b20-9655d770b697',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -1968,6 +2008,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                     )
                 ],
                 instructions='You are a chef.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -2055,6 +2096,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-4ef92b12-fb9d-486f-8b98-af9b5ecac736',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -3309,6 +3351,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                     )
                 ],
                 instructions='You are a chef.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -3415,6 +3458,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-dd0af56b-f71d-4101-be2f-89efcf3f05ac',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -5253,6 +5297,7 @@ async def get_something_by_name(name: str) -> str:
                     )
                 ],
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5266,6 +5311,7 @@ async def get_something_by_name(name: str) -> str:
                 timestamp=IsDatetime(),
                 provider_name='groq',
                 finish_reason='error',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -5290,6 +5336,7 @@ async def get_something_by_name(name: str) -> str:
                     )
                 ],
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5309,6 +5356,7 @@ async def get_something_by_name(name: str) -> str:
                 provider_details={'finish_reason': 'tool_calls'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -5320,6 +5368,7 @@ async def get_something_by_name(name: str) -> str:
                     )
                 ],
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5335,6 +5384,7 @@ async def get_something_by_name(name: str) -> str:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -5368,6 +5418,7 @@ async def get_something_by_name(name: str) -> str:
                     )
                 ],
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5388,6 +5439,7 @@ async def get_something_by_name(name: str) -> str:
                 timestamp=IsDatetime(),
                 provider_name='groq',
                 provider_response_id='chatcmpl-4e0ca299-7515-490a-a98a-16d7664d4fba',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -5412,6 +5464,7 @@ async def get_something_by_name(name: str) -> str:
                     )
                 ],
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5429,6 +5482,7 @@ async def get_something_by_name(name: str) -> str:
                 provider_details={'finish_reason': 'tool_calls'},
                 provider_response_id='chatcmpl-fffa1d41-1763-493a-9ced-083bd3f2d98b',
                 finish_reason='tool_call',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -5440,6 +5494,7 @@ async def get_something_by_name(name: str) -> str:
                     )
                 ],
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The tool call succeeded with the name "test_name".')],
@@ -5450,6 +5505,7 @@ async def get_something_by_name(name: str) -> str:
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-fe6b5685-166f-4c71-9cd7-3d5a97301bf1',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -5487,7 +5543,8 @@ class CityLocation(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5503,6 +5560,7 @@ class CityLocation(BaseModel):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -5528,7 +5586,8 @@ class CityLocation(BaseModel):
                         content='What is the largest city in Mexico?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5544,6 +5603,7 @@ class CityLocation(BaseModel):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py
index a49dc187b7..3bbb0d3e7b 100644
--- a/tests/models/test_huggingface.py
+++ b/tests/models/test_huggingface.py
@@ -170,6 +170,7 @@ async def test_simple_completion(allow_model_requests: None, huggingface_api_key
             provider_name='huggingface',
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-d445c0d473a84791af2acf356cc00df7',
+            run_id=IsStr(),
         )
     )
@@ -239,6 +240,7 @@ async def test_request_structured_response(
             provider_name='huggingface',
             provider_details={'finish_reason': 'stop'},
             provider_response_id='123',
+            run_id=IsStr(),
         )
     )
@@ -357,7 +359,8 @@ async def get_location(loc_name: str) -> str:
                 parts=[
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -373,6 +376,7 @@ async def get_location(loc_name: str) -> str:
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -382,7 +386,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='1',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -398,6 +403,7 @@ async def get_location(loc_name: str) -> str:
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -407,7 +413,8 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id='2',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
@@ -416,6 +423,7 @@ async def get_location(loc_name: str) -> str:
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -630,7 +638,8 @@ async def test_image_url_input(allow_model_requests: None, huggingface_api_key:
                         ],
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Hello! How can I assist you with this image of a potato?')],
@@ -640,6 +649,7 @@ async def test_image_url_input(allow_model_requests: None, huggingface_api_key:
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-49aa100effab4ca28514d5ccc00d7944',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -699,6 +709,7 @@ def simple_instructions(ctx: RunContext):
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                 instructions='You are a helpful assistant.',
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Paris')],
@@ -708,6 +719,7 @@ def simple_instructions(ctx: RunContext):
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-b3936940372c481b8d886e596dc75524',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -793,7 +805,10 @@ def response_validator(value: str) -> str:
     assert result.output == 'final-response'
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='invalid-response')],
                 model_name='hf-model',
@@ -801,6 +816,7 @@ def response_validator(value: str) -> str:
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -810,7 +826,8 @@ def response_validator(value: str) -> str:
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final-response')],
@@ -819,6 +836,7 @@ def response_validator(value: str) -> str:
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -905,7 +923,10 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap
     result = await agent.run('How do I cross the street?')
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     IsInstance(ThinkingPart),
@@ -917,6 +938,7 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-957db61fe60d4440bcfe1f11f2c5b4b9',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -936,7 +958,8 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap
                         content='Considering the way to cross the street, analogously, how do I cross the river?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -949,6 +972,7 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-35fdec1307634f94a39f7e26f52e12a7',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -975,7 +999,8 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa
                         content='How do I cross the street?',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -987,6 +1012,7 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa
                 provider_name='huggingface',
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='chatcmpl-357f347a3f5d4897b36a128fb4e4cf7b',
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_mcp_sampling.py b/tests/models/test_mcp_sampling.py
index caaa0e3719..1da0851c20 100644
--- a/tests/models/test_mcp_sampling.py
+++ b/tests/models/test_mcp_sampling.py
@@ -11,7 +11,7 @@
 from pydantic_ai.agent import Agent
 from pydantic_ai.exceptions import UnexpectedModelBehavior

-from ..conftest import IsNow, try_import
+from ..conftest import IsNow, IsStr, try_import

 with try_import() as imports_successful:
     from mcp import CreateMessageResult
@@ -54,12 +54,14 @@ def test_assistant_text():
                         content='Hello',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='text content')],
                 model_name='test-model',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -88,17 +90,27 @@ def test_assistant_text_history():
     assert result.output == snapshot('text content')
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='1', timestamp=IsNow(tz=timezone.utc))], instructions='testing'),
+            ModelRequest(
+                parts=[UserPromptPart(content='1', timestamp=IsNow(tz=timezone.utc))],
+                instructions='testing',
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='text content')],
                 model_name='test-model',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='2', timestamp=IsNow(tz=timezone.utc))],
+                instructions='testing',
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='2', timestamp=IsNow(tz=timezone.utc))], instructions='testing'),
             ModelResponse(
                 parts=[TextPart(content='text content')],
                 model_name='test-model',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py
index 76ae344c5b..4a39791309 100644
--- a/tests/models/test_mistral.py
+++ b/tests/models/test_mistral.py
@@ -213,7 +213,10 @@ async def test_multiple_completions(allow_model_requests: None):
     assert result.usage().output_tokens == 1
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 usage=RequestUsage(input_tokens=1, output_tokens=1),
@@ -223,8 +226,12 @@
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='hello again')],
                 usage=RequestUsage(input_tokens=1, output_tokens=1),
@@ -234,6 +241,7 @@ async def test_multiple_completions(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -269,7 +277,10 @@ async def test_three_completions(allow_model_requests: None):
     assert result.usage().output_tokens == 1
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 usage=RequestUsage(input_tokens=1, output_tokens=1),
@@ -279,8 +290,12 @@ async def test_three_completions(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='hello again')],
                 usage=RequestUsage(input_tokens=1, output_tokens=1),
@@ -290,8 +305,12 @@ async def test_three_completions(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
+            ),
+            ModelRequest(
+                parts=[UserPromptPart(content='final message', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
-            ModelRequest(parts=[UserPromptPart(content='final message', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='final message')],
                 usage=RequestUsage(input_tokens=1, output_tokens=1),
@@ -301,6 +320,7 @@ async def test_three_completions(allow_model_requests: None):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -404,7 +424,10 @@ class CityLocation(BaseModel):
     assert result.usage().output_tokens == 2
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(
@@ -420,6 +443,7 @@ class CityLocation(BaseModel):
                 provider_details={'finish_reason': 'stop'},
                 provider_response_id='123',
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -429,7 +453,8 @@ class CityLocation(BaseModel):
                         tool_call_id='123',
                         timestamp=IsNow(tz=timezone.utc),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -467,7 +492,10 @@ class CityLocation(BaseModel):
     assert result.usage().details == {}
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -492,7 +521,8 @@ class CityLocation(BaseModel):
                        tool_call_id='123',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -528,7 +558,8 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque
                parts=[
                    SystemPromptPart(content='System prompt value', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -545,6 +576,7 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -554,7 +586,8 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque
                        tool_call_id='123',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1080,7 +1113,8 @@ async def get_location(loc_name: str) -> str:
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1097,6 +1131,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1106,7 +1141,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1123,6 +1159,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1132,7 +1169,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='2',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='final response')],
@@ -1143,6 +1181,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1231,7 +1270,8 @@ async def get_location(loc_name: str) -> str:
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1248,6 +1288,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1257,7 +1298,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1274,6 +1316,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1283,7 +1326,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='2',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1300,6 +1344,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1309,7 +1354,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1385,7 +1431,8 @@ async def get_location(loc_name: str) -> str:
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1402,6 +1449,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='x',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1411,7 +1459,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='final_result', args='{"won": true}', tool_call_id='1')],
@@ -1422,6 +1471,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='x',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1431,7 +1481,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1494,7 +1545,8 @@ async def get_location(loc_name: str) -> str:
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1511,6 +1563,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='x',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1520,7 +1573,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='final response')],
@@ -1531,6 +1585,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='x',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1606,7 +1661,8 @@ async def get_location(loc_name: str) -> str:
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1623,6 +1679,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='x',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1632,7 +1689,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1649,6 +1707,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='x',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1658,7 +1717,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='2',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='final response')],
@@ -1669,6 +1729,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='x',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1836,7 +1897,8 @@ async def get_image() -> BinaryContent:
                    content=['What fruit is in the image you can get from the get_image tool? Call the tool.'],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='get_image', args='{}', tool_call_id='utZJMAZN4')],
@@ -1847,6 +1909,7 @@ async def get_image() -> BinaryContent:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='fce6d16a4e5940edb24ae16dd0369947',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1863,7 +1926,8 @@ async def get_image() -> BinaryContent:
                        ],
                        timestamp=IsDatetime(),
                    ),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1878,6 +1942,7 @@ async def get_image() -> BinaryContent:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='26e7de193646460e8904f8e604a60dc1',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1909,7 +1974,8 @@ async def test_image_url_input(allow_model_requests: None):
                    ],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
@@ -1920,6 +1986,7 @@ async def test_image_url_input(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1948,7 +2015,8 @@ async def test_image_as_binary_content_input(allow_model_requests: None):
                    ],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
@@ -1959,6 +2027,7 @@ async def test_image_as_binary_content_input(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1990,7 +2059,8 @@ async def test_pdf_url_input(allow_model_requests: None):
                    ],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
@@ -2001,6 +2071,7 @@ async def test_pdf_url_input(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2026,7 +2097,8 @@ async def test_pdf_as_binary_content_input(allow_model_requests: None):
                    ],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
@@ -2037,6 +2109,7 @@ async def test_pdf_as_binary_content_input(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2106,6 +2179,7 @@ async def test_mistral_model_instructions(allow_model_requests: None, mistral_ap
            ModelRequest(
                parts=[UserPromptPart(content='hello', timestamp=IsDatetime())],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
@@ -2116,6 +2190,7 @@ async def test_mistral_model_instructions(allow_model_requests: None, mistral_ap
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2130,7 +2205,10 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap
    result = await agent.run('How do I cross the street?')
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+           ModelRequest(
+               parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[
                    ThinkingPart(
@@ -2150,6 +2228,7 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68bb6452990081968f5aff503a55e3b903498c8aa840cf12',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2168,7 +2247,8 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap
                    content='Considering the way to cross the street, analogously, how do I cross the river?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2182,6 +2262,7 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap
                provider_details={'finish_reason': 'stop'},
                provider_response_id='9abe8b736bff46af8e979b52334a57cd',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2208,7 +2289,8 @@ async def test_mistral_model_thinking_part_iter(allow_model_requests: None, mist
                    content='How do I cross the street?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2260,6 +2342,7 @@ async def test_mistral_model_thinking_part_iter(allow_model_requests: None, mist
                provider_details={'finish_reason': 'stop'},
                provider_response_id='9faf4309c1d743d189f16b29211d8b45',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
diff --git a/tests/models/test_model_function.py b/tests/models/test_model_function.py
index 8d7ada98cf..196b140454 100644
--- a/tests/models/test_model_function.py
+++ b/tests/models/test_model_function.py
@@ -66,12 +66,16 @@ def test_simple():
    assert result.output == snapshot("content='Hello' part_kind='user-prompt' message_count=1")
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[TextPart(content="content='Hello' part_kind='user-prompt' message_count=1")],
                usage=RequestUsage(input_tokens=51, output_tokens=3),
                model_name='function:return_last:',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
        ]
    )
@@ -80,19 +84,27 @@ def test_simple():
    assert result2.output == snapshot("content='World' part_kind='user-prompt' message_count=3")
    assert result2.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[TextPart(content="content='Hello' part_kind='user-prompt' message_count=1")],
                usage=RequestUsage(input_tokens=51, output_tokens=3),
                model_name='function:return_last:',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
+           ),
+           ModelRequest(
+               parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
            ),
-           ModelRequest(parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc))]),
            ModelResponse(
                parts=[TextPart(content="content='World' part_kind='user-prompt' message_count=3")],
                usage=RequestUsage(input_tokens=52, output_tokens=6),
                model_name='function:return_last:',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
        ]
    )
@@ -153,7 +165,10 @@ def test_weather():
    assert result.output == 'Raining in London'
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='London', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='London', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[
                    ToolCallPart(
@@ -163,6 +178,7 @@ def test_weather():
                usage=RequestUsage(input_tokens=51, output_tokens=5),
                model_name='function:weather_model:',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -172,13 +188,15 @@ def test_weather():
                        timestamp=IsNow(tz=timezone.utc),
                        tool_call_id=IsStr(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='get_weather', args='{"lat": 51, "lng": 0}', tool_call_id=IsStr())],
                usage=RequestUsage(input_tokens=56, output_tokens=11),
                model_name='function:weather_model:',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -188,13 +206,15 @@ def test_weather():
                        timestamp=IsNow(tz=timezone.utc),
                        tool_call_id=IsStr(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='Raining in London')],
                usage=RequestUsage(input_tokens=57, output_tokens=14),
                model_name='function:weather_model:',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
        ]
    )
@@ -350,7 +370,8 @@ def test_call_all():
                parts=[
                    SystemPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -363,6 +384,7 @@ def test_call_all():
                usage=RequestUsage(input_tokens=52, output_tokens=21),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -381,13 +403,15 @@ def test_call_all():
                    ToolReturnPart(
                        tool_name='quz', content='a', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                    ),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='{"foo":"1","bar":"2","baz":"3","qux":"4","quz":"a"}')],
                usage=RequestUsage(input_tokens=57, output_tokens=33),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
        ]
    )
@@ -451,12 +475,16 @@ async def test_stream_text():
    assert await result.get_output() == snapshot('hello world')
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[TextPart(content='hello world')],
                usage=RequestUsage(input_tokens=50, output_tokens=2),
                model_name='function::stream_text_function',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
        ]
    )
diff --git a/tests/models/test_model_test.py b/tests/models/test_model_test.py
index f6b4af74b1..f7a6809a71 100644
--- a/tests/models/test_model_test.py
+++ b/tests/models/test_model_test.py
@@ -77,7 +77,8 @@ def test_custom_output_args():
                    content='x',
                    timestamp=IsNow(tz=timezone.utc),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -90,6 +91,7 @@ def test_custom_output_args():
                usage=RequestUsage(input_tokens=51, output_tokens=7),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -99,7 +101,8 @@ def test_custom_output_args():
                        tool_call_id='pyd_ai_tool_call_id__final_result',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -121,7 +124,8 @@ class Foo(BaseModel):
                    content='x',
                    timestamp=IsNow(tz=timezone.utc),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -134,6 +138,7 @@ class Foo(BaseModel):
                usage=RequestUsage(input_tokens=51, output_tokens=6),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -143,7 +148,8 @@ class Foo(BaseModel):
                        tool_call_id='pyd_ai_tool_call_id__final_result',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -161,7 +167,8 @@ def test_output_type():
                    content='x',
                    timestamp=IsNow(tz=timezone.utc),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -174,6 +181,7 @@ def test_output_type():
                usage=RequestUsage(input_tokens=51, output_tokens=7),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -183,7 +191,8 @@ def test_output_type():
                        tool_call_id='pyd_ai_tool_call_id__final_result',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -207,12 +216,16 @@ async def my_ret(x: int) -> str:
    assert result.output == snapshot('{"my_ret":"1"}')
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='my_ret', args={'x': 0}, tool_call_id=IsStr())],
                usage=RequestUsage(input_tokens=51, output_tokens=4),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -222,26 +235,30 @@ async def my_ret(x: int) -> str:
                        timestamp=IsNow(tz=timezone.utc),
                        tool_call_id=IsStr(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='my_ret', args={'x': 0}, tool_call_id=IsStr())],
                usage=RequestUsage(input_tokens=61, output_tokens=8),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
                    ToolReturnPart(
                        tool_name='my_ret', content='1', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='{"my_ret":"1"}')],
                usage=RequestUsage(input_tokens=62, output_tokens=12),
                model_name='test',
                timestamp=IsNow(tz=timezone.utc),
+               run_id=IsStr(),
            ),
        ]
    )
diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py
index f4d0496966..0181437cff 100644
--- a/tests/models/test_openai.py
+++ b/tests/models/test_openai.py
@@ -121,7 +121,10 @@ async def test_request_simple_success(allow_model_requests: None):
    assert result.usage() == snapshot(RunUsage(requests=1))
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[TextPart(content='world')],
                model_name='gpt-4o-123',
@@ -130,8 +133,12 @@ async def test_request_simple_success(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
+           ),
+           ModelRequest(
+               parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
            ),
-           ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
            ModelResponse(
                parts=[TextPart(content='world')],
                model_name='gpt-4o-123',
@@ -140,6 +147,7 @@ async def test_request_simple_success(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -226,7 +234,10 @@ async def test_request_structured_response(allow_model_requests: None):
    assert result.output == [1, 2, 123]
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+           ModelRequest(
+               parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[
                    ToolCallPart(
@@ -241,6 +252,7 @@ async def test_request_structured_response(allow_model_requests: None):
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -250,7 +262,8 @@ async def test_request_structured_response(allow_model_requests: None):
                        tool_call_id='123',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -317,7 +330,8 @@ async def get_location(loc_name: str) -> str:
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -338,6 +352,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -347,7 +362,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -368,6 +384,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -377,7 +394,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id='2',
                        timestamp=IsNow(tz=timezone.utc),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='final response')],
@@ -387,6 +405,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='123',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -870,7 +889,8 @@ async def get_image() -> ImageUrl:
                    content=['What food is in the image you can get from the get_image tool?'],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='get_image', args='{}', tool_call_id='call_4hrT4QP9jfojtK69vGiFCFjG')],
@@ -890,6 +910,7 @@ async def get_image() -> ImageUrl:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BRmTHlrARTzAHK1na9s80xDlQGYPX',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -909,7 +930,8 @@ async def get_image() -> ImageUrl:
                        ],
                        timestamp=IsDatetime(),
                    ),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='The image shows a potato.')],
@@ -929,6 +951,7 @@ async def get_image() -> ImageUrl:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-BRmTI0Y2zmkGw27kLarhsmiFQTGxR',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -953,7 +976,8 @@ async def get_image() -> BinaryContent:
                    content=['What fruit is in the image you can get from the get_image tool?'],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='get_image', args='{}', tool_call_id='call_Btn0GIzGr4ugNlLmkQghQUMY')],
@@ -973,6 +997,7 @@ async def get_image() -> BinaryContent:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BRlkLhPc87BdohVobEJJCGq3rUAG2',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -989,7 +1014,8 @@ async def get_image() -> BinaryContent:
                        ],
                        timestamp=IsDatetime(),
                    ),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='The image shows a kiwi fruit.')],
@@ -1009,6 +1035,7 @@ async def get_image() -> BinaryContent:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-BRlkORPA5rXMV3uzcOcgK4eQFKCVW',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1935,6 +1962,7 @@ async def test_openai_instructions(allow_model_requests: None, openai_api_key: s
            ModelRequest(
                parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='The capital of France is Paris.')],
@@ -1954,6 +1982,7 @@ async def test_openai_instructions(allow_model_requests: None, openai_api_key: s
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-BJjf61mLb9z5H45ClJzbx0UWKwjo1',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1982,6 +2011,7 @@ async def get_temperature(city: str) -> float:
            ModelRequest(
                parts=[UserPromptPart(content='What is the temperature in Tokyo?', timestamp=IsDatetime())],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='get_temperature', args='{"city":"Tokyo"}', tool_call_id=IsStr())],
@@ -2001,6 +2031,7 @@ async def get_temperature(city: str) -> float:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BMxEwRA0p0gJ52oKS7806KAlfMhqq',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2009,6 +2040,7 @@ async def get_temperature(city: str) -> float:
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='The temperature in Tokyo is currently 20.0 degrees Celsius.')],
@@ -2028,6 +2060,7 @@ async def get_temperature(city: str) -> float:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-BMxEx6B8JEj6oDC45MOWKp0phg8UP',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2042,7 +2075,10 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
    result = await agent.run('How do I cross the street?')
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+           ModelRequest(
+               parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[
                    ThinkingPart(
@@ -2061,6 +2097,7 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c1fa0523248197888681b898567bde093f57e27128848a',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2078,7 +2115,8 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
                    content='Considering the way to cross the street, analogously, how do I cross the river?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content=IsStr())],
@@ -2098,6 +2136,7 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-CENUmtwDD0HdvTUYL6lUeijDtxrZL',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2352,7 +2391,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[ToolCallPart(tool_name='get_user_country', args='{}', tool_call_id=IsStr())],
@@ -2372,6 +2412,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BSXk0dWkG4hfPt0lph4oFO35iT73I',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2381,7 +2422,8 @@ async def get_user_country() -> str:
                        tool_call_id=IsStr(),
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2407,6 +2449,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BSXk1xGHYzbhXgUkSutK08bdoNv5s',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2416,7 +2459,8 @@ async def get_user_country() -> str:
                        tool_call_id=IsStr(),
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2445,7 +2489,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2467,6 +2512,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BgeDFS85bfHosRFEEAvq8reaCPCZ8',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2476,7 +2522,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_J1YabdC7G7kzEZNbbZopwenH',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='The largest city in Mexico is Mexico City.')],
@@ -2496,6 +2543,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-BgeDGX9eDyVrEI56aP2vtIHahBzFH',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2527,7 +2575,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2549,6 +2598,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-BSXjyBwGuZrtuuSzNCeaWMpGv2MZ3',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2558,7 +2608,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_PkRGedQNRFUzJp2R7dO7avWR',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='{"city":"Mexico City","country":"Mexico"}')],
@@ -2578,6 +2629,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-BSXjzYGu67dhTy5r8KmjJvQ4HhDVO',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2611,7 +2663,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2633,6 +2686,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-Bgg5utuCSXMQ38j0n2qgfdQKcR9VD',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2642,7 +2696,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_SIttSeiOistt33Htj4oiHOOX',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2666,6 +2721,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-Bgg5vrxUtCDlvgMreoxYxPaKxANmd',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2695,7 +2751,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2717,6 +2774,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-Bgh27PeOaFW6qmF04qC5uI2H9mviw',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2726,7 +2784,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_s7oT9jaLAsEqTgvxZTmFh0wB',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='{"city":"Mexico City","country":"Mexico"}')],
@@ -2746,6 +2805,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-Bgh28advCSFhGHPnzUevVS6g6Uwg0',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2779,7 +2839,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2801,6 +2862,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'tool_calls'},
                provider_response_id='chatcmpl-Bgh2AW2NXGgMc7iS639MJXNRgtatR',
                finish_reason='tool_call',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2810,7 +2872,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_wJD14IyJ4KKVtjCrGyNCHO09',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2834,6 +2897,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-Bgh2BthuopRnSqCuUgMbBnOqgkDHC',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py
index 8215515276..96133a4f28 100644
--- a/tests/models/test_openai_responses.py
+++ b/tests/models/test_openai_responses.py
@@ -247,7 +247,8 @@ async def get_location(loc_name: str) -> str:
                    content='What is the location of Londos and London?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -271,6 +272,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_67e547c48c9481918c5c4394464ce0c60ae6111e84dd5c08',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -286,7 +288,8 @@ async def get_location(loc_name: str) -> str:
                        tool_call_id=IsStr(),
                        timestamp=IsDatetime(),
                    ),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -306,6 +309,7 @@ async def get_location(loc_name: str) -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_67e547c5a2f08191802a1f43620f348503a2086afed73b47',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -331,7 +335,8 @@ async def get_image() -> BinaryContent:
                    content=['What fruit is in the image you can get from the get_image tool?'],
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -349,6 +354,7 @@ async def get_image() -> BinaryContent:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_681134d3aa3481919ca581a267db1e510fe7a5a4e2123dc3',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -365,7 +371,8 @@ async def get_image() -> BinaryContent:
                        ],
                        timestamp=IsDatetime(),
                    ),
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -381,6 +388,7 @@ async def get_image() -> BinaryContent:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_681134d53c48819198ce7b89db78dffd02cbfeaababb040c',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -512,7 +520,8 @@ async def test_openai_responses_model_builtin_tools_web_search(allow_model_reque
                    content='Give me the top 3 news in the world today',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+
           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -659,6 +668,7 @@ async def test_openai_responses_model_builtin_tools_web_search(allow_model_reque
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_0e3d55e9502941380068c4aa9a62f48195a373978ed720ac63',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -675,6 +685,7 @@ async def test_openai_responses_model_instructions(allow_model_requests: None, o
            ModelRequest(
                parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -690,6 +701,7 @@ async def test_openai_responses_model_instructions(allow_model_requests: None, o
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_67f3fdfd9fa08191a3d5825db81b8df6003bc73febb56d77',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -710,6 +722,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -752,6 +765,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_028829e50fbcad090068c9c82e1e0081958ddc581008b39428',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -768,6 +782,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -810,6 +825,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_028829e50fbcad090068c9c83b9fb88195b6b84a32e1fc83c0',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -836,6 +852,7 @@ async def test_openai_responses_model_web_search_tool_with_user_location(
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -878,6 +895,7 @@ async def test_openai_responses_model_web_search_tool_with_user_location(
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_0b385a0fdc82fd920068c4aaf3ced88197a88711e356b032c4',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -905,6 +923,7 @@ async def test_openai_responses_model_web_search_tool_with_invalid_region(
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -947,6 +966,7 @@ async def test_openai_responses_model_web_search_tool_with_invalid_region(
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_0b4f29854724a3120068c4ab0b660081919707b95b47552782',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -981,6 +1001,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1029,6 +1050,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_00a60507bf41223d0068c9d2fbf93481a0ba2a7796ae2cab4c',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1198,6 +1220,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                    )
                ],
                instructions='You are a helpful assistant.',
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1246,6 +1269,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_00a60507bf41223d0068c9d31574d881a090c232646860a771',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1326,7 +1350,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1344,6 +1369,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f0b40a8819cb8d55594bc2c232a001fd29e2d5573f7',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1353,7 +1379,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_ZWkVhdUjupo528U9dqgFeRkH',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1371,6 +1398,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f0bfda8819ea65458cd7cc389b801dc81d4bc91f560',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1380,7 +1408,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_iFBd0zULhSZRR908DfH73VwN',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1410,7 +1439,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1428,6 +1458,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f0d9494819ea4f123bba707c9ee0356a60c98816d6a',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1437,7 +1468,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_aTJhYjzmixZaVGqwl5gn2Ncr',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1453,6 +1485,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f0e2b28819d9c828ef4ee526d6a03434b607c02582d',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1485,7 +1518,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1503,6 +1537,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f0f220081a1a621d6bcdc7f31a50b8591d9001d2329',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1512,7 +1547,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_tTAThu8l2S9hNky2krdwijGP',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1528,6 +1564,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f0fde708192989000a62809c6e5020197534e39cc1f',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1562,7 +1599,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1580,6 +1618,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f10f2d081a39b3438f413b3bafc0dd57d732903c563',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1589,7 +1628,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_UaLahjOtaM2tTyYZLxTCbOaP',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1605,6 +1645,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68477f119830819da162aa6e10552035061ad97e2eef7871',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1635,7 +1676,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1653,6 +1695,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68482f12d63881a1830201ed101ecfbf02f8ef7f2fb42b50',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1662,7 +1705,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_FrlL4M0CbAy8Dhv4VqF1Shom',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1678,6 +1722,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68482f1b556081918d64c9088a470bf0044fdb7d019d4115',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1712,7 +1757,8 @@ async def get_user_country() -> str:
                    content='What is the largest city in the user country?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1730,6 +1776,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68482f1d38e081a1ac828acda978aa6b08e79646fe74d5ee',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -1739,7 +1786,8 @@ async def get_user_country() -> str:
                        tool_call_id='call_my4OyoVXRT0m7bLWmsxcaCQI',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -1755,6 +1803,7 @@ async def get_user_country() -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68482f28c1b081a1ae73cbbee012ee4906b4ab2d00d03024',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1953,7 +2002,8 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request
                    content='What is 2+2?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content='4', id='123')],
@@ -1962,6 +2012,7 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request
                timestamp=IsDatetime(),
                provider_name='openai',
                provider_response_id='123',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -1979,7 +2030,10 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
    result = await agent.run('How do I cross the street?')
    assert result.all_messages() == snapshot(
        [
-           ModelRequest(parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())]),
+           ModelRequest(
+               parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+               run_id=IsStr(),
+           ),
            ModelResponse(
                parts=[
                    ThinkingPart(
@@ -2005,6 +2059,7 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42c902794819cb9335264c342f65407460311b0c8d3de',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2021,7 +2076,8 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
                    content='Considering the way to cross the street, analogously, how do I cross the river?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2047,6 +2103,7 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42cb3d520819c9d28b07036e9059507460311b0c8d3de',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2071,7 +2128,8 @@ async def test_openai_responses_thinking_part_from_other_model(
                    content='How do I cross the street?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2098,6 +2156,7 @@ async def test_openai_responses_thinking_part_from_other_model(
                provider_details={'finish_reason': 'end_turn'},
                provider_response_id='msg_0114iHK2ditgTf1N8FWomc4E',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2119,7 +2178,8 @@ async def test_openai_responses_thinking_part_from_other_model(
                    content='Considering the way to cross the street, analogously, how do I cross the river?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2143,6 +2203,7 @@ async def test_openai_responses_thinking_part_from_other_model(
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42ce277ac8193ba08881bcefabaf70ad492c7955fc6fc',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2170,7 +2231,8 @@ async def test_openai_responses_thinking_part_iter(allow_model_requests: None, o
                    content='How do I cross the street?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2204,6 +2266,7 @@ async def test_openai_responses_thinking_part_iter(allow_model_requests: None, o
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42d0fb418819dbfa579f69406b49508fbf9b1584184ff',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2247,6 +2310,7 @@ def update_plan(plan: str) -> str:
                    )
                ],
                instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt",
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2274,6 +2338,7 @@ def update_plan(plan: str) -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42d28772c819684459966ee2201ed0e8bc41441c948f6',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -2285,6 +2350,7 @@ def update_plan(plan: str) -> str:
                    )
                ],
                instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt",
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[TextPart(content=IsStr(), id='msg_68c42d408eec8196ae1c5883e07c093e0e8bc41441c948f6')],
@@ -2297,6 +2363,7 @@ def update_plan(plan: str) -> str:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42d3fd6a08196bce23d6be960ff8a0e8bc41441c948f6',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2333,7 +2400,8 @@ async def test_openai_responses_thinking_without_summary(allow_model_requests: N
                    content='What is 2+2?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2344,6 +2412,7 @@ async def test_openai_responses_thinking_without_summary(allow_model_requests: N
                timestamp=IsDatetime(),
                provider_name='openai',
                provider_response_id='123',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2404,7 +2473,8 @@ async def test_openai_responses_thinking_with_multiple_summaries(allow_model_req
                    content='What is 2+2?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2418,6 +2488,7 @@ async def test_openai_responses_thinking_with_multiple_summaries(allow_model_req
                timestamp=IsDatetime(),
                provider_name='openai',
                provider_response_id='123',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2467,7 +2538,8 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                    content='What is the meaning of life?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2486,6 +2558,7 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42ddf9bbc8194aa7b97304dd909cb0202c9ad459e0d23',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2518,7 +2591,8 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                    content='Anything to add?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2537,6 +2611,7 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c42de4afcc819f995a1c59fe87c9d5051f82c608a83beb',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2564,7 +2639,8 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                    content='what is 65465-6544 * 65464-6+1.02255',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2619,6 +2695,7 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cdba511c7081a389e67b16621029c609b7445677780c8f',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2633,7 +2710,8 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                    content='how about 2 to the power of 8?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2652,6 +2730,7 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cdba6a610481a3b4533f345bea8a7b09b7445677780c8f',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -2685,7 +2764,8 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream(
                    content="what's 123456 to the power of 123?",
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -2748,6 +2828,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream(
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68c35098e6fc819e80fb94b25b7d031b0f2d670b80edc507',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -3503,7 +3584,8 @@ def get_meaning_of_life() -> int:
                    content='What is the meaning of life?',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -3521,6 +3603,7 @@ def get_meaning_of_life() -> int:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cc4fa5603481958e2143685133fe530548824120ffcf74',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -3530,7 +3613,8 @@ def get_meaning_of_life() -> int:
                        tool_call_id='call_3WCunBU7lCG1HHaLmnnRJn8I',
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -3550,6 +3634,7 @@ def get_meaning_of_life() -> int:
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cc4fa6a8a881a187b0fe1603057bff0307c6d4d2ee5985',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -3595,6 +3680,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
        BinaryImage(
            data=IsBytes(),
            media_type='image/png',
+           _identifier='653a61',
            identifier='653a61',
        )
    )
@@ -3607,7 +3693,8 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                    content='Create a chart of y=x^2 for x=-5 to 5',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -3653,6 +3740,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                        content=BinaryImage(
                            data=IsBytes(),
                            media_type='image/png',
+                           _identifier='653a61',
                            identifier='653a61',
                        ),
                        id='ci_68cdc39029a481909399d54b0a3637a10187028ba77f15f7',
@@ -3678,6 +3766,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cdc382bc98819083a5b47ec92e077b0187028ba77f15f7',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -3687,6 +3776,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
        BinaryImage(
            data=IsBytes(),
            media_type='image/png',
+           _identifier='81863d',
            identifier='81863d',
        )
    )
@@ -3698,7 +3788,8 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                    content='Style it more futuristically.',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -3793,6 +3884,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                        content=BinaryImage(
                            data=IsBytes(),
                            media_type='image/png',
+                           _identifier='81863d',
                            identifier='81863d',
                        ),
                        id='ci_68cdc3be6f3481908f64d8f0a71dc6bb0187028ba77f15f7',
@@ -3833,6 +3925,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cdc39da72481909e0512fef9d646240187028ba77f15f7',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -3860,6 +3953,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
        BinaryImage(
            data=IsBytes(),
            media_type='image/png',
+           _identifier='df0d78',
            identifier='df0d78',
        )
    )
@@ -3871,7 +3965,8 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
                    content='Create a chart of y=x^2 for x=-5 to 5',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -3891,6 +3986,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
                        content=BinaryImage(
                            data=IsBytes(),
                            media_type='image/png',
+                           _identifier='df0d78',
                            identifier='df0d78',
                        ),
                        id='ci_06c1a26fd89d07f20068dd937636948197b6c45865da36d8f7',
@@ -3914,6 +4010,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_06c1a26fd89d07f20068dd9367869c819788cb28e6f19eff9b',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -5245,10 +5342,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
            PartStartEvent(
                index=2,
                part=FilePart(
-                   content=BinaryImage(
-                       data=IsBytes(),
-                       media_type='image/png',
-                   ),
+                   content=BinaryImage(data=IsBytes(), media_type='image/png', _identifier='df0d78'),
                    id='ci_06c1a26fd89d07f20068dd937636948197b6c45865da36d8f7',
                ),
                previous_part_kind='builtin-tool-call',
@@ -5359,7 +5453,8 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                    content='Generate an image of an axolotl.',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -5409,6 +5504,7 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                provider_details={'finish_reason': 'completed'},
                provider_response_id=IsStr(),
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -5429,7 +5525,8 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                    content='Now give it a sombrero.',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -5479,6 +5576,7 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                provider_details={'finish_reason': 'completed'},
                provider_response_id=IsStr(),
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -5521,7 +5619,8 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No
                    content='Generate an image of an axolotl.',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -5569,6 +5668,7 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No
                provider_details={'finish_reason': 'completed'},
                provider_response_id=IsStr(),
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -5699,7 +5799,8 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                    content='Generate an image of an axolotl.',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -5746,6 +5847,7 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cdec1f3290819f99d9caba8703b251079003437d26d0c0',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
            ModelRequest(
                parts=[
@@ -5754,7 +5856,8 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                        tool_call_id=IsStr(),
                        timestamp=IsDatetime(),
                    )
-               ]
+               ],
+               run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -5801,6 +5904,7 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                provider_details={'finish_reason': 'completed'},
                provider_response_id='resp_68cdec61d0a0819fac14ed057a9946a1079003437d26d0c0',
                finish_reason='stop',
+               run_id=IsStr(),
            ),
        ]
    )
@@ -5858,7 +5962,8 @@ class Animal(BaseModel):
                    content='Generate an image of an axolotl.',
                    timestamp=IsDatetime(),
                )
-           ]
+           ],
+           run_id=IsStr(),
            ),
            ModelResponse(
                parts=[
@@ -5903,6 +6008,7 @@ class Animal(BaseModel):
                provider_details={'finish_reason': 'completed'},
provider_response_id='resp_0360827931d9421b0068dd8328c08c81a0ba854f245883906f', finish_reason='stop', + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5911,7 +6017,8 @@ class Animal(BaseModel): tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -5935,6 +6042,7 @@ class Animal(BaseModel): provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0360827931d9421b0068dd8370a70081a09d6de822ee43bbc4', finish_reason='stop', + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5944,7 +6052,8 @@ class Animal(BaseModel): tool_call_id='call_eE7MHM5WMJnMt5srV69NmBJk', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -5968,7 +6077,8 @@ class Animal(BaseModel): content='Generate an image of an axolotl.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6016,6 +6126,7 @@ class Animal(BaseModel): provider_details={'finish_reason': 'completed'}, provider_response_id='resp_09b7ce6df817433c0068dd8407c37881a0ad817ef3cc3a3600', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6039,7 +6150,8 @@ class Animal(BaseModel): content='Generate an image of an axolotl.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6087,6 +6199,7 @@ class Animal(BaseModel): provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0d14a5e3c26c21180068dd871d439081908dc36e63fab0cedf', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6116,7 +6229,8 @@ async def get_animal() -> str: content='Generate an image of the animal returned by the get_animal tool.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6140,6 +6254,7 @@ async def get_animal() -> str: provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0481074da98340df0068dd88dceb1481918b1d167d99bc51cd', finish_reason='stop', + run_id=IsStr(), ), ModelRequest( parts=[ @@ -6149,7 +6264,8 @@ async def get_animal() -> str: tool_call_id='call_t76xO1K2zqrJkawkU3tur8vj', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6188,6 +6304,7 @@ async def get_animal() -> str: provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0481074da98340df0068dd88f0ba04819185a168065ef28040', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6214,7 +6331,8 @@ async def test_openai_responses_multiple_images(allow_model_requests: None, open content='Generate two separate images of axolotls.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6289,6 +6407,7 @@ async def test_openai_responses_multiple_images(allow_model_requests: None, open provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0b6169df6e16e9690068dd80d64aec81919c65f238307673bb', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6315,7 +6434,8 @@ async def test_openai_responses_image_generation_jpeg(allow_model_requests: None content='Generate an image of axolotl.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6360,6 +6480,7 @@ async def test_openai_responses_image_generation_jpeg(allow_model_requests: None provider_details={'finish_reason': 'completed'}, provider_response_id='resp_08acbdf1ae54befc0068dd9ced226c8197a2e974b29c565407', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6416,7 +6537,8 @@ class CityLocation(BaseModel): content='What is the largest city in the user country?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6440,6 
+6562,7 @@ class CityLocation(BaseModel): provider_details={'finish_reason': 'completed'}, provider_response_id='resp_001fd29e2d5573f70068ece2e6dfbc819c96557f0de72802be', finish_reason='stop', + run_id=IsStr(), ), ModelRequest( parts=[ @@ -6449,7 +6572,8 @@ class CityLocation(BaseModel): tool_call_id='call_LIXPi261Xx3dGYzlDsOoyHGk', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -6485,6 +6609,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None ) ], instructions='You are a helpful assistant.', + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6611,6 +6736,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0083938b3a28070e0068fabd81970881a0a1195f2cab45bd04', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6627,6 +6753,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None ) ], instructions='You are a helpful assistant.', + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6656,6 +6783,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0083938b3a28070e0068fabd9d414881a089cf24784f80e021', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -6702,6 +6830,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request ) ], instructions='You are a helpful assistant.', + run_id=IsStr(), ), ModelResponse( parts=[ @@ -6874,6 +7003,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request provider_details={'finish_reason': 'completed'}, provider_response_id='resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4', finish_reason='stop', + run_id=IsStr(), ), ] ) @@ -7090,6 +7220,7 @@ async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model UserPromptPart(content='What do I have on my Google Calendar for today?', timestamp=IsDatetime()) ], instructions='You are a helpful assistant.', + run_id=IsStr(), ), ModelResponse( parts=[ @@ -7250,6 +7381,7 @@ async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model provider_details={'finish_reason': 'completed'}, provider_response_id='resp_0558010cf1416a490068faa0f945bc81a0b6a6dfb7391030d5', finish_reason='stop', + run_id=IsStr(), ), ] ) diff --git a/tests/models/test_outlines.py b/tests/models/test_outlines.py index 89024bc12b..73adc28853 100644 --- a/tests/models/test_outlines.py +++ b/tests/models/test_outlines.py @@ -324,11 +324,9 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None: ) ], instructions='Answer in one word.', + run_id=IsStr(), ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), - ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) result = await agent.run('What is the capital of Germany?', message_history=result.all_messages()) @@ -342,11 +340,9 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None: ) ], instructions='Answer in one word.', + run_id=IsStr(), ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), - ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ModelRequest( parts=[ UserPromptPart( @@ -355,11 +351,9 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None: ) ], instructions='Answer in one word.', + run_id=IsStr(), ), - ModelResponse( - 
parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), - ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) @@ -376,12 +370,10 @@ def test_request_sync(llamacpp_model: OutlinesModel) -> None: content='What is the capital of France?', timestamp=IsDatetime(), ) - ] - ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), + ], + run_id=IsStr(), ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) @@ -408,12 +400,10 @@ async def test_request_async_model(mock_async_model: OutlinesModel) -> None: content='What is the capital of France?', timestamp=IsDatetime(), ) - ] - ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), + ], + run_id=IsStr(), ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) @@ -445,12 +435,10 @@ def test_request_image_binary(transformers_multimodal_model: OutlinesModel, bina ], timestamp=IsDatetime(), ) - ] - ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), + ], + run_id=IsStr(), ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) @@ -478,12 +466,10 @@ def test_request_image_url(transformers_multimodal_model: OutlinesModel) -> None ], timestamp=IsDatetime(), ) - ] - ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), + ], + run_id=IsStr(), ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) @@ -536,12 +522,10 @@ class Box(BaseModel): content='Give me the dimensions of a box', timestamp=IsDatetime(), ) - ] - ), - ModelResponse( - parts=[TextPart(content=IsStr())], - timestamp=IsDatetime(), + ], + run_id=IsStr(), ), + ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()), ] ) diff --git a/tests/test_a2a.py b/tests/test_a2a.py index 0481120bd3..ab67c00587 100644 --- a/tests/test_a2a.py +++ b/tests/test_a2a.py @@ -570,7 +570,12 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon assert len(messages_received) == 1 first_run_history = messages_received[0] assert first_run_history == snapshot( - [ModelRequest(parts=[UserPromptPart(content='First message', timestamp=IsDatetime())])] + [ + ModelRequest( + parts=[UserPromptPart(content='First message', timestamp=IsDatetime())], + run_id=IsStr(), + ) + ] ) # Second message - reuse the same context_id @@ -605,7 +610,10 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon assert second_run_history == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='First message', timestamp=IsDatetime())]), + ModelRequest( + parts=[UserPromptPart(content='First message', timestamp=IsDatetime())], + run_id=IsStr(), + ), ModelResponse( parts=[ ToolCallPart( @@ -615,6 +623,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon usage=RequestUsage(input_tokens=52, output_tokens=7), model_name='function:track_messages:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -626,6 +635,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon ), UserPromptPart(content='Second message', timestamp=IsDatetime()), ], + run_id=IsStr(), ), ] ) diff --git a/tests/test_agent.py b/tests/test_agent.py index 0a6bf1e325..9dba1db4cc 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -80,6 +80,7 @@ def return_tuple(_: 
list[ModelMessage], info: AgentInfo) -> ModelResponse: agent = Agent(FunctionModel(return_tuple), output_type=tuple[str, str]) result = agent.run_sync('Hello') + assert isinstance(result.run_id, str) assert result.output == ('foo', 'bar') assert result.response == snapshot( ModelResponse( @@ -87,6 +88,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: usage=RequestUsage(input_tokens=51, output_tokens=7), model_name='function:return_tuple:', timestamp=IsDatetime(), + run_id=IsStr(), ) ) @@ -174,12 +176,16 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert result.output.model_dump() == {'a': 42, 'b': 'foo'} assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"a": "wrong", "b": "foo"}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=51, output_tokens=7), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -196,13 +202,15 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=89, output_tokens=14), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -212,7 +220,8 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -300,12 +309,16 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo: assert result.output.model_dump() == {'a': 42, 'b': 'foo'} assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"a": 41, "b": "foo"}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=51, output_tokens=7), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -315,13 +328,15 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=63, output_tokens=14), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -331,7 +346,8 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -437,12 +453,16 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert call_index == 2 assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + 
), ModelResponse( parts=[TextPart(content='hello')], usage=RequestUsage(input_tokens=51, output_tokens=1), model_name='function:return_tuple:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -451,7 +471,8 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -460,6 +481,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: usage=RequestUsage(input_tokens=68, output_tokens=8), model_name='function:return_tuple:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -469,7 +491,8 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -480,7 +503,8 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: ToolReturnPart( tool_name='final_result', content='foobar', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ) ) assert result.all_messages()[-1] == snapshot( @@ -492,7 +516,8 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ) ) @@ -508,6 +533,7 @@ def test_output_tool_return_content_str_return(): usage=RequestUsage(input_tokens=51, output_tokens=4), model_name='test', timestamp=IsDatetime(), + run_id=IsStr(), ) ) @@ -1016,7 +1042,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='New York City', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -1029,6 +1056,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: usage=RequestUsage(input_tokens=53, output_tokens=7), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1038,7 +1066,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -1051,6 +1080,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: usage=RequestUsage(input_tokens=68, output_tokens=13), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1060,7 +1090,8 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -1098,13 +1129,15 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='New York City', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='New York City')], usage=RequestUsage(input_tokens=53, output_tokens=3), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1113,13 +1146,15 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Mexico City')], usage=RequestUsage(input_tokens=70, output_tokens=5), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1286,13 +1321,15 @@ def say_world(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='hello', timestamp=IsDatetime(), ) - 
] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], usage=RequestUsage(input_tokens=51, output_tokens=1), model_name='function:say_world:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1340,7 +1377,8 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes content='Mexico City', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -1353,6 +1391,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes usage=RequestUsage(input_tokens=52, output_tokens=6), model_name='function:call_handoff_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1362,7 +1401,8 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -1375,7 +1415,8 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes content='Get me the weather in Mexico City', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -1388,6 +1429,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes usage=RequestUsage(input_tokens=57, output_tokens=6), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1397,7 +1439,8 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -1736,13 +1779,15 @@ class CityLocation(BaseModel): content='What is the capital of Mexico?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"city":"Mexico City","country":"Mexico"}')], usage=RequestUsage(input_tokens=56, output_tokens=7), model_name='function:return_city_location:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1770,13 +1815,15 @@ class Foo(BaseModel): content='What is the capital of Mexico?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"bar":"baz"}')], usage=RequestUsage(input_tokens=56, output_tokens=4), model_name='function:return_foo:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1833,7 +1880,8 @@ def return_foo_bar(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: content='What is foo?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -1844,6 +1892,7 @@ def return_foo_bar(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: usage=RequestUsage(input_tokens=53, output_tokens=17), model_name='function:return_foo_bar:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1878,13 +1927,15 @@ class CityLocation(BaseModel): content='What is the capital of Mexico?', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"city": "Mexico City"}')], usage=RequestUsage(input_tokens=56, output_tokens=5), model_name='function:return_city_location:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1900,13 +1951,15 @@ class CityLocation(BaseModel): tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')], usage=RequestUsage(input_tokens=87, output_tokens=12), model_name='function:return_city_location:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1955,13 +2008,15 @@ def call_tool(messages: list[ModelMessage], info: 
AgentInfo) -> ModelResponse: content='New York City', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"city": "New York City"}')], usage=RequestUsage(input_tokens=53, output_tokens=6), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -1970,13 +2025,15 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"city": "Mexico City"}')], usage=RequestUsage(input_tokens=70, output_tokens=11), model_name='function:call_tool:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -1998,26 +2055,30 @@ async def ret_a(x: str) -> str: parts=[ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=52, output_tokens=5), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ ToolReturnPart( tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"ret_a":"a-apple"}')], usage=RequestUsage(input_tokens=53, output_tokens=9), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -2030,33 +2091,41 @@ async def ret_a(x: str) -> str: parts=[ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=52, output_tokens=5), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ ToolReturnPart( tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"ret_a":"a-apple"}')], usage=RequestUsage(input_tokens=53, output_tokens=9), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelRequest( + parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), ), - ModelRequest(parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))]), ModelResponse( parts=[TextPart(content='{"ret_a":"a-apple"}')], usage=RequestUsage(input_tokens=55, output_tokens=13), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -2087,33 +2156,41 @@ async def ret_a(x: str) -> str: parts=[ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=52, output_tokens=5), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ ToolReturnPart( tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"ret_a":"a-apple"}')], usage=RequestUsage(input_tokens=53, output_tokens=9), model_name='test', timestamp=IsNow(tz=timezone.utc), + 
run_id=IsStr(), + ), + ModelRequest( + parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), ), - ModelRequest(parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))]), ModelResponse( parts=[TextPart(content='{"ret_a":"a-apple"}')], usage=RequestUsage(input_tokens=55, output_tokens=13), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -2143,20 +2220,23 @@ async def ret_a(x: str) -> str: parts=[ SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=52, output_tokens=5), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ ToolReturnPart( tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -2169,6 +2249,7 @@ async def ret_a(x: str) -> str: usage=RequestUsage(input_tokens=53, output_tokens=9), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2178,7 +2259,8 @@ async def ret_a(x: str) -> str: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -2191,12 +2273,14 @@ async def ret_a(x: str) -> str: SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=52, output_tokens=5), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2204,12 +2288,14 @@ async def ret_a(x: str) -> str: tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args={'a': 0}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=53, output_tokens=9), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2220,18 +2306,21 @@ async def ret_a(x: str) -> str: timestamp=IsNow(tz=timezone.utc), ), ], + run_id=IsStr(), ), # second call, notice no repeated system prompt ModelRequest( parts=[ UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc)), ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args={'a': 0}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=59, output_tokens=13), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2241,7 +2330,8 @@ async def ret_a(x: str) -> str: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ), ] ) @@ -2309,12 +2399,14 @@ async def instructions(ctx: RunContext) -> str: ), ], instructions='New instructions', + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='success (no tool calls)')], usage=RequestUsage(input_tokens=61, output_tokens=4), model_name='test', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -2363,13 +2455,15 @@ def test_tool() -> str: tool_call_id='call_123', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Final response')], usage=RequestUsage(input_tokens=53, 
output_tokens=4), model_name='function:simple_response:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -2450,20 +2544,26 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: content='Hello', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[], usage=RequestUsage(input_tokens=51), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), + ), + ModelRequest( + parts=[], + run_id=IsStr(), ), - ModelRequest(parts=[]), ModelResponse( parts=[TextPart(content='ok here is text')], usage=RequestUsage(input_tokens=51, output_tokens=4), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -2487,25 +2587,42 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: content='Hello', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[], usage=RequestUsage(input_tokens=51), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), + ), + ModelRequest( + parts=[], + run_id=IsStr(), ), - ModelRequest(parts=[]), ModelResponse( parts=[], usage=RequestUsage(input_tokens=51), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) +def test_agent_message_history_includes_run_id() -> None: + agent = Agent(TestModel(custom_output_text='testing run_id')) + + result = agent.run_sync('Hello') + history = result.all_messages() + + run_ids = [message.run_id for message in history] + assert run_ids == snapshot([IsStr(), IsStr()]) + assert len({*run_ids}) == snapshot(1) + + def test_unknown_tool(): def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: return ModelResponse(parts=[ToolCallPart('foobar', '{}')]) @@ -2517,12 +2634,16 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: agent.run_sync('Hello') assert messages == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ToolCallPart(tool_name='foobar', args='{}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=51, output_tokens=2), model_name='function:empty:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2532,13 +2653,15 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='foobar', args='{}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=65, output_tokens=4), model_name='function:empty:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -2557,12 +2680,16 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse: assert result.output == 'success' assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ToolCallPart(tool_name='foobar', args='{}', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=51, output_tokens=2), model_name='function:empty:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2572,13 +2699,15 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ModelResponse( 
parts=[TextPart(content='success')], usage=RequestUsage(input_tokens=65, output_tokens=3), model_name='function:empty:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -2906,7 +3035,8 @@ def deferred_tool(x: int) -> int: # pragma: no cover assert result.all_messages() == snapshot( [ ModelRequest( - parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))] + parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -2924,6 +3054,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover usage=RequestUsage(input_tokens=53, output_tokens=27), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -2960,7 +3091,8 @@ def deferred_tool(x: int) -> int: # pragma: no cover tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ), ] ) @@ -3015,7 +3147,8 @@ def deferred_tool(x: int) -> int: # pragma: no cover UserPromptPart( content='test early strategy with final result in middle', timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -3032,6 +3165,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover usage=RequestUsage(input_tokens=58, output_tokens=22), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -3065,7 +3199,8 @@ def deferred_tool(x: int) -> int: # pragma: no cover tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ), ] ) @@ -3181,7 +3316,10 @@ async def get_location(loc_name: str) -> str: assert result.output == 'final response' assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ TextPart(content='foo'), @@ -3190,6 +3328,7 @@ async def get_location(loc_name: str) -> str: usage=RequestUsage(input_tokens=51, output_tokens=6), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -3199,13 +3338,15 @@ async def get_location(loc_name: str) -> str: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], usage=RequestUsage(input_tokens=56, output_tokens=8), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -3224,12 +3365,16 @@ def test_nested_capture_run_messages() -> None: assert messages1 == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[TextPart(content='success (no tool calls)')], usage=RequestUsage(input_tokens=51, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -3247,12 +3392,16 @@ def test_double_capture_run_messages() -> None: assert result2.output == 'success (no tool calls)' assert messages == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[TextPart(content='success (no tool 
calls)')], usage=RequestUsage(input_tokens=51, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -3296,6 +3445,7 @@ async def func() -> str: ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'), ], + run_id=IsStr(), kind='request', ), ModelResponse( @@ -3303,6 +3453,7 @@ async def func() -> str: usage=RequestUsage(input_tokens=53, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), kind='response', ), ] @@ -3324,6 +3475,7 @@ async def func() -> str: ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'), ], + run_id=IsStr(), kind='request', ), ModelResponse( @@ -3331,10 +3483,12 @@ async def func() -> str: usage=RequestUsage(input_tokens=53, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), kind='response', ), ModelRequest( parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt')], + run_id=IsStr(), kind='request', ), ModelResponse( @@ -3342,6 +3496,7 @@ async def func() -> str: usage=RequestUsage(input_tokens=54, output_tokens=8), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), kind='response', ), ] @@ -3379,6 +3534,7 @@ async def func(): ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'), ], + run_id=IsStr(), kind='request', ), ModelResponse( @@ -3386,6 +3542,7 @@ async def func(): usage=RequestUsage(input_tokens=53, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), kind='response', ), ] @@ -3408,6 +3565,7 @@ async def func(): ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'), ], + run_id=IsStr(), kind='request', ), ModelResponse( @@ -3415,10 +3573,12 @@ async def func(): usage=RequestUsage(input_tokens=53, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), kind='response', ), ModelRequest( parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt')], + run_id=IsStr(), kind='request', ), ModelResponse( @@ -3426,6 +3586,7 @@ async def func(): usage=RequestUsage(input_tokens=54, output_tokens=8), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), kind='response', ), ] @@ -3470,12 +3631,16 @@ async def foobar(x: str) -> str: assert result.output == snapshot('{"foobar":"inner agent result"}') assert messages == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ToolCallPart(tool_name='foobar', args={'x': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=51, output_tokens=5), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -3485,13 +3650,15 @@ async def foobar(x: str) -> str: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='{"foobar":"inner agent result"}')], usage=RequestUsage(input_tokens=54, output_tokens=11), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -3562,6 +3729,7 @@ def test_binary_content_serializable(): ], 'instructions': None, 'kind': 'request', + 'run_id': IsStr(), }, { 'parts': [{'content': 'success (no tool calls)', 'id': None, 
'part_kind': 'text'}], @@ -3582,6 +3750,7 @@ def test_binary_content_serializable(): 'timestamp': IsStr(), 'kind': 'response', 'finish_reason': None, + 'run_id': IsStr(), }, ] ) @@ -3618,6 +3787,7 @@ def test_image_url_serializable_missing_media_type(): ], 'instructions': None, 'kind': 'request', + 'run_id': IsStr(), }, { 'parts': [{'content': 'success (no tool calls)', 'id': None, 'part_kind': 'text'}], @@ -3638,6 +3808,7 @@ def test_image_url_serializable_missing_media_type(): 'provider_response_id': None, 'kind': 'response', 'finish_reason': None, + 'run_id': IsStr(), }, ] ) @@ -3681,6 +3852,7 @@ def test_image_url_serializable(): ], 'instructions': None, 'kind': 'request', + 'run_id': IsStr(), }, { 'parts': [{'content': 'success (no tool calls)', 'id': None, 'part_kind': 'text'}], @@ -3701,6 +3873,7 @@ def test_image_url_serializable(): 'provider_response_id': None, 'kind': 'response', 'finish_reason': None, + 'run_id': IsStr(), }, ] ) @@ -3796,7 +3969,8 @@ def get_image() -> BinaryContent: ], timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ) ) @@ -3847,7 +4021,8 @@ def get_files(): ], timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ) ) @@ -3867,6 +4042,7 @@ def system_prompt() -> str: UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], instructions='An instructions!', + run_id=IsStr(), ) ) @@ -3890,6 +4066,7 @@ def empty_instructions() -> str: UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], instructions='An instructions!', + run_id=IsStr(), ) ) @@ -3904,6 +4081,7 @@ def test_instructions_both_instructions_and_system_prompt_are_set(): UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], instructions='An instructions!', + run_id=IsStr(), ) ) @@ -3920,6 +4098,7 @@ def instructions() -> str: ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], instructions='You are a helpful assistant.', + run_id=IsStr(), ) ) @@ -3936,6 +4115,7 @@ def instructions_2() -> str: ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], instructions='You are a helpful assistant.', + run_id=IsStr(), ) ) @@ -3954,12 +4134,14 @@ def test_instructions_with_message_history(): ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], instructions='You are a helpful assistant.', + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='success (no tool calls)')], usage=RequestUsage(input_tokens=56, output_tokens=4), model_name='test', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -3984,6 +4166,7 @@ def empty_instructions() -> str: You are a potato.\ """, + run_id=IsStr(), ) ) @@ -3998,6 +4181,7 @@ def test_instructions_during_run(): You are a helpful assistant. 
Your task is to greet people.\ """, + run_id=IsStr(), ) ) @@ -4008,6 +4192,7 @@ def test_instructions_during_run(): instructions="""\ You are a helpful assistant.\ """, + run_id=IsStr(), ) ) @@ -4058,7 +4243,10 @@ def my_tool(x: int) -> int: assert result.new_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ TextPart(content='foo'), @@ -4067,13 +4255,15 @@ def my_tool(x: int) -> int: usage=RequestUsage(input_tokens=51, output_tokens=5), model_name='function:llm:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ ToolReturnPart( tool_name='my_tool', content=2, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -4083,19 +4273,22 @@ def my_tool(x: int) -> int: usage=RequestUsage(input_tokens=52, output_tokens=10), model_name='function:llm:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ ToolReturnPart( tool_name='my_tool', content=4, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[], usage=RequestUsage(input_tokens=53, output_tokens=10), model_name='function:llm:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -4161,6 +4354,7 @@ def foo_tool(foo: Foo) -> int: ], 'instructions': None, 'kind': 'request', + 'run_id': IsStr(), } ) @@ -4227,7 +4421,10 @@ def analyze_data() -> ToolReturn: # Verify the complete message structure using snapshot assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ TextPart(content='Starting analysis'), @@ -4240,6 +4437,7 @@ def analyze_data() -> ToolReturn: usage=RequestUsage(input_tokens=54, output_tokens=4), model_name='function:llm:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -4258,13 +4456,15 @@ def analyze_data() -> ToolReturn: ], timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Analysis completed')], usage=RequestUsage(input_tokens=70, output_tokens=6), model_name='function:llm:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -4302,7 +4502,10 @@ def analyze_data() -> ToolReturn: # Verify the complete message structure using snapshot assert result.all_messages() == snapshot( [ - ModelRequest(parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))]), + ModelRequest( + parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), ModelResponse( parts=[ TextPart(content='Starting analysis'), @@ -4315,6 +4518,7 @@ def analyze_data() -> ToolReturn: usage=RequestUsage(input_tokens=54, output_tokens=4), model_name='function:llm:', timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -4325,13 +4529,15 @@ def analyze_data() -> ToolReturn: metadata={'foo': 'bar'}, timestamp=IsNow(tz=timezone.utc), ), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Analysis completed')], usage=RequestUsage(input_tokens=58, output_tokens=6), model_name='function:llm:', 
timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), ), ] ) @@ -4601,13 +4807,15 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Add the foo tool and run it', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='add_foo_tool', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=57, output_tokens=2), model_name='function:respond:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -4617,13 +4825,15 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='foo', tool_call_id=IsStr())], usage=RequestUsage(input_tokens=60, output_tokens=4), model_name='function:respond:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -4633,13 +4843,15 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Done')], usage=RequestUsage(input_tokens=63, output_tokens=5), model_name='function:respond:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -4686,7 +4898,8 @@ async def only_if_plan_presented( content='Hello', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -4699,6 +4912,7 @@ async def only_if_plan_presented( usage=RequestUsage(input_tokens=51, output_tokens=5), model_name='test', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -4708,7 +4922,8 @@ async def only_if_plan_presented( tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -4721,6 +4936,7 @@ async def only_if_plan_presented( usage=RequestUsage(input_tokens=52, output_tokens=12), model_name='test', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -4730,7 +4946,8 @@ async def only_if_plan_presented( tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -4993,13 +5210,15 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon content='Hello', timestamp=IsDatetime(), ), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ThinkingPart(content='Let me think about this...')], usage=RequestUsage(input_tokens=57, output_tokens=6), model_name='function:model_function:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5008,13 +5227,15 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon tool_call_id=IsStr(), timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Final answer')], usage=RequestUsage(input_tokens=73, output_tokens=8), model_name='function:model_function:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -5063,7 +5284,8 @@ def create_file(path: str, content: str) -> str: content='Create new_file.py and delete ok_to_delete.py and never_delete.py', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -5082,6 +5304,7 @@ def create_file(path: str, content: str) -> str: usage=RequestUsage(input_tokens=60, output_tokens=23), model_name='function:model_function:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5091,7 +5314,8 @@ def create_file(path: str, content: str) -> str: tool_call_id='create_file', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -5118,7 +5342,8 @@ def 
create_file(path: str, content: str) -> str: content='Create new_file.py and delete ok_to_delete.py and never_delete.py', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -5137,6 +5362,7 @@ def create_file(path: str, content: str) -> str: usage=RequestUsage(input_tokens=60, output_tokens=23), model_name='function:model_function:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5146,7 +5372,8 @@ def create_file(path: str, content: str) -> str: tool_call_id='create_file', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5162,13 +5389,15 @@ def create_file(path: str, content: str) -> str: tool_call_id='never_delete', timestamp=IsDatetime(), ), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Done!')], usage=RequestUsage(input_tokens=78, output_tokens=24), model_name='function:model_function:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -5190,13 +5419,15 @@ def create_file(path: str, content: str) -> str: tool_call_id='never_delete', timestamp=IsDatetime(), ), - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Done!')], usage=RequestUsage(input_tokens=78, output_tokens=24), model_name='function:model_function:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -5354,13 +5585,15 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='No thanks', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='All right then, goodbye!')], usage=RequestUsage(input_tokens=54, output_tokens=12), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -5373,13 +5606,15 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='No thanks', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='All right then, goodbye!')], usage=RequestUsage(input_tokens=54, output_tokens=12), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ] ) @@ -5404,7 +5639,8 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='No thanks', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -5673,13 +5909,15 @@ def roll_dice() -> int: content='Roll me a dice.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='roll_dice', args={}, tool_call_id='pyd_ai_tool_call_id__roll_dice')], usage=RequestUsage(input_tokens=55, output_tokens=2), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5689,7 +5927,8 @@ def roll_dice() -> int: tool_call_id='pyd_ai_tool_call_id__roll_dice', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ @@ -5702,6 +5941,7 @@ def roll_dice() -> int: usage=RequestUsage(input_tokens=56, output_tokens=6), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( parts=[ @@ -5711,7 +5951,8 @@ def roll_dice() -> int: tool_call_id='pyd_ai_tool_call_id__final_result', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ] ) @@ -5726,13 +5967,15 @@ def roll_dice() -> int: content='Roll me a dice again.', timestamp=IsDatetime(), ) - ] + ], + run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='roll_dice', args={}, tool_call_id='pyd_ai_tool_call_id__roll_dice')], usage=RequestUsage(input_tokens=66, output_tokens=8), model_name='function:llm:', timestamp=IsDatetime(), + run_id=IsStr(), ), ModelRequest( 
                 parts=[
@@ -5742,7 +5985,8 @@ def roll_dice() -> int:
                         tool_call_id='pyd_ai_tool_call_id__roll_dice',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -5755,6 +5999,7 @@ def roll_dice() -> int:
                 usage=RequestUsage(input_tokens=67, output_tokens=12),
                 model_name='function:llm:',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -5764,7 +6009,8 @@ def roll_dice() -> int:
                         tool_call_id='pyd_ai_tool_call_id__final_result',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -5855,6 +6101,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                 usage=RequestUsage(input_tokens=51, output_tokens=4),
                 model_name='function:llm:',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -5867,13 +6114,15 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         content='Hello',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='ok here is text')],
                 usage=RequestUsage(input_tokens=51, output_tokens=4),
                 model_name='function:llm:',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/test_dbos.py b/tests/test_dbos.py
index 256aba83fb..051a63ba74 100644
--- a/tests/test_dbos.py
+++ b/tests/test_dbos.py
@@ -1401,6 +1401,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
                 )
             ],
             instructions='Just call tools without asking for confirmation.',
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1431,6 +1432,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1448,6 +1450,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
                 ),
             ],
             instructions='Just call tools without asking for confirmation.',
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1469,6 +1472,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
             provider_details={'finish_reason': 'stop'},
             provider_response_id=IsStr(),
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1528,6 +1532,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
                 )
             ],
             instructions='Just call tools without asking for confirmation.',
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1558,6 +1563,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1575,6 +1581,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
                 ),
             ],
             instructions='Just call tools without asking for confirmation.',
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1596,6 +1603,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
             provider_details={'finish_reason': 'stop'},
             provider_response_id=IsStr(),
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1629,7 +1637,8 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                     content='What is the weather in CDMX?',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1655,6 +1664,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1664,7 +1674,8 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1690,6 +1701,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1699,7 +1711,8 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='The weather in Mexico City is currently sunny.')],
@@ -1719,6 +1732,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
             provider_details={'finish_reason': 'stop'},
             provider_response_id=IsStr(),
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
diff --git a/tests/test_examples.py b/tests/test_examples.py
index c7c32c340d..cc0ae7b593 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -258,7 +258,9 @@ def print(self, *args: Any, **kwargs: Any) -> None:
 def print_callback(s: str) -> str:
     s = re.sub(r'datetime\.datetime\(.+?\)', 'datetime.datetime(...)', s, flags=re.DOTALL)
     s = re.sub(r'\d\.\d{4,}e-0\d', '0.0...', s)
-    return re.sub(r'datetime.date\(', 'date(', s)
+    s = re.sub(r'datetime.date\(', 'date(', s)
+    s = re.sub(r"run_id='.+?'", "run_id='...'", s)
+    return s
 
 
 def mock_render_duration(seconds: float, force_signed: bool) -> str:
diff --git a/tests/test_history_processor.py b/tests/test_history_processor.py
index 54d38935d2..89e487dc8c 100644
--- a/tests/test_history_processor.py
+++ b/tests/test_history_processor.py
@@ -20,7 +20,7 @@
 from pydantic_ai.tools import RunContext
 from pydantic_ai.usage import RequestUsage
 
-from .conftest import IsDatetime
+from .conftest import IsDatetime, IsStr
 
 pytestmark = [pytest.mark.anyio]
 
@@ -64,7 +64,10 @@ def no_op_history_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
         ]
     )
     assert captured_messages == result.all_messages()
@@ -72,12 +75,16 @@ def no_op_history_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
                 usage=RequestUsage(input_tokens=54, output_tokens=4),
                 model_name='function:capture_model_function:capture_model_stream_function',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -124,13 +131,17 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage]
     assert captured_messages == result.all_messages()
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelRequest(parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())]),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
                 usage=RequestUsage(input_tokens=54, output_tokens=2),
                 model_name='function:capture_model_function:capture_model_stream_function',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -179,13 +190,17 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage]
     assert captured_messages == result.all_messages()
     assert result.all_messages() == snapshot(
        [
-            ModelRequest(parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelRequest(parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())]),
             ModelResponse(
                 parts=[TextPart(content='hello')],
                 usage=RequestUsage(input_tokens=50, output_tokens=1),
                 model_name='function:capture_model_function:capture_model_stream_function',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -231,12 +246,16 @@ def capture_messages_processor(messages: list[ModelMessage]) -> list[ModelMessag
     assert result.all_messages() == snapshot(
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
                 usage=RequestUsage(input_tokens=54, output_tokens=2),
                 model_name='function:capture_model_function:capture_model_stream_function',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -318,6 +337,7 @@ def second_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
             usage=RequestUsage(input_tokens=57, output_tokens=3),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -372,13 +392,15 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                     content='Question 2',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Provider response')],
             usage=RequestUsage(input_tokens=54, output_tokens=2),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -440,13 +462,15 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                     content='Question 2',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='hello')],
             usage=RequestUsage(input_tokens=50, output_tokens=1),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -505,6 +529,7 @@ def context_processor(ctx: RunContext[str], messages: list[ModelMessage]) -> lis
             usage=RequestUsage(input_tokens=52, output_tokens=2),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -538,7 +563,8 @@ async def async_context_processor(ctx: RunContext[Any], messages: list[ModelMess
                     content='Question 3',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         )
     ]
 )
@@ -551,13 +577,15 @@ async def async_context_processor(ctx: RunContext[Any], messages: list[ModelMess
                     content='Question 3',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Provider response')],
             usage=RequestUsage(input_tokens=52, output_tokens=2),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -642,6 +670,7 @@ class Deps:
             usage=RequestUsage(input_tokens=56, output_tokens=2),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -693,6 +722,7 @@ def return_new_history(messages: list[ModelMessage]) -> list[ModelMessage]:
             usage=RequestUsage(input_tokens=52, output_tokens=2),
             model_name='function:capture_model_function:capture_model_stream_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -742,7 +772,10 @@ def __call__(self, messages: list[ModelMessage]) -> list[ModelMessage]:
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
         ]
     )
     assert captured_messages == result.all_messages()
@@ -750,12 +783,16 @@ def __call__(self, messages: list[ModelMessage]) -> list[ModelMessage]:
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
                 usage=RequestUsage(input_tokens=54, output_tokens=4),
                 model_name='function:capture_model_function:capture_model_stream_function',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -783,7 +820,10 @@ def __call__(self, _: RunContext, messages: list[ModelMessage]) -> list[ModelMes
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
         ]
     )
     assert captured_messages == result.all_messages()
@@ -791,12 +831,16 @@ def __call__(self, _: RunContext, messages: list[ModelMessage]) -> list[ModelMes
         [
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
                 usage=RequestUsage(input_tokens=54, output_tokens=4),
                 model_name='function:capture_model_function:capture_model_stream_function',
                 timestamp=IsDatetime(),
+                run_id=IsStr(),
             ),
         ]
     )
diff --git a/tests/test_mcp.py b/tests/test_mcp.py
index 2408b9b267..fc7a8e5dc2 100644
--- a/tests/test_mcp.py
+++ b/tests/test_mcp.py
@@ -206,7 +206,8 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
                     content='What is 0 degrees Celsius in Fahrenheit?',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -232,6 +233,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRlnvvqIPFofAtKqtQKMWZkgXhzlT',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -241,7 +243,8 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
                     tool_call_id='call_QssdxTGkPblTYHmyVES1tKBj',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='0 degrees Celsius is equal to 32 degrees Fahrenheit.')],
@@ -261,6 +264,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRlnyjUo5wlyqvdNdM5I8vIWjo1qF',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -336,7 +340,8 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
                     content='What is the weather in Mexico City?',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -362,6 +367,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRlo3e1Ud2lnvkddMilmwC7LAemiy',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -371,7 +377,8 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
                     tool_call_id='call_m9goNwaHBbU926w47V7RtWPt',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -395,6 +402,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRlo41LxqBYgGKWgGrQn67fQacOLp',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -412,7 +420,8 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
                     content='Get me the product name',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -438,6 +447,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRmhyweJVYonarb7s9ckIMSHf2vHo',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -447,7 +457,8 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
                     tool_call_id='call_LaiWltzI39sdquflqeuF0EyE',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='The product name is "Pydantic AI".')],
@@ -467,6 +478,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRmhzqXFObpYwSzREMpJvX9kbDikR',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -484,7 +496,8 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
                     content='Get me the product name via get_product_name_link',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -510,6 +523,7 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BwdHSFe0EykAOpf0LWZzsWAodIQzb',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -519,7 +533,8 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
                     tool_call_id='call_qi5GtBeIEyT7Y3yJvVFIi062',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='The product name is "Pydantic AI".')],
@@ -539,6 +554,7 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BwdHTIlBZWzXJPBR8VTOdC4O57ZQA',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -558,7 +574,8 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
                     content='Get me the image resource',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -584,6 +601,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRlo7KYJVXuNZ5lLLdYcKZDsX2CHb',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -594,7 +612,8 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
                     timestamp=IsDatetime(),
                 ),
                 UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -618,6 +637,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRloBGHh27w3fQKwxq4fX2cPuZJa9',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -639,7 +659,8 @@ async def test_tool_returning_image_resource_link(
                     content='Get me the image resource via get_image_resource_link',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -665,6 +686,7 @@ async def test_tool_returning_image_resource_link(
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BwdHygYePH1mZgHo2Xxzib0Y7sId7',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -675,7 +697,8 @@ async def test_tool_returning_image_resource_link(
                     timestamp=IsDatetime(),
                 ),
                 UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -699,6 +722,7 @@ async def test_tool_returning_image_resource_link(
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BwdI2D2r9dvqq3pbsA0qgwKDEdTtD',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -714,7 +738,8 @@ async def test_tool_returning_audio_resource(
     assert result.all_messages() == snapshot(
         [
             ModelRequest(
-                parts=[UserPromptPart(content="What's the content of the audio resource?", timestamp=IsDatetime())]
+                parts=[UserPromptPart(content="What's the content of the audio resource?", timestamp=IsDatetime())],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='get_audio_resource', args={}, tool_call_id=IsStr())],
@@ -727,6 +752,7 @@ async def test_tool_returning_audio_resource(
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -737,7 +763,8 @@ async def test_tool_returning_audio_resource(
                         timestamp=IsDatetime(),
                     ),
                     UserPromptPart(content=['This is file 2d36ae:', audio_content], timestamp=IsDatetime()),
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='The audio resource contains a voice saying "Hello, my name is Marcelo."')],
@@ -753,6 +780,7 @@ async def test_tool_returning_audio_resource(
                 provider_details={'finish_reason': 'STOP'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -773,7 +801,8 @@ async def test_tool_returning_audio_resource_link(
                     content="What's the content of the audio resource via get_audio_resource_link?",
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -797,6 +826,7 @@ async def test_tool_returning_audio_resource_link(
             provider_details={'finish_reason': 'STOP'},
             provider_response_id='Pe_BaJGqOKSdz7IP0NqogA8',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -813,7 +843,8 @@ async def test_tool_returning_audio_resource_link(
                     ],
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='00:05')],
@@ -829,6 +860,7 @@ async def test_tool_returning_audio_resource_link(
             provider_details={'finish_reason': 'STOP'},
             provider_response_id='QO_BaLC6AozQz7IPh5Kj4Q4',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -846,7 +878,8 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                     content='Get me an image',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -872,6 +905,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRloGQJWIX0Qk7gtNzF4s2Fez0O29',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -888,7 +922,8 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                     ],
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Here is an image of a sliced kiwi on a white background.')],
@@ -908,6 +943,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRloJHR654fSD0fcvLWZxtKtn0pag',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -925,7 +961,8 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                     content='Get me a dict, respond on one line',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='get_dict', args='{}', tool_call_id='call_oqKviITBj8PwpQjGyUu4Zu5x')],
@@ -945,6 +982,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRloOs7Bb2tq8wJyy9Rv7SQ7L65a7',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -954,7 +992,8 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                     tool_call_id='call_oqKviITBj8PwpQjGyUu4Zu5x',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='{"foo":"bar","baz":123}')],
@@ -974,6 +1013,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRloPczU1HSCWnreyo21DdNtdOM7L',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -991,7 +1031,8 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                     content='Get me an unstructured dict, respond on one line',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1015,6 +1056,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-CLbP82ODQMEznhobUKdq6Rjn9Aa12',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1024,7 +1066,8 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                     tool_call_id='call_R0n2R7S9vL2aZOX25T9jahTd',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='{"foo":"bar","baz":123}')],
@@ -1044,6 +1087,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-CLbPAOYN3jPYdvYeD8JNOOXF5N554',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1063,7 +1107,8 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                     content='Get me an error, pass False as a value, unless the tool tells you otherwise',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1089,6 +1134,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRloSNg7aGSp1rXDkhInjMIUHKd7A',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1098,7 +1144,8 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                     tool_call_id='call_rETXZWddAGZSHyVHAxptPGgc',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1124,6 +1171,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRloTvSkFeX4DZKQLqfH9KbQkWlpt',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1133,7 +1181,8 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                     tool_call_id='call_4xGyvdghYKHN8x19KWkRtA5N',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1157,6 +1206,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRloU3MhnqNEqujs28a3ofRbs7VPF',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1174,7 +1224,8 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                     content='Call the none tool and say Hello',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='get_none', args='{}', tool_call_id='call_mJTuQ2Cl5SaHPTJbIILEUhJC')],
@@ -1194,6 +1245,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRloX2RokWc9j9PAXAuNXGR73WNqY',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1203,7 +1255,8 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                     tool_call_id='call_mJTuQ2Cl5SaHPTJbIILEUhJC',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Hello! How can I assist you today?')],
@@ -1223,6 +1276,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRloYWGujk8yE94gfVSsM1T1Ol2Ej',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1242,7 +1296,8 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                     content='Get me multiple items and summarize in one sentence',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1268,6 +1323,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id='chatcmpl-BRlobKLgm6vf79c9O8sloZaYx3coC',
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1289,7 +1345,8 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                     ],
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1313,6 +1370,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
             provider_details={'finish_reason': 'stop'},
             provider_response_id='chatcmpl-BRloepWR5NJpTgSqFBGTSPeM1SWm8',
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
diff --git a/tests/test_messages.py b/tests/test_messages.py
index 6ced1396cd..9b6ad3fbb8 100644
--- a/tests/test_messages.py
+++ b/tests/test_messages.py
@@ -462,6 +462,7 @@ def test_file_part_serialization_roundtrip():
                 'provider_details': None,
                 'provider_response_id': None,
                 'finish_reason': None,
+                'run_id': None,
             }
         ]
     )
@@ -469,6 +470,24 @@ def test_file_part_serialization_roundtrip():
     assert deserialized == messages
 
 
+def test_model_messages_type_adapter_preserves_run_id():
+    messages: list[ModelMessage] = [
+        ModelRequest(
+            parts=[UserPromptPart(content='Hi there', timestamp=datetime.now(tz=timezone.utc))],
+            run_id='run-123',
+        ),
+        ModelResponse(
+            parts=[TextPart(content='Hello!')],
+            run_id='run-123',
+        ),
+    ]
+
+    serialized = ModelMessagesTypeAdapter.dump_python(messages, mode='python')
+    deserialized = ModelMessagesTypeAdapter.validate_python(serialized)
+
+    assert [message.run_id for message in deserialized] == snapshot(['run-123', 'run-123'])
+
+
 def test_model_response_convenience_methods():
     response = ModelResponse(parts=[])
     assert response.text == snapshot(None)
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index 1a126f26dc..230a19501a 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -67,23 +67,29 @@ async def ret_a(x: str) -> str:
 
     async with test_agent.run_stream('Hello') as result:
         assert test_agent.name == 'test_agent'
+        assert isinstance(result.run_id, str)
         assert not result.is_complete
         assert result.all_messages() == snapshot(
             [
-                ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+                ModelRequest(
+                    parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                    run_id=IsStr(),
+                ),
                 ModelResponse(
                     parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())],
                     usage=RequestUsage(input_tokens=51),
                     model_name='test',
                     timestamp=IsNow(tz=timezone.utc),
                     provider_name='test',
+                    run_id=IsStr(),
                 ),
                 ModelRequest(
                     parts=[
                         ToolReturnPart(
                             tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                         )
-                    ]
+                    ],
+                    run_id=IsStr(),
                 ),
             ]
         )
@@ -101,20 +107,25 @@ async def ret_a(x: str) -> str:
         assert result.timestamp() == IsNow(tz=timezone.utc)
         assert result.all_messages() == snapshot(
             [
-                ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+                ModelRequest(
+                    parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                    run_id=IsStr(),
+                ),
                 ModelResponse(
                     parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())],
                     usage=RequestUsage(input_tokens=51),
                     model_name='test',
                     timestamp=IsNow(tz=timezone.utc),
                     provider_name='test',
+                    run_id=IsStr(),
                ),
                 ModelRequest(
                     parts=[
                         ToolReturnPart(
                             tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                         )
-                    ]
+                    ],
+                    run_id=IsStr(),
                 ),
                 ModelResponse(
                     parts=[TextPart(content='{"ret_a":"a-apple"}')],
@@ -122,6 +133,7 @@ async def ret_a(x: str) -> str:
                     model_name='test',
                     timestamp=IsNow(tz=timezone.utc),
                     provider_name='test',
+                    run_id=IsStr(),
                 ),
             ]
         )
@@ -147,23 +159,29 @@ async def ret_a(x: str) -> str:
 
     result = test_agent.run_stream_sync('Hello')
     assert test_agent.name == 'test_agent'
+    assert isinstance(result.run_id, str)
     assert not result.is_complete
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())],
                 usage=RequestUsage(input_tokens=51),
                 model_name='test',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_name='test',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
                     ToolReturnPart(
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -191,20 +209,25 @@ async def ret_a(x: str) -> str:
     )
     assert result.all_messages() == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())],
                 usage=RequestUsage(input_tokens=51),
                 model_name='test',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_name='test',
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
                     ToolReturnPart(
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"ret_a":"a-apple"}')],
@@ -212,6 +235,7 @@ async def ret_a(x: str) -> str:
                 model_name='test',
                 timestamp=IsNow(tz=timezone.utc),
                 provider_name='test',
+                run_id=IsStr(),
             ),
         ]
     )
@@ -387,6 +411,7 @@ def upcase(text: str) -> str:
             model_name='test',
             timestamp=IsDatetime(),
             provider_name='test',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -500,6 +525,7 @@ def upcase(text: str) -> str:
             model_name='test',
             timestamp=IsDatetime(),
             provider_name='test',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -561,12 +587,16 @@ async def ret_a(x: str) -> str:
     async with agent.run_stream('hello') as result:
         assert result.all_messages() == snapshot(
             [
-                ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+                ModelRequest(
+                    parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                    run_id=IsStr(),
+                ),
                 ModelResponse(
                     parts=[ToolCallPart(tool_name='ret_a', args='{"x": "hello"}', tool_call_id=IsStr())],
                     usage=RequestUsage(input_tokens=50, output_tokens=5),
                     model_name='function::stream_structured_function',
                     timestamp=IsNow(tz=timezone.utc),
+                    run_id=IsStr(),
                 ),
                 ModelRequest(
                     parts=[
@@ -576,19 +606,24 @@ async def ret_a(x: str) -> str:
                             timestamp=IsNow(tz=timezone.utc),
                             tool_call_id=IsStr(),
                         )
-                    ]
+                    ],
+                    run_id=IsStr(),
                 ),
             ]
         )
        assert await result.get_output() == snapshot(('hello world', 2))
         assert result.all_messages() == snapshot(
             [
-                ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+                ModelRequest(
+                    parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                    run_id=IsStr(),
+                ),
                 ModelResponse(
                     parts=[ToolCallPart(tool_name='ret_a', args='{"x": "hello"}', tool_call_id=IsStr())],
                     usage=RequestUsage(input_tokens=50, output_tokens=5),
                     model_name='function::stream_structured_function',
                     timestamp=IsNow(tz=timezone.utc),
+                    run_id=IsStr(),
                 ),
                 ModelRequest(
                     parts=[
@@ -598,7 +633,8 @@ async def ret_a(x: str) -> str:
                             timestamp=IsNow(tz=timezone.utc),
                             tool_call_id=IsStr(),
                         )
-                    ]
+                    ],
+                    run_id=IsStr(),
                 ),
                 ModelResponse(
                     parts=[
@@ -611,6 +647,7 @@ async def ret_a(x: str) -> str:
                     usage=RequestUsage(input_tokens=50, output_tokens=7),
                     model_name='function::stream_structured_function',
                     timestamp=IsNow(tz=timezone.utc),
+                    run_id=IsStr(),
                 ),
                 ModelRequest(
                     parts=[
@@ -620,7 +657,8 @@ async def ret_a(x: str) -> str:
                             timestamp=IsNow(tz=timezone.utc),
                             tool_call_id=IsStr(),
                         )
-                    ]
+                    ],
+                    run_id=IsStr(),
                 ),
             ]
         )
@@ -650,20 +688,23 @@ async def stream_structured_function(
                     content='hello',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[],
             usage=RequestUsage(input_tokens=50),
             model_name='function::stream_structured_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
-        ModelRequest(parts=[]),
+        ModelRequest(parts=[], run_id=IsStr()),
         ModelResponse(
             parts=[TextPart(content='ok here is text')],
             usage=RequestUsage(input_tokens=50, output_tokens=4),
             model_name='function::stream_structured_function',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -690,12 +731,16 @@ async def ret_a(x: str) -> str:  # pragma: no cover
 
     assert messages == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='foobar', args='{}', tool_call_id=IsStr())],
                 usage=RequestUsage(input_tokens=50, output_tokens=1),
                 model_name='function::stream_structured_function',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
         ]
     )
@@ -742,7 +787,10 @@ def another_tool(y: int) -> int:  # pragma: no cover
     # Verify we got tool returns for all calls
     assert messages == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='test early strategy', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='test early strategy', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(tool_name='final_result', args='{"value": "final"}', tool_call_id=IsStr()),
@@ -752,6 +800,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
                 usage=RequestUsage(input_tokens=50, output_tokens=10),
                 model_name='function::sf',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -773,7 +822,8 @@ def another_tool(y: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -798,7 +848,8 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
     assert messages == snapshot(
         [
             ModelRequest(
-                parts=[UserPromptPart(content='test multiple final results', timestamp=IsNow(tz=timezone.utc))]
+                parts=[UserPromptPart(content='test multiple final results', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[
@@ -808,6 +859,7 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
                 usage=RequestUsage(input_tokens=50, output_tokens=8),
                 model_name='function::sf',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -823,7 +875,8 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -863,7 +916,10 @@ def another_tool(y: int) -> int:
     # Verify we got tool returns in the correct order
     assert messages == snapshot(
         [
-            ModelRequest(parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))]),
+            ModelRequest(
+                parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))],
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     ToolCallPart(tool_name='final_result', args='{"value": "first"}', tool_call_id=IsStr()),
@@ -875,6 +931,7 @@ def another_tool(y: int) -> int:
                 usage=RequestUsage(input_tokens=50, output_tokens=18),
                 model_name='function::sf',
                 timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
             ),
             ModelRequest(
                 parts=[
@@ -902,7 +959,8 @@ def another_tool(y: int) -> int:
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
                     ),
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )
@@ -952,6 +1010,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
                     part_kind='user-prompt',
                 )
             ],
+            run_id=IsStr(),
             kind='request',
         ),
         ModelResponse(
@@ -984,6 +1043,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
             usage=RequestUsage(input_tokens=50, output_tokens=14),
             model_name='function::sf',
             timestamp=IsNow(tz=datetime.timezone.utc),
+            run_id=IsStr(),
             kind='response',
         ),
         ModelRequest(
             parts=[
@@ -1017,6 +1077,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
                     part_kind='retry-prompt',
                 ),
             ],
+            run_id=IsStr(),
             kind='request',
         ),
     ]
 )
@@ -1082,6 +1143,7 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                     part_kind='user-prompt',
                 )
             ],
+            run_id=IsStr(),
             kind='request',
         ),
         ModelResponse(
             parts=[
@@ -1101,6 +1163,7 @@ def regular_tool(x: int) -> int:  # pragma: no cover
             usage=RequestUsage(input_tokens=50, output_tokens=7),
             model_name='function::sf',
             timestamp=IsNow(tz=datetime.timezone.utc),
+            run_id=IsStr(),
             kind='response',
         ),
         ModelRequest(
             parts=[
@@ -1118,6 +1181,7 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                     timestamp=IsNow(tz=datetime.timezone.utc),
                 ),
             ],
+            run_id=IsStr(),
             kind='request',
         ),
     ]
 )
@@ -1168,6 +1232,7 @@ def regular_tool(x: int) -> int:
                     part_kind='user-prompt',
                 )
             ],
+            run_id=IsStr(),
             kind='request',
         ),
         ModelResponse(
@@ -1182,6 +1247,7 @@ def regular_tool(x: int) -> int:
             usage=RequestUsage(input_tokens=50, output_tokens=3),
             model_name='function::sf',
             timestamp=IsNow(tz=datetime.timezone.utc),
+            run_id=IsStr(),
             kind='response',
         ),
         ModelRequest(
             parts=[
@@ -1193,6 +1259,7 @@ def regular_tool(x: int) -> int:
                     timestamp=IsNow(tz=datetime.timezone.utc),
                 )
             ],
+            run_id=IsStr(),
             kind='request',
         ),
     ]
 )
@@ -1224,7 +1291,8 @@ def regular_tool(x: int) -> int:
                 UserPromptPart(
                     content='test early strategy with regular tool calls', timestamp=IsNow(tz=timezone.utc)
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='regular_tool', args={'x': 0}, tool_call_id=IsStr())],
@@ -1232,13 +1300,15 @@ def regular_tool(x: int) -> int:
             model_name='test',
             timestamp=IsNow(tz=timezone.utc),
             provider_name='test',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
                 ToolReturnPart(
                     tool_name='regular_tool', content=0, timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='final_result', args={'value': 'a'}, tool_call_id=IsStr())],
@@ -1246,6 +1316,7 @@ def regular_tool(x: int) -> int:
             model_name='test',
             timestamp=IsNow(tz=timezone.utc),
             provider_name='test',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1255,7 +1326,8 @@ def regular_tool(x: int) -> int:
                     timestamp=IsNow(tz=timezone.utc),
                     tool_call_id=IsStr(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1356,6 +1428,7 @@ def output_validator_simple(data: str) -> str:
     stream: AgentStream
     messages: list[ModelResponse] = []
     async with agent.iter('Hello') as run:
+        assert isinstance(run.run_id, str)
         async for node in run:
             if agent.is_model_request_node(node):
                 async with node.stream(run.ctx) as stream:
@@ -1634,6 +1707,7 @@ def my_tool(x: int) -> int:
 
     async with agent.run_stream('Hello') as result:
         assert not result.is_complete
+        assert isinstance(result.run_id, str)
         assert [c async for c in result.stream_output(debounce_by=None)] == snapshot(
             [DeferredToolRequests(calls=[ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id=IsStr())])]
         )
@@ -1651,6 +1725,7 @@ def my_tool(x: int) -> int:
                 model_name='test',
                 timestamp=IsDatetime(),
                 provider_name='test',
+                run_id=IsStr(),
             )
         ]
     )
@@ -1702,13 +1777,15 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                     content='Hello',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='my_tool', args='{"x": 1}', tool_call_id='my_tool')],
             usage=RequestUsage(input_tokens=50, output_tokens=3),
             model_name='function::llm',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1718,13 +1795,15 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                     tool_call_id='my_tool',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Done!')],
             usage=RequestUsage(input_tokens=50, output_tokens=1),
             model_name='function::llm',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
diff --git a/tests/test_temporal.py b/tests/test_temporal.py
index b3fe75d911..c32309d596 100644
--- a/tests/test_temporal.py
+++ b/tests/test_temporal.py
@@ -31,6 +31,7 @@
     PartStartEvent,
     RetryPromptPart,
     RunContext,
+    RunUsage,
     TextPart,
     TextPartDelta,
     ToolCallPart,
@@ -44,6 +45,7 @@
 from pydantic_ai.exceptions import ApprovalRequired, CallDeferred, ModelRetry, UserError
 from pydantic_ai.models import Model, cached_async_http_client
 from pydantic_ai.models.function import AgentInfo, FunctionModel
+from pydantic_ai.models.test import TestModel
 from pydantic_ai.run import AgentRunResult
 from pydantic_ai.tools import DeferredToolRequests, DeferredToolResults, ToolDefinition
 from pydantic_ai.usage import RequestUsage
@@ -63,6 +65,7 @@
     from pydantic_ai.durable_exec.temporal._function_toolset import TemporalFunctionToolset
     from pydantic_ai.durable_exec.temporal._mcp_server import TemporalMCPServer
     from pydantic_ai.durable_exec.temporal._model import TemporalModel
+    from pydantic_ai.durable_exec.temporal._run_context import TemporalRunContext
 except ImportError:  # pragma: lax no cover
     pytest.skip('temporal not installed', allow_module_level=True)
@@ -1814,6 +1817,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                 )
             ],
             instructions='Just call tools without asking for confirmation.',
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1844,6 +1848,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1861,6 +1866,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                 ),
             ],
             instructions='Just call tools without asking for confirmation.',
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1884,6 +1890,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
             provider_details={'finish_reason': 'stop'},
             provider_response_id=IsStr(),
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1933,7 +1940,8 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                     content='What is the weather in CDMX?',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1959,6 +1967,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1968,7 +1977,8 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1994,6 +2004,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
             provider_details={'finish_reason': 'tool_calls'},
             provider_response_id=IsStr(),
             finish_reason='tool_call',
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -2003,7 +2014,8 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='The weather in Mexico City is currently sunny.')],
@@ -2023,6 +2035,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
             provider_details={'finish_reason': 'stop'},
             provider_response_id=IsStr(),
             finish_reason='stop',
+            run_id=IsStr(),
         ),
     ]
 )
@@ -2152,3 +2165,18 @@ async def test_web_search_agent_run_in_workflow(allow_model_requests: None, clie
     assert output == snapshot(
         'Severe floods and landslides across Veracruz, Hidalgo, and Puebla have cut off hundreds of communities and left dozens dead and many missing, prompting a major federal emergency response. ([apnews.com](https://apnews.com/article/5d036e18057361281e984b44402d3b1b?utm_source=openai))'
     )
+
+
+def test_temporal_run_context_preserves_run_id():
+    ctx = RunContext(
+        deps=None,
+        model=TestModel(),
+        usage=RunUsage(),
+        run_id='run-123',
+    )
+
+    serialized = TemporalRunContext.serialize_run_context(ctx)
+    assert serialized['run_id'] == 'run-123'
+
+    reconstructed = TemporalRunContext.deserialize_run_context(serialized, deps=None)
+    assert reconstructed.run_id == 'run-123'
diff --git a/tests/test_tools.py b/tests/test_tools.py
index ea26d8ac91..3b30056c3a 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -1365,13 +1365,15 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                     content='Hello',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='my_tool', args={'x': 1}, tool_call_id='my_tool')],
             usage=RequestUsage(input_tokens=51, output_tokens=4),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1381,13 +1383,15 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                     tool_call_id='my_tool',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Done!')],
             usage=RequestUsage(input_tokens=52, output_tokens=5),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1521,7 +1525,8 @@ def buy(fruit: str):
                     content='What do an apple, a banana, a pear and a grape cost? Also buy me a pear.',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1536,6 +1541,7 @@ def buy(fruit: str):
             usage=RequestUsage(input_tokens=68, output_tokens=35),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1573,7 +1579,8 @@ def buy(fruit: str):
                     content='The price of pear is 10.0.',
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1611,7 +1618,8 @@ def buy(fruit: str):
                     content='What do an apple, a banana, a pear and a grape cost? Also buy me a pear.',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1626,6 +1634,7 @@ def buy(fruit: str):
             usage=RequestUsage(input_tokens=68, output_tokens=35),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1663,7 +1672,8 @@ def buy(fruit: str):
                     content='The price of pear is 10.0.',
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1690,13 +1700,15 @@ def buy(fruit: str):
                     content='I bought a banana',
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Done!')],
             usage=RequestUsage(input_tokens=137, output_tokens=36),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1728,13 +1740,15 @@ def buy(fruit: str):
                     content='I bought a banana',
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Done!')],
             usage=RequestUsage(input_tokens=137, output_tokens=36),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1747,7 +1761,8 @@ def buy(fruit: str):
                     content='What do an apple, a banana, a pear and a grape cost? Also buy me a pear.',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1762,6 +1777,7 @@ def buy(fruit: str):
             usage=RequestUsage(input_tokens=68, output_tokens=35),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1902,7 +1918,8 @@ def bar(x: int) -> int:
                     content='foo',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1913,6 +1930,7 @@ def bar(x: int) -> int:
             usage=RequestUsage(input_tokens=51, output_tokens=12),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1922,7 +1940,8 @@ def bar(x: int) -> int:
                     tool_call_id='bar',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
     ]
 )
@@ -1952,7 +1971,8 @@ def bar(x: int) -> int:
                     content='foo',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[
@@ -1963,6 +1983,7 @@ def bar(x: int) -> int:
             usage=RequestUsage(input_tokens=51, output_tokens=12),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1972,7 +1993,8 @@ def bar(x: int) -> int:
                     tool_call_id='bar',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -1988,13 +2010,15 @@ def bar(x: int) -> int:
                     tool_call_id='foo2',
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Done!')],
             usage=RequestUsage(input_tokens=59, output_tokens=13),
             model_name='function:llm:',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
@@ -2133,13 +2157,15 @@ def always_fail(ctx: RunContext[None]) -> str:
                     content='Always fail!',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='always_fail', args={}, tool_call_id=IsStr())],
             usage=RequestUsage(input_tokens=52, output_tokens=2),
             model_name='test',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -2149,13 +2175,15 @@ def always_fail(ctx: RunContext[None]) -> str:
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='always_fail', args={}, tool_call_id=IsStr())],
             usage=RequestUsage(input_tokens=62, output_tokens=4),
             model_name='test',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -2165,13 +2193,15 @@ def always_fail(ctx: RunContext[None]) -> str:
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[ToolCallPart(tool_name='always_fail', args={}, tool_call_id=IsStr())],
             usage=RequestUsage(input_tokens=72, output_tokens=6),
             model_name='test',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
         ModelRequest(
             parts=[
@@ -2181,13 +2211,15 @@ def always_fail(ctx: RunContext[None]) -> str:
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='{"always_fail":"I guess you never learn"}')],
             usage=RequestUsage(input_tokens=77, output_tokens=14),
             model_name='test',
             timestamp=IsDatetime(),
+            run_id=IsStr(),
         ),
     ]
 )
diff --git a/tests/test_usage_limits.py b/tests/test_usage_limits.py
index d3c40ed375..ac17fd0be5 100644
--- a/tests/test_usage_limits.py
+++ b/tests/test_usage_limits.py
@@ -98,7 +98,10 @@ async def ret_a(x: str) -> str:
         assert not result.is_complete
         assert result.all_messages() == snapshot(
             [
-                ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
+                ModelRequest(
+                    parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                    run_id=IsStr(),
+                ),
                 ModelResponse(
                     parts=[
                         ToolCallPart(
@@ -111,6 +114,7 @@ async def ret_a(x: str) -> str:
                     model_name='test',
                     timestamp=IsNow(tz=timezone.utc),
                     provider_name='test',
+                    run_id=IsStr(),
                 ),
                 ModelRequest(
                     parts=[
@@ -120,7 +124,8 @@ async def ret_a(x: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
                     )
-                ]
+                ],
+                run_id=IsStr(),
             ),
         ]
     )