diff --git a/python/mirascope/llm/providers/__init__.py b/python/mirascope/llm/providers/__init__.py
index 726879ebad..fdc02ce67d 100644
--- a/python/mirascope/llm/providers/__init__.py
+++ b/python/mirascope/llm/providers/__init__.py
@@ -6,6 +6,7 @@
 )
 from .base import BaseProvider, Params, Provider
 from .google import GoogleModelId, GoogleProvider
+from .mirascope import MirascopeProvider
 from .mlx import MLXModelId, MLXProvider
 from .model_id import ModelId
 from .ollama import OllamaProvider
@@ -32,6 +33,7 @@
     "GoogleProvider",
     "MLXModelId",
     "MLXProvider",
+    "MirascopeProvider",
     "ModelId",
     "OllamaProvider",
     "OpenAIModelId",
diff --git a/python/mirascope/llm/providers/mirascope/__init__.py b/python/mirascope/llm/providers/mirascope/__init__.py
new file mode 100644
index 0000000000..72a324063c
--- /dev/null
+++ b/python/mirascope/llm/providers/mirascope/__init__.py
@@ -0,0 +1,5 @@
+"""Mirascope Router provider for routing LLM requests."""
+
+from .provider import MirascopeProvider
+
+__all__ = ["MirascopeProvider"]
diff --git a/python/mirascope/llm/providers/mirascope/_utils.py b/python/mirascope/llm/providers/mirascope/_utils.py
new file mode 100644
index 0000000000..1012df9710
--- /dev/null
+++ b/python/mirascope/llm/providers/mirascope/_utils.py
@@ -0,0 +1,77 @@
+"""Utility functions for Mirascope Router provider."""
+
+import os
+from typing import cast
+
+from ..base import Provider
+from ..provider_id import ProviderId
+
+
+def extract_provider_prefix(model_id: str) -> str | None:
+    """Extract provider prefix from model ID.
+
+    Args:
+        model_id: Model identifier in the format "provider/model-name"
+            e.g., "openai/gpt-4", "anthropic/claude-3", "google/gemini-pro"
+
+    Returns:
+        The provider prefix (e.g., "openai", "anthropic", "google") or None if invalid format.
+    """
+    if "/" not in model_id:
+        return None
+    return model_id.split("/", 1)[0]
+
+
+def get_default_router_base_url() -> str:
+    """Get the default router base URL from environment or use default.
+
+    Returns:
+        The router base URL (without trailing provider path).
+    """
+    return os.environ.get(
+        "MIRASCOPE_ROUTER_BASE_URL", "https://mirascope.com/router/v0"
+    )
+
+
+def create_underlying_provider(
+    provider_prefix: str, api_key: str, router_base_url: str
+) -> Provider:
+    """Create and cache an underlying provider instance using provider_singleton.
+
+    This function constructs the appropriate router URL for the provider and
+    delegates to provider_singleton for caching and instantiation.
+
+    Args:
+        provider_prefix: The provider name (e.g., "openai", "anthropic", "google",
+            "openai:completions", "openai:responses")
+        api_key: The API key to use for authentication
+        router_base_url: The base router URL (e.g., "https://mirascope.com/router/v0")
+
+    Returns:
+        A cached provider instance configured for the Mirascope Router.
+
+    Raises:
+        ValueError: If the provider is unsupported.
+    """
+    # Extract base provider name (handles variants like "openai:completions")
+    base_provider = provider_prefix.split(":")[0]
+
+    if base_provider not in ["anthropic", "google", "openai"]:
+        raise ValueError(
+            f"Unsupported provider: {provider_prefix}. "
+            f"Mirascope Router currently supports: anthropic, google, openai"
+        )
+
+    base_url = f"{router_base_url}/{base_provider}"
+    if base_provider == "openai":  # OpenAI expects /v1, which their SDK doesn't add
+        base_url = f"{base_url}/v1"
+
+    # Lazy import to avoid circular dependencies
+    from ..provider_registry import provider_singleton
+
+    # Use provider_singleton which provides caching
+    return provider_singleton(
+        cast(ProviderId, provider_prefix),
+        api_key=api_key,
+        base_url=base_url,
+    )
diff --git a/python/mirascope/llm/providers/mirascope/provider.py b/python/mirascope/llm/providers/mirascope/provider.py
new file mode 100644
index 0000000000..74e4983373
--- /dev/null
+++ b/python/mirascope/llm/providers/mirascope/provider.py
@@ -0,0 +1,318 @@
+"""Mirascope Router provider that routes requests through the Mirascope Router API."""
+
+import os
+from collections.abc import Sequence
+from typing_extensions import Unpack
+
+from ...context import Context, DepsT
+from ...formatting import Format, FormattableT
+from ...messages import Message
+from ...responses import (
+    AsyncContextResponse,
+    AsyncContextStreamResponse,
+    AsyncResponse,
+    AsyncStreamResponse,
+    ContextResponse,
+    ContextStreamResponse,
+    Response,
+    StreamResponse,
+)
+from ...tools import (
+    AsyncContextTool,
+    AsyncContextToolkit,
+    AsyncTool,
+    AsyncToolkit,
+    ContextTool,
+    ContextToolkit,
+    Tool,
+    Toolkit,
+)
+from ..base import BaseProvider, Params, Provider
+from . import _utils
+
+
+class MirascopeProvider(BaseProvider[None]):
+    """Provider that routes LLM requests through the Mirascope Router API.
+
+    The Mirascope Router provides a unified API for multiple LLM providers
+    (Anthropic, Google, OpenAI) with usage tracking and cost calculation.
+
+    This provider:
+    - Takes model IDs in the format "provider/model-name" (e.g., "openai/gpt-4")
+    - Routes requests to the Mirascope Router endpoint
+    - Delegates to the appropriate underlying provider (Anthropic, Google, or OpenAI)
+    - Uses MIRASCOPE_API_KEY for authentication
+
+    Environment Variables:
+        MIRASCOPE_API_KEY: Required API key for Mirascope Router authentication
+        MIRASCOPE_ROUTER_BASE_URL: Optional base URL override (default: https://mirascope.com/router/v0)
+
+    Example:
+        ```python
+        import os
+        from mirascope import llm
+
+        os.environ["MIRASCOPE_API_KEY"] = "mk..."
+
+        # Register the Mirascope provider
+        llm.register_provider(
+            "mirascope",
+            scope=["anthropic/", "google/", "openai/"],
+        )
+
+        # Use with llm.call decorator
+        @llm.call("openai/gpt-4")
+        def recommend_book(genre: str):
+            return f"Recommend a {genre} book"
+
+        response = recommend_book("fantasy")
+        print(response.content)
+        ```
+    """
+
+    id = "mirascope"
+    default_scope = ["anthropic/", "google/", "openai/"]
+    error_map = {}
+    """Empty error map since MirascopeProvider delegates to underlying providers.
+
+    Error handling is performed by the underlying provider instances (Anthropic,
+    Google, OpenAI), which have their own error maps. Any exceptions that bubble
+    up from underlying providers are already converted to Mirascope exceptions.
+    """
+
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None
+    ) -> None:
+        """Initialize the Mirascope provider.
+
+        Args:
+            api_key: Mirascope API key. If not provided, reads from MIRASCOPE_API_KEY
+                environment variable.
+            base_url: Optional base URL override for the Mirascope Router. If not
+                provided, reads from MIRASCOPE_ROUTER_BASE_URL environment variable
+                or defaults to https://mirascope.com/router/v0
+        """
+        api_key = api_key or os.environ.get("MIRASCOPE_API_KEY")
+        if not api_key:
+            raise ValueError(
+                "Mirascope API key not found. "
+                "Set MIRASCOPE_API_KEY environment variable or pass api_key parameter."
+            )
+
+        self.api_key = api_key
+        self.router_base_url = base_url or _utils.get_default_router_base_url()
+        self.client = None  # No single client; we create per-provider clients
+
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from exception.
+
+        Since MirascopeProvider delegates to underlying providers, this method
+        is not used for direct error extraction. Underlying providers handle
+        their own status code extraction.
+
+        Args:
+            e: The exception to extract status code from.
+
+        Returns:
+            None, as status extraction is handled by underlying providers.
+        """
+        return None
+
+    def _get_underlying_provider(self, model_id: str) -> Provider:
+        """Get the underlying provider for a model ID.
+
+        Args:
+            model_id: Model identifier in format "provider/model-name"
+
+        Returns:
+            The appropriate cached provider instance (Anthropic, Google, or OpenAI)
+
+        Raises:
+            ValueError: If the model ID format is invalid or provider is unsupported
+        """
+        provider_prefix = _utils.extract_provider_prefix(model_id)
+        if not provider_prefix:
+            raise ValueError(
+                f"Invalid model ID format: {model_id}. "
+                f"Expected format 'provider/model-name' (e.g., 'openai/gpt-4')"
+            )
+
+        # Use the cached function to get/create the provider
+        return _utils.create_underlying_provider(
+            provider_prefix=provider_prefix,
+            api_key=self.api_key,
+            router_base_url=self.router_base_url,
+        )
+
+    def _call(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Generate an `llm.Response` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return provider.call(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    def _context_call(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return provider.context_call(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _call_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generate an `llm.AsyncResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return await provider.call_async(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return await provider.context_call_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    def _stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Stream an `llm.StreamResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return provider.stream(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    def _context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Stream an `llm.ContextStreamResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return provider.context_stream(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Stream an `llm.AsyncStreamResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return await provider.stream_async(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Stream an `llm.AsyncContextStreamResponse` by calling through the Mirascope Router."""
+        provider = self._get_underlying_provider(model_id)
+        return await provider.context_stream_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
diff --git a/python/mirascope/llm/providers/provider_id.py b/python/mirascope/llm/providers/provider_id.py
index 2573ae621f..84f4f37e38 100644
--- a/python/mirascope/llm/providers/provider_id.py
+++ b/python/mirascope/llm/providers/provider_id.py
@@ -6,6 +6,7 @@
     "anthropic",  # Anthropic provider via AnthropicProvider
     "anthropic-beta",  # Anthropic beta provider via AnthropicBetaProvider
     "google",  # Google provider via GoogleProvider
+    "mirascope",  # Mirascope Router provider via MirascopeProvider
     "mlx",  # Local inference powered by `mlx-lm`, via MLXProvider
     "ollama",  # Ollama provider via OllamaProvider
     "openai",  # OpenAI provider via OpenAIProvider (prefers Responses routing when available)
diff --git a/python/mirascope/llm/providers/provider_registry.py b/python/mirascope/llm/providers/provider_registry.py
index 16033838d6..c698eaac72 100644
--- a/python/mirascope/llm/providers/provider_registry.py
+++ b/python/mirascope/llm/providers/provider_registry.py
@@ -7,6 +7,7 @@
 from .anthropic import AnthropicProvider
 from .base import Provider
 from .google import GoogleProvider
+from .mirascope import MirascopeProvider
 from .mlx import MLXProvider
 from .ollama import OllamaProvider
 from .openai import OpenAIProvider
@@ -60,6 +61,8 @@ def provider_singleton(
             return AnthropicProvider(api_key=api_key, base_url=base_url)
         case "google":
             return GoogleProvider(api_key=api_key, base_url=base_url)
+        case "mirascope":
+            return MirascopeProvider(api_key=api_key, base_url=base_url)
         case "mlx":  # pragma: no cover (MLX is only available on macOS)
             return MLXProvider()
         case "ollama":
diff --git a/python/tests/api/test_client.py b/python/tests/api/test_client.py
index 94137cba48..1b6af89278 100644
--- a/python/tests/api/test_client.py
+++ b/python/tests/api/test_client.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import os
 from collections.abc import Generator
 
 import httpx
@@ -26,6 +27,17 @@ def reset_client_caches() -> Generator[None, None, None]:
     close_cached_clients()
 
 
+@pytest.fixture(autouse=True)
+def clear_mirascope_api_key_from_environment() -> Generator[None, None, None]:
+    """Ensure MIRASCOPE_API_KEY is unset so API-key validation tests behave deterministically."""
+    if "MIRASCOPE_API_KEY" in os.environ:
+        api_key = os.environ.pop("MIRASCOPE_API_KEY")
+        yield
+        os.environ["MIRASCOPE_API_KEY"] = api_key
+    else:
+        yield
+
+
 def test_sync_client_requires_api_key() -> None:
     """Test that synchronous client requires an API key when instantiating."""
     with settings(api_key=None), pytest.raises(RuntimeError):
diff --git a/python/tests/conftest.py b/python/tests/conftest.py
index 090a5b4475..604cee02f3 100644
--- a/python/tests/conftest.py
+++ b/python/tests/conftest.py
@@ -22,6 +22,7 @@ def load_api_keys() -> None:
     # Set dummy keys if not present so that tests pass in CI.
os.environ.setdefault("ANTHROPIC_API_KEY", "dummy-anthropic-key") os.environ.setdefault("GOOGLE_API_KEY", "dummy-google-key") + os.environ.setdefault("MIRASCOPE_API_KEY", "dummy-mirascope-key") os.environ.setdefault("OPENAI_API_KEY", "dummy-openai-key") os.environ.setdefault("TOGETHER_API_KEY", "dummy-together-key") diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider/anthropic_claude_haiku_4_5.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider/anthropic_claude_haiku_4_5.yaml new file mode 100644 index 0000000000..13dff9c596 --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider/anthropic_claude_haiku_4_5.yaml @@ -0,0 +1,106 @@ +interactions: +- request: + body: '{"max_tokens":16000,"messages":[{"role":"user","content":"What is 4200 + + 42?"}],"model":"claude-haiku-4-5"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '107' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - Anthropic/Python 0.75.0 + x-api-key: + - + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.75.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + x-stainless-timeout: + - '600' + method: POST + uri: http://localhost:3000/router/v0/anthropic/v1/messages + response: + body: + string: '{"model":"claude-haiku-4-5-20251001","id":"msg_01RysMuqFTMTVDxjjk63Ahst","type":"message","role":"assistant","content":[{"type":"text","text":"4200 + + 42 = 4242"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":17,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":15,"service_tier":"standard"}}' + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + anthropic-organization-id: + - 217a607f-ed5e-40af-8a7d-f83ed52d59d6 + anthropic-ratelimit-input-tokens-limit: + - '4000000' + anthropic-ratelimit-input-tokens-remaining: + - '4000000' + anthropic-ratelimit-input-tokens-reset: + - '2026-01-06T07:11:49Z' + anthropic-ratelimit-output-tokens-limit: + - '800000' + anthropic-ratelimit-output-tokens-remaining: + - '800000' + anthropic-ratelimit-output-tokens-reset: + - '2026-01-06T07:11:49Z' + anthropic-ratelimit-requests-limit: + - '4000' + anthropic-ratelimit-requests-remaining: + - '3999' + anthropic-ratelimit-requests-reset: + - '2026-01-06T07:11:49Z' + anthropic-ratelimit-tokens-limit: + - '4800000' + anthropic-ratelimit-tokens-remaining: + - '4800000' + anthropic-ratelimit-tokens-reset: + - '2026-01-06T07:11:49Z' + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b99674aaf592f75-LAX + content-type: + - application/json + date: + - Tue, 06 Jan 2026 07:11:49 GMT + request-id: + - req_011CWqi2ar6T8frrWYB2Txd6 + server: + - cloudflare + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-envoy-upstream-service-time: + - '814' + x-robots-tag: + - none + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider/google_gemini_2_5_flash.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider/google_gemini_2_5_flash.yaml new file mode 100644 index 0000000000..75068dba3f 
--- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider/google_gemini_2_5_flash.yaml @@ -0,0 +1,66 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "What is 4200 + 42?"}], "role": "user"}]}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '75' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - google-genai-sdk/1.48.0 gl-python/3.10.16 + x-goog-api-client: + - google-genai-sdk/1.48.0 gl-python/3.10.16 + x-goog-api-key: + - + method: POST + uri: http://localhost:3000/router/v0/google/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"text\": \"To find the sum of 4200 and 42, you + can add them:\\n\\n4200\\n+ 42\\n-----\\n4242\\n\\nSo, 4200 + 42 = **4242**.\"\n + \ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 13,\n \"candidatesTokenCount\": 56,\n \"totalTokenCount\": 107,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 13\n + \ }\n ],\n \"thoughtsTokenCount\": 38\n },\n \"modelVersion\": + \"gemini-2.5-flash\",\n \"responseId\": \"trVcae7UNO-hz7IP8PSGqQ8\"\n}\n" + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + alt-svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + content-type: + - application/json; charset=UTF-8 + date: + - Tue, 06 Jan 2026 07:11:50 GMT + server: + - scaffolding on HTTPServer2 + server-timing: + - gfet4t7; dur=889 + transfer-encoding: + - chunked + vary: + - Origin, X-Origin, Referer + x-content-type-options: + - nosniff + x-frame-options: + - SAMEORIGIN + x-xss-protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini.yaml new file mode 100644 index 0000000000..a7554b6eaf --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini.yaml @@ -0,0 +1,124 @@ +interactions: +- request: + body: '{"input":[{"content":"What is 4200 + 42?","role":"user"}],"model":"gpt-5-mini"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '79' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - OpenAI/Python 2.7.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.7.1 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + method: POST + uri: http://localhost:3000/router/v0/openai/v1/responses + response: + body: + string: "{\n \"id\": \"resp_0682958ac604b79f00695cb5aecaf8819795863248566ec2bd\",\n + \ \"object\": \"response\",\n \"created_at\": 1767683502,\n \"status\": + \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": + \"developer\"\n },\n \"completed_at\": 1767683504,\n \"error\": null,\n + \ \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": + null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-5-mini-2025-08-07\",\n + \ \"output\": [\n {\n \"id\": 
\"rs_0682958ac604b79f00695cb5af7f0c8197be05619c7bfb342a\",\n + \ \"type\": \"reasoning\",\n \"summary\": []\n },\n {\n \"id\": + \"msg_0682958ac604b79f00695cb5afeab88197876619489f88d7ce\",\n \"type\": + \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n + \ \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": + [],\n \"text\": \"4242\"\n }\n ],\n \"role\": \"assistant\"\n + \ }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": + null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n + \ \"reasoning\": {\n \"effort\": \"medium\",\n \"summary\": null\n },\n + \ \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": + true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": + \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": + \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": + \"disabled\",\n \"usage\": {\n \"input_tokens\": 15,\n \"input_tokens_details\": + {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 8,\n \"output_tokens_details\": + {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 23\n },\n + \ \"user\": null,\n \"metadata\": {}\n}" + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b99672008926b8d-LAX + content-type: + - application/json + date: + - Tue, 06 Jan 2026 07:11:44 GMT + openai-organization: + - sotai-i3ryiz + openai-processing-ms: + - '1550' + openai-project: + - proj_2kPLXdwNOjkHt3ifb0aZ4FwU + openai-version: + - '2020-10-01' + server: + - cloudflare + set-cookie: + - __cf_bm=3gT_NafgHr.v.PlgnZRYPpEc80LSgBl0Rv.7C9TF.4I-1767683504-1.0.1.1-y7vkMjQiO.p_Q7ktrFDLbc3gF7ml2Te_9Ae7hnHWChcf2oyZb2vCGQC2qoqSZW8saVPkIxLjW5SVlXku69xEjbcQx6iPY3aOnAdGxjWt1qw; + path=/; expires=Tue, 06-Jan-26 07:41:44 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=xnb5Z8kcKqkwnxBsm0LoqvizqK15iRjLHSd1q2Zr7lI-1767683504354-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '1554' + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '4000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '4000000' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_7105337fc6514ddd8c1c4051bb3bf98b + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini_completions.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini_completions.yaml new file mode 100644 index 0000000000..f813bd80a7 --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini_completions.yaml @@ -0,0 +1,116 @@ +interactions: +- request: + body: '{"messages":[{"role":"user","content":"What is 4200 + 42?"}],"model":"gpt-5-mini"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '82' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - OpenAI/Python 2.7.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + 
x-stainless-package-version: + - 2.7.1 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + method: POST + uri: http://localhost:3000/router/v0/openai/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CuvWD952J4PtKXUAH54iXxVG4AiGd\",\n \"object\": + \"chat.completion\",\n \"created\": 1767683505,\n \"model\": \"gpt-5-mini-2025-08-07\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"4242\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 15,\n \"completion_tokens\": + 11,\n \"total_tokens\": 26,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b996730a9b96b8d-LAX + content-type: + - application/json + date: + - Tue, 06 Jan 2026 07:11:47 GMT + openai-organization: + - sotai-i3ryiz + openai-processing-ms: + - '1516' + openai-project: + - proj_2kPLXdwNOjkHt3ifb0aZ4FwU + openai-version: + - '2020-10-01' + server: + - cloudflare + set-cookie: + - __cf_bm=aZW9PckvES.HS1DQSmWy7RJlJg5iPOmAG6INWL6WFW4-1767683507-1.0.1.1-eP98mDp9vPsOmjF62V9y3Ebrl8YwSpEOTxAtLlIWE_leyjO7L3ViOGEvNmrPp1GzYJZJTWPGF407YfXNWw7rJR_Rs4Q046GK0C8Rsd7ETmI; + path=/; expires=Tue, 06-Jan-26 07:41:47 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=89W6zMw4DvNpn8med4zV9kzW60eyFiJMu40vaJFyNMI-1767683507066-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '1746' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '4000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '3999993' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_687497ab4b8c4467ba4f8d8e249e2ef2 + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini_responses.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini_responses.yaml new file mode 100644 index 0000000000..d558bbfbdc --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider/openai_gpt_5_mini_responses.yaml @@ -0,0 +1,124 @@ +interactions: +- request: + body: '{"input":[{"content":"What is 4200 + 42?","role":"user"}],"model":"gpt-5-mini"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '79' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - OpenAI/Python 2.7.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.7.1 + 
x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + method: POST + uri: http://localhost:3000/router/v0/openai/v1/responses + response: + body: + string: "{\n \"id\": \"resp_0024f5b14c683a8c00695cb5b36d088197ad2fcd00269413e9\",\n + \ \"object\": \"response\",\n \"created_at\": 1767683507,\n \"status\": + \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": + \"developer\"\n },\n \"completed_at\": 1767683508,\n \"error\": null,\n + \ \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": + null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-5-mini-2025-08-07\",\n + \ \"output\": [\n {\n \"id\": \"rs_0024f5b14c683a8c00695cb5b3c13481979337a73e4da67821\",\n + \ \"type\": \"reasoning\",\n \"summary\": []\n },\n {\n \"id\": + \"msg_0024f5b14c683a8c00695cb5b4356c8197a76a5b6db4380e03\",\n \"type\": + \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n + \ \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\": + [],\n \"text\": \"4242\"\n }\n ],\n \"role\": \"assistant\"\n + \ }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": + null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n + \ \"reasoning\": {\n \"effort\": \"medium\",\n \"summary\": null\n },\n + \ \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\": + true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": + \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\": + \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\": + \"disabled\",\n \"usage\": {\n \"input_tokens\": 15,\n \"input_tokens_details\": + {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 8,\n \"output_tokens_details\": + {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 23\n },\n + \ \"user\": null,\n \"metadata\": {}\n}" + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b99674129b86b8d-LAX + content-type: + - application/json + date: + - Tue, 06 Jan 2026 07:11:48 GMT + openai-organization: + - sotai-i3ryiz + openai-processing-ms: + - '1227' + openai-project: + - proj_2kPLXdwNOjkHt3ifb0aZ4FwU + openai-version: + - '2020-10-01' + server: + - cloudflare + set-cookie: + - __cf_bm=p8Sv48ZjY6cfR.PgkMZkLXde9X2fyCgHaWJG66p5jdA-1767683508-1.0.1.1-DE4bMjFBMDOlXyE.akVCFoOLsNeqeSyXUG7nmalzHbHREW5k_vJV6EB9HzBdI2DoTc2XNJqJcQeJqCKyKzl9.q1gjzshA4EbI9ILW0ivpL4; + path=/; expires=Tue, 06-Jan-26 07:41:48 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=S8gtOHDb.m9bv8EoGFFVG9z61Fho20nHY8MkeK6JUmQ-1767683508670-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '1229' + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '4000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '4000000' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_4349904d2d7448b0955cd7206d259549 + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/anthropic_claude_haiku_4_5.yaml 
b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/anthropic_claude_haiku_4_5.yaml new file mode 100644 index 0000000000..13a80a000e --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/anthropic_claude_haiku_4_5.yaml @@ -0,0 +1,168 @@ +interactions: +- request: + body: '{"max_tokens":16000,"messages":[{"role":"user","content":"What is 4200 + + 42?"}],"model":"claude-haiku-4-5","stream":true}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '121' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - Anthropic/Python 0.75.0 + x-api-key: + - + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-helper-method: + - stream + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.75.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + x-stainless-stream-helper: + - messages + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: http://localhost:3000/router/v0/anthropic/v1/messages + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"model":"claude-haiku-4-5-20251001","id":"msg_01Myi6KALiBz18mUR4jmTdja","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":17,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":1,"service_tier":"standard"}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"4200 + + 42 ="} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + 4242"} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: ping + + data: {"type": "ping"} + + + event: ping + + data: {"type": "ping"} + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":17,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":15} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + anthropic-organization-id: + - 217a607f-ed5e-40af-8a7d-f83ed52d59d6 + anthropic-ratelimit-input-tokens-limit: + - '4000000' + anthropic-ratelimit-input-tokens-remaining: + - '4000000' + anthropic-ratelimit-input-tokens-reset: + - '2026-01-06T07:11:55Z' + anthropic-ratelimit-output-tokens-limit: + - '800000' + anthropic-ratelimit-output-tokens-remaining: + - '800000' + anthropic-ratelimit-output-tokens-reset: + - '2026-01-06T07:11:55Z' + anthropic-ratelimit-requests-limit: + - '4000' + anthropic-ratelimit-requests-remaining: + - '3999' + anthropic-ratelimit-requests-reset: + - '2026-01-06T07:11:55Z' + anthropic-ratelimit-tokens-limit: + - '4800000' + anthropic-ratelimit-tokens-remaining: + - '4800000' + anthropic-ratelimit-tokens-reset: + - '2026-01-06T07:11:55Z' + 
cache-control: + - no-cache + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b9967740a6dc8c1-LAX + content-type: + - text/event-stream; charset=utf-8 + date: + - Tue, 06 Jan 2026 07:11:55 GMT + request-id: + - req_011CWqi35AvdifMRTKD5Mnov + server: + - cloudflare + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-envoy-upstream-service-time: + - '363' + x-robots-tag: + - none + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/google_gemini_2_5_flash.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/google_gemini_2_5_flash.yaml new file mode 100644 index 0000000000..b38d756f27 --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/google_gemini_2_5_flash.yaml @@ -0,0 +1,70 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "What is 4200 + 42?"}], "role": "user"}]}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '75' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - google-genai-sdk/1.48.0 gl-python/3.10.16 + x-goog-api-client: + - google-genai-sdk/1.48.0 gl-python/3.10.16 + x-goog-api-key: + - + method: POST + uri: http://localhost:3000/router/v0/google/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"To + find the sum of 4200 + 42, you can add them together:\\n\\n4200\\n+ 42\\n-----\\n4242\\n\\nSo, + 4200 + 4\"}],\"role\": \"model\"},\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": + 13,\"candidatesTokenCount\": 49,\"totalTokenCount\": 85,\"promptTokensDetails\": + [{\"modality\": \"TEXT\",\"tokenCount\": 13}],\"thoughtsTokenCount\": 23},\"modelVersion\": + \"gemini-2.5-flash\",\"responseId\": \"vbVcaeCuFejSz7IPqbnIkAo\"}\r\n\r\ndata: + {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"2 = **4242**.\"}],\"role\": + \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": + 13,\"candidatesTokenCount\": 57,\"totalTokenCount\": 93,\"promptTokensDetails\": + [{\"modality\": \"TEXT\",\"tokenCount\": 13}],\"thoughtsTokenCount\": 23},\"modelVersion\": + \"gemini-2.5-flash\",\"responseId\": \"vbVcaeCuFejSz7IPqbnIkAo\"}\r\n\r\n" + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + alt-svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + content-disposition: + - attachment + content-type: + - text/event-stream + date: + - Tue, 06 Jan 2026 07:11:57 GMT + server: + - scaffolding on HTTPServer2 + server-timing: + - gfet4t7; dur=1099 + transfer-encoding: + - chunked + vary: + - Origin, X-Origin, Referer + x-content-type-options: + - nosniff + x-frame-options: + - SAMEORIGIN + x-xss-protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini.yaml new file mode 100644 index 0000000000..867f540fd7 --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini.yaml @@ -0,0 +1,150 @@ +interactions: +- request: + body: '{"input":[{"content":"What is 4200 + 42?","role":"user"}],"model":"gpt-5-mini","stream":true}' + headers: + accept: + - application/json + 
accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '93' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - OpenAI/Python 2.7.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.7.1 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + method: POST + uri: http://localhost:3000/router/v0/openai/v1/responses + response: + body: + string: 'event: response.created + + data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_0149c5fe3ad7762400695cb5b726a8819699e12076f6b75243","object":"response","created_at":1767683511,"status":"in_progress","background":false,"completed_at":null,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-mini-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"prompt_cache_retention":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + + event: response.in_progress + + data: {"type":"response.in_progress","sequence_number":1,"response":{"id":"resp_0149c5fe3ad7762400695cb5b726a8819699e12076f6b75243","object":"response","created_at":1767683511,"status":"in_progress","background":false,"completed_at":null,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-mini-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"prompt_cache_retention":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + + event: response.output_item.added + + data: {"type":"response.output_item.added","sequence_number":2,"output_index":0,"item":{"id":"rs_0149c5fe3ad7762400695cb5b77d808196876083bbb23e8596","type":"reasoning","summary":[]}} + + + event: response.output_item.done + + data: {"type":"response.output_item.done","sequence_number":3,"output_index":0,"item":{"id":"rs_0149c5fe3ad7762400695cb5b77d808196876083bbb23e8596","type":"reasoning","summary":[]}} + + + event: response.output_item.added + + data: {"type":"response.output_item.added","sequence_number":4,"output_index":1,"item":{"id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","type":"message","status":"in_progress","content":[],"role":"assistant"}} + + + event: response.content_part.added + + data: {"type":"response.content_part.added","sequence_number":5,"item_id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":""}} + + + event: response.output_text.delta + + data: 
{"type":"response.output_text.delta","sequence_number":6,"item_id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","output_index":1,"content_index":0,"delta":"424","logprobs":[],"obfuscation":"Fm7p3pIPFiVk7"} + + + event: response.output_text.delta + + data: {"type":"response.output_text.delta","sequence_number":7,"item_id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","output_index":1,"content_index":0,"delta":"2","logprobs":[],"obfuscation":"xwTb1UqkfJKfg3Q"} + + + event: response.output_text.done + + data: {"type":"response.output_text.done","sequence_number":8,"item_id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","output_index":1,"content_index":0,"text":"4242","logprobs":[]} + + + event: response.content_part.done + + data: {"type":"response.content_part.done","sequence_number":9,"item_id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":"4242"}} + + + event: response.output_item.done + + data: {"type":"response.output_item.done","sequence_number":10,"output_index":1,"item":{"id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"logprobs":[],"text":"4242"}],"role":"assistant"}} + + + event: response.completed + + data: {"type":"response.completed","sequence_number":11,"response":{"id":"resp_0149c5fe3ad7762400695cb5b726a8819699e12076f6b75243","object":"response","created_at":1767683511,"status":"completed","background":false,"completed_at":1767683511,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-mini-2025-08-07","output":[{"id":"rs_0149c5fe3ad7762400695cb5b77d808196876083bbb23e8596","type":"reasoning","summary":[]},{"id":"msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"logprobs":[],"text":"4242"}],"role":"assistant"}],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"prompt_cache_retention":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"default","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":15,"input_tokens_details":{"cached_tokens":0},"output_tokens":8,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":23},"user":null,"metadata":{}}} + + + ' + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b9967584d9e6b8d-LAX + content-type: + - text/event-stream; charset=utf-8 + date: + - Tue, 06 Jan 2026 07:11:51 GMT + openai-organization: + - sotai-i3ryiz + openai-processing-ms: + - '73' + openai-project: + - proj_2kPLXdwNOjkHt3ifb0aZ4FwU + openai-version: + - '2020-10-01' + server: + - cloudflare + set-cookie: + - __cf_bm=BNFWksX3MWtpFaFKDCQFIFIt_MRNCxXLEYYhtjhycZM-1767683511-1.0.1.1-PcFQYbl8jcJgGnMxN3yka3f9JyngaU5nncD360RGFGE2IIu1Haah2WTZk.2Fv9T_peTyHxssHkJ4LE8TjX0mmrRoushJiacY344FAEagggY; + path=/; expires=Tue, 06-Jan-26 07:41:51 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=b6i_KLCcyZM4Y7hY8HXTrRy6VXui8sKzmVEmZKugVEo-1767683511241-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 
+ strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '77' + x-request-id: + - req_2d11cb3210984a0094c09b1d097ef58a + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini_completions.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini_completions.yaml new file mode 100644 index 0000000000..c8e0db4f1b --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini_completions.yaml @@ -0,0 +1,124 @@ +interactions: +- request: + body: '{"messages":[{"role":"user","content":"What is 4200 + 42?"}],"model":"gpt-5-mini","stream":true,"stream_options":{"include_usage":true}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '136' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - OpenAI/Python 2.7.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.7.1 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + method: POST + uri: http://localhost:3000/router/v0/openai/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-CuvWLOqHN8DnuHRkCADkE1cyZOB16","object":"chat.completion.chunk","created":1767683513,"model":"gpt-5-mini-2025-08-07","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"finish_reason":null}],"usage":null,"obfuscation":"QPBHO"} + + + data: {"id":"chatcmpl-CuvWLOqHN8DnuHRkCADkE1cyZOB16","object":"chat.completion.chunk","created":1767683513,"model":"gpt-5-mini-2025-08-07","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"424"},"finish_reason":null}],"usage":null,"obfuscation":"sPRe"} + + + data: {"id":"chatcmpl-CuvWLOqHN8DnuHRkCADkE1cyZOB16","object":"chat.completion.chunk","created":1767683513,"model":"gpt-5-mini-2025-08-07","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"2"},"finish_reason":null}],"usage":null,"obfuscation":"TzznLc"} + + + data: {"id":"chatcmpl-CuvWLOqHN8DnuHRkCADkE1cyZOB16","object":"chat.completion.chunk","created":1767683513,"model":"gpt-5-mini-2025-08-07","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}],"usage":null,"obfuscation":"9"} + + + data: {"id":"chatcmpl-CuvWLOqHN8DnuHRkCADkE1cyZOB16","object":"chat.completion.chunk","created":1767683513,"model":"gpt-5-mini-2025-08-07","service_tier":"default","system_fingerprint":null,"choices":[],"usage":{"prompt_tokens":15,"completion_tokens":11,"total_tokens":26,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"RNwCse"} + + + data: [DONE] + + + ' + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b99675f8c8f6b8d-LAX + 
content-type: + - text/event-stream; charset=utf-8 + date: + - Tue, 06 Jan 2026 07:11:53 GMT + openai-organization: + - sotai-i3ryiz + openai-processing-ms: + - '963' + openai-project: + - proj_2kPLXdwNOjkHt3ifb0aZ4FwU + openai-version: + - '2020-10-01' + server: + - cloudflare + set-cookie: + - __cf_bm=adoW_5tH5b7lHitqCK2UKu7sRam7xmEIvscR7gfL1PI-1767683513-1.0.1.1-Wb3chTgImr4ifz1mSi1AJ0Y8v2UVwZMEF1xgCnHF3kePbXmNz8Mq90_ON6d1_lwnK4R22UT0q9j5mvpGqATf92U5v9B3TEa7eW1m.A.46ss; + path=/; expires=Tue, 06-Jan-26 07:41:53 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=FSgmM.4rY1G4vek3_HqvNGHUvWYh7zoQLZP87nRFZOc-1767683513993-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '1213' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '4000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '3999993' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_35aa500b326c48509084247f213eff38 + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini_responses.yaml b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini_responses.yaml new file mode 100644 index 0000000000..827256fb9e --- /dev/null +++ b/python/tests/e2e/input/cassettes/test_mirascope_provider_streaming/openai_gpt_5_mini_responses.yaml @@ -0,0 +1,150 @@ +interactions: +- request: + body: '{"input":[{"content":"What is 4200 + 42?","role":"user"}],"model":"gpt-5-mini","stream":true}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - + connection: + - keep-alive + content-length: + - '93' + content-type: + - application/json + host: + - localhost:3000 + user-agent: + - OpenAI/Python 2.7.1 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.7.1 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.16 + method: POST + uri: http://localhost:3000/router/v0/openai/v1/responses + response: + body: + string: 'event: response.created + + data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_02cbfd68e4ae57b500695cb5ba52148195a70b7e27d42bac70","object":"response","created_at":1767683514,"status":"in_progress","background":false,"completed_at":null,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-mini-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"prompt_cache_retention":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + + event: response.in_progress + + data: 
{"type":"response.in_progress","sequence_number":1,"response":{"id":"resp_02cbfd68e4ae57b500695cb5ba52148195a70b7e27d42bac70","object":"response","created_at":1767683514,"status":"in_progress","background":false,"completed_at":null,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-mini-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"prompt_cache_retention":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + + event: response.output_item.added + + data: {"type":"response.output_item.added","sequence_number":2,"output_index":0,"item":{"id":"rs_02cbfd68e4ae57b500695cb5baa70881958fc5f6be59e1512b","type":"reasoning","summary":[]}} + + + event: response.output_item.done + + data: {"type":"response.output_item.done","sequence_number":3,"output_index":0,"item":{"id":"rs_02cbfd68e4ae57b500695cb5baa70881958fc5f6be59e1512b","type":"reasoning","summary":[]}} + + + event: response.output_item.added + + data: {"type":"response.output_item.added","sequence_number":4,"output_index":1,"item":{"id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","type":"message","status":"in_progress","content":[],"role":"assistant"}} + + + event: response.content_part.added + + data: {"type":"response.content_part.added","sequence_number":5,"item_id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":""}} + + + event: response.output_text.delta + + data: {"type":"response.output_text.delta","sequence_number":6,"item_id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","output_index":1,"content_index":0,"delta":"424","logprobs":[],"obfuscation":"PrD7L1olzdTEz"} + + + event: response.output_text.delta + + data: {"type":"response.output_text.delta","sequence_number":7,"item_id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","output_index":1,"content_index":0,"delta":"2","logprobs":[],"obfuscation":"0VvkHZzMB7neVqi"} + + + event: response.output_text.done + + data: {"type":"response.output_text.done","sequence_number":8,"item_id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","output_index":1,"content_index":0,"text":"4242","logprobs":[]} + + + event: response.content_part.done + + data: {"type":"response.content_part.done","sequence_number":9,"item_id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","output_index":1,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":"4242"}} + + + event: response.output_item.done + + data: {"type":"response.output_item.done","sequence_number":10,"output_index":1,"item":{"id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"logprobs":[],"text":"4242"}],"role":"assistant"}} + + + event: response.completed + + data: 
{"type":"response.completed","sequence_number":11,"response":{"id":"resp_02cbfd68e4ae57b500695cb5ba52148195a70b7e27d42bac70","object":"response","created_at":1767683514,"status":"completed","background":false,"completed_at":1767683515,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-mini-2025-08-07","output":[{"id":"rs_02cbfd68e4ae57b500695cb5baa70881958fc5f6be59e1512b","type":"reasoning","summary":[]},{"id":"msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108","type":"message","status":"completed","content":[{"type":"output_text","annotations":[],"logprobs":[],"text":"4242"}],"role":"assistant"}],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"prompt_cache_retention":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"default","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":15,"input_tokens_details":{"cached_tokens":0},"output_tokens":8,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":23},"user":null,"metadata":{}}} + + + ' + headers: + Connection: + - keep-alive + Keep-Alive: + - timeout=5 + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 9b99676c2eea6b8d-LAX + content-type: + - text/event-stream; charset=utf-8 + date: + - Tue, 06 Jan 2026 07:11:54 GMT + openai-organization: + - sotai-i3ryiz + openai-processing-ms: + - '68' + openai-project: + - proj_2kPLXdwNOjkHt3ifb0aZ4FwU + openai-version: + - '2020-10-01' + server: + - cloudflare + set-cookie: + - __cf_bm=Olqb1QVdATlPspuSe1rwsr79o58iNwZT4YXvYjdQ.5w-1767683514-1.0.1.1-Bg3lpn5TaDH1bDbGAya15Id6j1.bmn87CRbwoh6VS4YmOJfj6kGioZP8YZfN343Aa3xVndzt2KE8xFuq6Lpiy0id_Z4ozBWkO5QUvFCpf5o; + path=/; expires=Tue, 06-Jan-26 07:41:54 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=DQTR4eqgVU8p3rfshqJ1Yizvboyg2Zw6r8iKzOjnlyg-1767683514401-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '72' + x-request-id: + - req_006e91f2986b4d3eaab67cd5088749ce + status: + code: 200 + message: OK +version: 1 diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider/anthropic_claude_haiku_4_5_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider/anthropic_claude_haiku_4_5_snapshots.py new file mode 100644 index 0000000000..4b77bb0581 --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider/anthropic_claude_haiku_4_5_snapshots.py @@ -0,0 +1,49 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "anthropic", + "model_id": "anthropic/claude-haiku-4-5", + "provider_model_name": "claude-haiku-4-5", + "params": {}, + "finish_reason": None, + "usage": { + "input_tokens": 17, + "output_tokens": 15, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "Usage(cache_creation=CacheCreation(ephemeral_1h_input_tokens=0, ephemeral_5m_input_tokens=0), cache_creation_input_tokens=0, cache_read_input_tokens=0, input_tokens=17, output_tokens=15, server_tool_use=None, 
service_tier='standard')", + "total_tokens": 32, + }, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4200 + 42 = 4242")], + provider_id="anthropic", + model_id="anthropic/claude-haiku-4-5", + provider_model_name="claude-haiku-4-5", + raw_message={ + "role": "assistant", + "content": [ + { + "citations": None, + "text": "4200 + 42 = 4242", + "type": "text", + } + ], + }, + ), + ], + "format": None, + "tools": [], + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider/google_gemini_2_5_flash_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider/google_gemini_2_5_flash_snapshots.py new file mode 100644 index 0000000000..aab4b8b805 --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider/google_gemini_2_5_flash_snapshots.py @@ -0,0 +1,83 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "google", + "model_id": "google/gemini-2.5-flash", + "provider_model_name": "gemini-2.5-flash", + "params": {}, + "finish_reason": None, + "usage": { + "input_tokens": 13, + "output_tokens": 94, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 38, + "raw": """\ +cache_tokens_details=None cached_content_token_count=None candidates_token_count=56 candidates_tokens_details=None prompt_token_count=13 prompt_tokens_details=[ModalityTokenCount( + modality=, + token_count=13 +)] thoughts_token_count=38 tool_use_prompt_token_count=None tool_use_prompt_tokens_details=None total_token_count=107 traffic_type=None\ +""", + "total_tokens": 107, + }, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[ + Text( + text="""\ +To find the sum of 4200 and 42, you can add them: + +4200 ++ 42 +----- +4242 + +So, 4200 + 42 = **4242**.\ +""" + ) + ], + provider_id="google", + model_id="google/gemini-2.5-flash", + provider_model_name="gemini-2.5-flash", + raw_message={ + "parts": [ + { + "function_call": None, + "code_execution_result": None, + "executable_code": None, + "file_data": None, + "function_response": None, + "inline_data": None, + "text": """\ +To find the sum of 4200 and 42, you can add them: + +4200 ++ 42 +----- +4242 + +So, 4200 + 42 = **4242**.\ +""", + "thought": None, + "thought_signature": None, + "video_metadata": None, + } + ], + "role": "model", + }, + ), + ], + "format": None, + "tools": [], + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_completions_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_completions_snapshots.py new file mode 100644 index 0000000000..74eb0250ae --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_completions_snapshots.py @@ -0,0 +1,44 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "openai", + "model_id": "openai/gpt-5-mini:completions", + "provider_model_name": "gpt-5-mini:completions", + "params": {}, + "finish_reason": None, + "usage": { + "input_tokens": 15, + "output_tokens": 11, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "CompletionUsage(completion_tokens=11, prompt_tokens=15, total_tokens=26, 
completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))", + "total_tokens": 26, + }, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4242")], + provider_id="openai", + model_id="openai/gpt-5-mini:completions", + provider_model_name="gpt-5-mini:completions", + raw_message={ + "content": "4242", + "role": "assistant", + "annotations": [], + }, + ), + ], + "format": None, + "tools": [], + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_responses_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_responses_snapshots.py new file mode 100644 index 0000000000..a000b4a25b --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_responses_snapshots.py @@ -0,0 +1,60 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "openai", + "model_id": "openai/gpt-5-mini:responses", + "provider_model_name": "gpt-5-mini:responses", + "params": {}, + "finish_reason": None, + "usage": { + "input_tokens": 15, + "output_tokens": 8, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "ResponseUsage(input_tokens=15, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=8, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=23)", + "total_tokens": 23, + }, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4242")], + provider_id="openai", + model_id="openai/gpt-5-mini:responses", + provider_model_name="gpt-5-mini:responses", + raw_message=[ + { + "id": "rs_0024f5b14c683a8c00695cb5b3c13481979337a73e4da67821", + "summary": [], + "type": "reasoning", + }, + { + "id": "msg_0024f5b14c683a8c00695cb5b4356c8197a76a5b6db4380e03", + "content": [ + { + "annotations": [], + "text": "4242", + "type": "output_text", + "logprobs": [], + } + ], + "role": "assistant", + "status": "completed", + "type": "message", + }, + ], + ), + ], + "format": None, + "tools": [], + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_snapshots.py new file mode 100644 index 0000000000..6964a24db7 --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider/openai_gpt_5_mini_snapshots.py @@ -0,0 +1,60 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "openai", + "model_id": "openai/gpt-5-mini", + "provider_model_name": "gpt-5-mini:responses", + "params": {}, + "finish_reason": None, + "usage": { + "input_tokens": 15, + "output_tokens": 8, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "ResponseUsage(input_tokens=15, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=8, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=23)", + "total_tokens": 23, + }, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + 
content=[Text(text="4242")], + provider_id="openai", + model_id="openai/gpt-5-mini", + provider_model_name="gpt-5-mini:responses", + raw_message=[ + { + "id": "rs_0682958ac604b79f00695cb5af7f0c8197be05619c7bfb342a", + "summary": [], + "type": "reasoning", + }, + { + "id": "msg_0682958ac604b79f00695cb5afeab88197876619489f88d7ce", + "content": [ + { + "annotations": [], + "text": "4242", + "type": "output_text", + "logprobs": [], + } + ], + "role": "assistant", + "status": "completed", + "type": "message", + }, + ], + ), + ], + "format": None, + "tools": [], + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/anthropic_claude_haiku_4_5_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/anthropic_claude_haiku_4_5_snapshots.py new file mode 100644 index 0000000000..ecaa65c5f4 --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/anthropic_claude_haiku_4_5_snapshots.py @@ -0,0 +1,43 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "anthropic", + "model_id": "anthropic/claude-haiku-4-5", + "provider_model_name": "claude-haiku-4-5", + "finish_reason": None, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4200 + 42 = 4242")], + provider_id="anthropic", + model_id="anthropic/claude-haiku-4-5", + provider_model_name="claude-haiku-4-5", + raw_message={ + "role": "assistant", + "content": [{"type": "text", "text": "4200 + 42 = 4242"}], + }, + ), + ], + "format": None, + "tools": [], + "usage": { + "input_tokens": 17, + "output_tokens": 15, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "None", + "total_tokens": 32, + }, + "n_chunks": 4, + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/google_gemini_2_5_flash_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/google_gemini_2_5_flash_snapshots.py new file mode 100644 index 0000000000..4511dced57 --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/google_gemini_2_5_flash_snapshots.py @@ -0,0 +1,90 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "google", + "model_id": "google/gemini-2.5-flash", + "provider_model_name": "gemini-2.5-flash", + "finish_reason": None, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[ + Text( + text="""\ +To find the sum of 4200 + 42, you can add them together: + +4200 ++ 42 +----- +4242 + +So, 4200 + 42 = **4242**.\ +""" + ) + ], + provider_id="google", + model_id="google/gemini-2.5-flash", + provider_model_name="gemini-2.5-flash", + raw_message={ + "parts": [ + { + "function_call": None, + "code_execution_result": None, + "executable_code": None, + "file_data": None, + "function_response": None, + "inline_data": None, + "text": """\ +To find the sum of 4200 + 42, you can add them together: + +4200 ++ 42 +----- +4242 + +So, 4200 + 4\ +""", + "thought": None, + "thought_signature": None, + "video_metadata": None, + }, + { + "function_call": None, + "code_execution_result": None, + "executable_code": None, + "file_data": None, + "function_response": None, + "inline_data": None, + "text": "2 = 
**4242**.", + "thought": None, + "thought_signature": None, + "video_metadata": None, + }, + ], + "role": "model", + }, + ), + ], + "format": None, + "tools": [], + "usage": { + "input_tokens": 13, + "output_tokens": 57, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 23, + "raw": "None", + "total_tokens": 70, + }, + "n_chunks": 4, + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_completions_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_completions_snapshots.py new file mode 100644 index 0000000000..0e6f87f58a --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_completions_snapshots.py @@ -0,0 +1,40 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "openai", + "model_id": "openai/gpt-5-mini:completions", + "provider_model_name": "gpt-5-mini:completions", + "finish_reason": None, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4242")], + provider_id="openai", + model_id="openai/gpt-5-mini:completions", + provider_model_name="gpt-5-mini:completions", + raw_message=None, + ), + ], + "format": None, + "tools": [], + "usage": { + "input_tokens": 15, + "output_tokens": 11, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "None", + "total_tokens": 26, + }, + "n_chunks": 4, + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_responses_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_responses_snapshots.py new file mode 100644 index 0000000000..a396369dca --- /dev/null +++ b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_responses_snapshots.py @@ -0,0 +1,60 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "openai", + "model_id": "openai/gpt-5-mini:responses", + "provider_model_name": "gpt-5-mini:responses", + "finish_reason": None, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4242")], + provider_id="openai", + model_id="openai/gpt-5-mini:responses", + provider_model_name="gpt-5-mini:responses", + raw_message=[ + { + "id": "rs_02cbfd68e4ae57b500695cb5baa70881958fc5f6be59e1512b", + "summary": [], + "type": "reasoning", + }, + { + "id": "msg_02cbfd68e4ae57b500695cb5baff7c8195976581ed7c4d5108", + "content": [ + { + "annotations": [], + "text": "4242", + "type": "output_text", + "logprobs": [], + } + ], + "role": "assistant", + "status": "completed", + "type": "message", + }, + ], + ), + ], + "format": None, + "tools": [], + "usage": { + "input_tokens": 15, + "output_tokens": 8, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "None", + "total_tokens": 23, + }, + "n_chunks": 4, + } + } +) diff --git a/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_snapshots.py b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_snapshots.py new file mode 100644 index 0000000000..fd7ec1690d --- /dev/null +++ 
b/python/tests/e2e/input/snapshots/test_mirascope_provider_streaming/openai_gpt_5_mini_snapshots.py @@ -0,0 +1,60 @@ +from inline_snapshot import snapshot + +from mirascope.llm import ( + AssistantMessage, + Text, + UserMessage, +) + +test_snapshot = snapshot( + { + "response": { + "provider_id": "openai", + "model_id": "openai/gpt-5-mini", + "provider_model_name": "gpt-5-mini:responses", + "finish_reason": None, + "messages": [ + UserMessage(content=[Text(text="What is 4200 + 42?")]), + AssistantMessage( + content=[Text(text="4242")], + provider_id="openai", + model_id="openai/gpt-5-mini", + provider_model_name="gpt-5-mini:responses", + raw_message=[ + { + "id": "rs_0149c5fe3ad7762400695cb5b77d808196876083bbb23e8596", + "summary": [], + "type": "reasoning", + }, + { + "id": "msg_0149c5fe3ad7762400695cb5b7dfd88196bf4e6587d59c4958", + "content": [ + { + "annotations": [], + "text": "4242", + "type": "output_text", + "logprobs": [], + } + ], + "role": "assistant", + "status": "completed", + "type": "message", + }, + ], + ), + ], + "format": None, + "tools": [], + "usage": { + "input_tokens": 15, + "output_tokens": 8, + "cache_read_tokens": 0, + "cache_write_tokens": 0, + "reasoning_tokens": 0, + "raw": "None", + "total_tokens": 23, + }, + "n_chunks": 4, + } + } +) diff --git a/python/tests/e2e/input/test_mirascope_provider.py b/python/tests/e2e/input/test_mirascope_provider.py new file mode 100644 index 0000000000..b5cb2ababd --- /dev/null +++ b/python/tests/e2e/input/test_mirascope_provider.py @@ -0,0 +1,88 @@ +"""End-to-end tests for Mirascope Router provider.""" + +import os + +import pytest + +from mirascope import llm +from tests.utils import ( + Snapshot, + snapshot_test, +) + +MIRASCOPE_MODEL_IDS = [ + "openai/gpt-5-mini", + "openai/gpt-5-mini:completions", + "openai/gpt-5-mini:responses", + "anthropic/claude-haiku-4-5", + "google/gemini-2.5-flash", +] + + +@pytest.mark.parametrize("model_id", MIRASCOPE_MODEL_IDS) +@pytest.mark.vcr +def test_mirascope_provider(model_id: llm.ModelId, snapshot: Snapshot) -> None: + """Test that Mirascope Router provider works correctly.""" + + @llm.call(model_id) + def add_numbers(a: int, b: int) -> str: + return f"What is {a} + {b}?" + + llm.register_provider( + "mirascope", + base_url="http://localhost:3000/router/v0", + ) + + model_developer, model_name = model_id.split("/", 1) + + with snapshot_test(snapshot) as snap: + response = add_numbers(4200, 42) + assert response.provider_id == model_developer + assert model_name in response.provider_model_name + snap.set_response(response) + assert "4242" in response.pretty(), ( + f"Expected '4242' in response: {response.pretty()}" + ) + + +@pytest.mark.parametrize("model_id", MIRASCOPE_MODEL_IDS) +@pytest.mark.vcr +def test_mirascope_provider_streaming( + model_id: llm.ModelId, snapshot: Snapshot +) -> None: + """Test that Mirascope Router provider works correctly with streaming.""" + + @llm.call(model_id) + def add_numbers(a: int, b: int) -> str: + return f"What is {a} + {b}?" 
+ + llm.register_provider( + "mirascope", + base_url="http://localhost:3000/router/v0", + ) + + model_developer, model_name = model_id.split("/", 1) + + with snapshot_test(snapshot) as snap: + stream = add_numbers.stream(4200, 42) + assert stream.provider_id == model_developer + assert model_name in stream.provider_model_name + + stream.finish() + content = stream.pretty() + + snap.set_response(stream) + assert "4242" in content, f"Expected '4242' in streamed content: {content}" + + +def test_mirascope_provider_missing_api_key() -> None: + """Test that Mirascope provider raises clear error when API key is missing.""" + original_key = os.environ.pop("MIRASCOPE_API_KEY", None) + try: + with pytest.raises(ValueError) as exc_info: + llm.providers.MirascopeProvider() + assert "Mirascope API key not found" in str(exc_info.value) + assert "MIRASCOPE_API_KEY" in str(exc_info.value) + finally: + if original_key is not None: + os.environ["MIRASCOPE_API_KEY"] = original_key diff --git a/python/tests/llm/providers/test_mirascope_provider.py b/python/tests/llm/providers/test_mirascope_provider.py new file mode 100644 index 0000000000..17b29f26a5 --- /dev/null +++ b/python/tests/llm/providers/test_mirascope_provider.py @@ -0,0 +1,409 @@ +"""Tests for MirascopeProvider and utilities.""" + +import os +from unittest.mock import AsyncMock, Mock, patch + +import pytest + +from mirascope.llm.providers.mirascope import _utils +from mirascope.llm.providers.mirascope.provider import MirascopeProvider + + +class TestMirascopeUtils: + """Tests for Mirascope utility functions.""" + + def test_extract_provider_prefix_valid(self) -> None: + """Test extracting provider prefix from valid model IDs.""" + assert _utils.extract_provider_prefix("openai/gpt-4") == "openai" + assert _utils.extract_provider_prefix("anthropic/claude-3") == "anthropic" + assert _utils.extract_provider_prefix("google/gemini-pro") == "google" + assert ( + _utils.extract_provider_prefix("openai/gpt-4-with-extra/stuff") == "openai" + ) + + def test_extract_provider_prefix_invalid(self) -> None: + """Test extracting provider prefix from invalid model IDs.""" + assert _utils.extract_provider_prefix("gpt-4") is None + assert _utils.extract_provider_prefix("") is None + assert _utils.extract_provider_prefix("no-slash") is None + + def test_get_default_router_base_url_default(self) -> None: + """Test getting default router base URL.""" + original_url = os.environ.pop("MIRASCOPE_ROUTER_BASE_URL", None) + try: + url = _utils.get_default_router_base_url() + assert url == "https://mirascope.com/router/v0" + finally: + if original_url is not None: + os.environ["MIRASCOPE_ROUTER_BASE_URL"] = original_url + + def test_get_default_router_base_url_from_env(self) -> None: + """Test getting router base URL from environment variable.""" + original_url = os.environ.get("MIRASCOPE_ROUTER_BASE_URL") + os.environ["MIRASCOPE_ROUTER_BASE_URL"] = "http://localhost:3000/router/v0" + try: + url = _utils.get_default_router_base_url() + assert url == "http://localhost:3000/router/v0" + finally: + if original_url is not None: + os.environ["MIRASCOPE_ROUTER_BASE_URL"] = original_url + else: + os.environ.pop("MIRASCOPE_ROUTER_BASE_URL", None) + + def test_create_underlying_provider_openai(self) -> None: + """Test creating OpenAI provider.""" + provider = _utils.create_underlying_provider( + provider_prefix="openai", + api_key="test-key", + router_base_url="http://localhost:3000/router/v0", + ) + assert provider.id == "openai" + assert "localhost:3000/router/v0/openai" in 
str(provider.client.base_url) + + def test_create_underlying_provider_anthropic(self) -> None: + """Test creating Anthropic provider.""" + provider = _utils.create_underlying_provider( + provider_prefix="anthropic", + api_key="test-key", + router_base_url="http://localhost:3000/router/v0", + ) + assert provider.id == "anthropic" + assert "localhost:3000/router/v0/anthropic" in str(provider.client.base_url) + + def test_create_underlying_provider_google(self) -> None: + """Test creating Google provider.""" + provider = _utils.create_underlying_provider( + provider_prefix="google", + api_key="test-key", + router_base_url="http://localhost:3000/router/v0", + ) + assert provider.id == "google" + + def test_create_underlying_provider_unsupported(self) -> None: + """Test creating provider with unsupported prefix.""" + with pytest.raises(ValueError) as exc_info: + _utils.create_underlying_provider( + provider_prefix="unknown", + api_key="test-key", + router_base_url="http://localhost:3000/router/v0", + ) + assert "Unsupported provider: unknown" in str(exc_info.value) + assert "anthropic, google, openai" in str(exc_info.value) + + def test_create_underlying_provider_caching(self) -> None: + """Test that provider creation is cached.""" + provider1 = _utils.create_underlying_provider( + provider_prefix="openai", + api_key="test-key", + router_base_url="http://localhost:3000/router/v0", + ) + provider2 = _utils.create_underlying_provider( + provider_prefix="openai", + api_key="test-key", + router_base_url="http://localhost:3000/router/v0", + ) + # Should be the exact same instance due to caching + assert provider1 is provider2 + + # Different parameters should create different instances + provider3 = _utils.create_underlying_provider( + provider_prefix="openai", + api_key="different-key", + router_base_url="http://localhost:3000/router/v0", + ) + assert provider1 is not provider3 + + +class TestMirascopeProvider: + """Tests for MirascopeProvider.""" + + def test_mirascope_provider_initialization_with_api_key(self) -> None: + """Test MirascopeProvider initialization with api_key.""" + provider = MirascopeProvider(api_key="test-api-key") + assert provider.id == "mirascope" + assert provider.default_scope == ["anthropic/", "google/", "openai/"] + assert provider.api_key == "test-api-key" + assert provider.router_base_url == "https://mirascope.com/router/v0" + + def test_mirascope_provider_missing_api_key(self) -> None: + """Test MirascopeProvider raises error when API key is missing.""" + original_key = os.environ.pop("MIRASCOPE_API_KEY", None) + try: + with pytest.raises(ValueError) as exc_info: + MirascopeProvider() + assert "Mirascope API key not found" in str(exc_info.value) + assert "MIRASCOPE_API_KEY" in str(exc_info.value) + finally: + if original_key is not None: + os.environ["MIRASCOPE_API_KEY"] = original_key + + def test_mirascope_provider_uses_env_var_api_key(self) -> None: + """Test MirascopeProvider uses MIRASCOPE_API_KEY from environment.""" + original_key = os.environ.get("MIRASCOPE_API_KEY") + os.environ["MIRASCOPE_API_KEY"] = "env-test-key" + try: + provider = MirascopeProvider() + assert provider.api_key == "env-test-key" + finally: + if original_key is not None: + os.environ["MIRASCOPE_API_KEY"] = original_key + else: + os.environ.pop("MIRASCOPE_API_KEY", None) + + def test_mirascope_provider_custom_base_url(self) -> None: + """Test MirascopeProvider with custom base_url.""" + provider = MirascopeProvider( + api_key="test-key", base_url="http://localhost:3000/router/v0" + ) + assert 
provider.router_base_url == "http://localhost:3000/router/v0" + + def test_mirascope_provider_uses_env_var_base_url(self) -> None: + """Test MirascopeProvider uses MIRASCOPE_ROUTER_BASE_URL from environment.""" + original_url = os.environ.get("MIRASCOPE_ROUTER_BASE_URL") + os.environ["MIRASCOPE_ROUTER_BASE_URL"] = "http://custom:8080/router/v0" + try: + provider = MirascopeProvider(api_key="test-key") + assert provider.router_base_url == "http://custom:8080/router/v0" + finally: + if original_url is not None: + os.environ["MIRASCOPE_ROUTER_BASE_URL"] = original_url + else: + os.environ.pop("MIRASCOPE_ROUTER_BASE_URL", None) + + def test_get_underlying_provider_invalid_format(self) -> None: + """Test _get_underlying_provider with invalid model ID format.""" + provider = MirascopeProvider(api_key="test-key") + + with pytest.raises(ValueError) as exc_info: + provider._get_underlying_provider("gpt-4") # pyright: ignore[reportPrivateUsage] + assert "Invalid model ID format: gpt-4" in str(exc_info.value) + assert "'provider/model-name'" in str(exc_info.value) + + def test_get_underlying_provider_valid_openai(self) -> None: + """Test _get_underlying_provider with valid OpenAI model ID.""" + provider = MirascopeProvider(api_key="test-key") + underlying = provider._get_underlying_provider("openai/gpt-4") # pyright: ignore[reportPrivateUsage] + assert underlying.id == "openai" + + def test_get_underlying_provider_valid_anthropic(self) -> None: + """Test _get_underlying_provider with valid Anthropic model ID.""" + provider = MirascopeProvider(api_key="test-key") + underlying = provider._get_underlying_provider("anthropic/claude-3") # pyright: ignore[reportPrivateUsage] + assert underlying.id == "anthropic" + + def test_get_underlying_provider_valid_google(self) -> None: + """Test _get_underlying_provider with valid Google model ID.""" + provider = MirascopeProvider(api_key="test-key") + underlying = provider._get_underlying_provider("google/gemini-pro") # pyright: ignore[reportPrivateUsage] + assert underlying.id == "google" + + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + def test_call_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _call delegates to underlying provider.""" + mock_underlying = Mock() + mock_underlying.call.return_value = Mock() + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + provider._call(model_id="openai/gpt-4", messages=[]) # pyright: ignore[reportPrivateUsage] + + mock_underlying.call.assert_called_once() + + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + def test_context_call_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _context_call delegates to underlying provider.""" + from mirascope.llm.context import Context + + mock_underlying = Mock() + mock_underlying.context_call.return_value = Mock() + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + ctx = Context(deps={}) + provider._context_call(ctx=ctx, model_id="openai/gpt-4", messages=[]) # pyright: ignore[reportPrivateUsage] + + mock_underlying.context_call.assert_called_once() + + @pytest.mark.asyncio + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + async def test_call_async_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _call_async delegates to underlying 
provider.""" + mock_underlying = Mock() + mock_underlying.call_async = AsyncMock(return_value=Mock()) + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + await provider._call_async(model_id="openai/gpt-4", messages=[]) # pyright: ignore[reportPrivateUsage] + + mock_underlying.call_async.assert_called_once() + + @pytest.mark.asyncio + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + async def test_context_call_async_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _context_call_async delegates to underlying provider.""" + from mirascope.llm.context import Context + + mock_underlying = Mock() + mock_underlying.context_call_async = AsyncMock(return_value=Mock()) + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + ctx = Context(deps={}) + await provider._context_call_async( # pyright: ignore[reportPrivateUsage] + ctx=ctx, model_id="openai/gpt-4", messages=[] + ) + + mock_underlying.context_call_async.assert_called_once() + + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + def test_stream_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _stream delegates to underlying provider.""" + mock_underlying = Mock() + mock_underlying.stream.return_value = Mock() + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + provider._stream(model_id="openai/gpt-4", messages=[]) # pyright: ignore[reportPrivateUsage] + + mock_underlying.stream.assert_called_once() + + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + def test_context_stream_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _context_stream delegates to underlying provider.""" + from mirascope.llm.context import Context + + mock_underlying = Mock() + mock_underlying.context_stream.return_value = Mock() + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + ctx = Context(deps={}) + provider._context_stream(ctx=ctx, model_id="openai/gpt-4", messages=[]) # pyright: ignore[reportPrivateUsage] + + mock_underlying.context_stream.assert_called_once() + + @pytest.mark.asyncio + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + async def test_stream_async_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _stream_async delegates to underlying provider.""" + mock_underlying = Mock() + mock_underlying.stream_async = AsyncMock(return_value=Mock()) + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + await provider._stream_async(model_id="openai/gpt-4", messages=[]) # pyright: ignore[reportPrivateUsage] + + mock_underlying.stream_async.assert_called_once() + + @pytest.mark.asyncio + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + async def test_context_stream_async_delegates_to_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that _context_stream_async delegates to underlying provider.""" + from mirascope.llm.context import Context + + mock_underlying = Mock() + mock_underlying.context_stream_async = AsyncMock(return_value=Mock()) + mock_create_provider.return_value = mock_underlying + + provider = 
MirascopeProvider(api_key="test-key") + ctx = Context(deps={}) + await provider._context_stream_async( # pyright: ignore[reportPrivateUsage] + ctx=ctx, model_id="openai/gpt-4", messages=[] + ) + + mock_underlying.context_stream_async.assert_called_once() + + +class TestMirascopeProviderErrorHandling: + """Tests for MirascopeProvider error handling.""" + + def test_error_map_exists(self) -> None: + """Test that error_map class variable is defined.""" + assert hasattr(MirascopeProvider, "error_map") + assert isinstance(MirascopeProvider.error_map, dict) + assert len(MirascopeProvider.error_map) == 0 + + def test_get_error_status_returns_none(self) -> None: + """Test that get_error_status returns None.""" + provider = MirascopeProvider(api_key="test-key") + result = provider.get_error_status(Exception("test")) + assert result is None + + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + def test_error_propagation_from_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that errors from underlying providers propagate correctly.""" + from mirascope.llm.exceptions import RateLimitError + + # Mock underlying provider to raise a Mirascope exception + mock_underlying = Mock() + mock_underlying.call.side_effect = RateLimitError("Rate limit exceeded") + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + + # Should propagate the RateLimitError from underlying provider + with pytest.raises(RateLimitError) as exc_info: + provider.call(model_id="openai/gpt-4", messages=[]) + + assert "Rate limit exceeded" in str(exc_info.value) + + @pytest.mark.asyncio + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + async def test_async_error_propagation_from_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that errors from underlying providers propagate correctly in async calls.""" + from mirascope.llm.exceptions import AuthenticationError + + # Mock underlying provider to raise a Mirascope exception + mock_underlying = Mock() + mock_underlying.call_async = AsyncMock( + side_effect=AuthenticationError("Invalid API key") + ) + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + + # Should propagate the AuthenticationError from underlying provider + with pytest.raises(AuthenticationError) as exc_info: + await provider.call_async(model_id="anthropic/claude-3", messages=[]) + + assert "Invalid API key" in str(exc_info.value) + + @patch("mirascope.llm.providers.mirascope._utils.create_underlying_provider") + def test_stream_error_propagation_from_underlying_provider( + self, mock_create_provider: Mock + ) -> None: + """Test that errors from underlying providers propagate correctly in streams.""" + from mirascope.llm.exceptions import ServerError + + # Mock underlying provider to raise a Mirascope exception + mock_underlying = Mock() + mock_underlying.stream.side_effect = ServerError("Internal server error") + mock_create_provider.return_value = mock_underlying + + provider = MirascopeProvider(api_key="test-key") + + # Should propagate the ServerError from underlying provider + with pytest.raises(ServerError) as exc_info: + provider.stream(model_id="google/gemini-pro", messages=[]) + + assert "Internal server error" in str(exc_info.value) diff --git a/python/tests/ops/cassettes/test_versioned_async_call.yaml b/python/tests/ops/cassettes/test_versioned_async_call.yaml index ace3383692..0ba90f76a5 
100644 --- a/python/tests/ops/cassettes/test_versioned_async_call.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_call.yaml @@ -152,7 +152,7 @@ interactions: - request: body: '{"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}}}' + def recommend(genre: str) -> str: ...","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}}}' headers: accept: - '*/*' @@ -177,7 +177,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z","isNew":true}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z","isNew":true}' headers: Connection: - keep-alive @@ -327,7 +327,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_call_call_method.yaml b/python/tests/ops/cassettes/test_versioned_async_call_call_method.yaml index fb32d8b937..53027b87f6 100644 --- a/python/tests/ops/cassettes/test_versioned_async_call_call_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_call_call_method.yaml @@ -126,7 +126,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive @@ -271,7 +271,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_call_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_async_call_stream_method.yaml index d919216083..bca5068c72 100644 --- a/python/tests/ops/cassettes/test_versioned_async_call_stream_method.yaml +++ 
b/python/tests/ops/cassettes/test_versioned_async_call_stream_method.yaml @@ -638,7 +638,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive @@ -995,7 +995,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_call_wrapped_method.yaml b/python/tests/ops/cassettes/test_versioned_async_call_wrapped_method.yaml index 48923dbbde..7294659159 100644 --- a/python/tests/ops/cassettes/test_versioned_async_call_wrapped_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_call_wrapped_method.yaml @@ -127,7 +127,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive @@ -272,7 +272,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_call_wrapped_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_async_call_wrapped_stream_method.yaml index e219840329..b48b53b8f9 100644 --- a/python/tests/ops/cassettes/test_versioned_async_call_wrapped_stream_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_call_wrapped_stream_method.yaml @@ -682,7 +682,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive @@ -1287,7 +1287,7 @@ interactions: string: '{"id":"1c6495c3-f5b4-4795-b99c-221ff3273156","hash":"0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85","signatureHash":"b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26","name":"recommend","description":null,"version":"2.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' + def recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:32.841Z","updatedAt":"2025-12-24T03:24:32.841Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_context_call.yaml b/python/tests/ops/cassettes/test_versioned_async_context_call.yaml index 2076980e1f..b114eb77be 100644 --- a/python/tests/ops/cassettes/test_versioned_async_context_call.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_context_call.yaml @@ -146,7 +146,7 @@ interactions: body: '{"code":"from mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","hash":"79085c0ac8178f3ae453795c87e509b7738abc1b1dcf58b045137c5cfe7e7923","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","signatureHash":"ae19bdc73bdb0f93f143b7df3c8cead8efc708c74688fcf000de53be729c5e96","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}}}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","signatureHash":"ae19bdc73bdb0f93f143b7df3c8cead8efc708c74688fcf000de53be729c5e96","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}}}' headers: accept: - '*/*' @@ -172,7 +172,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z","isNew":true}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z","isNew":true}' headers: Connection: - keep-alive @@ -318,7 +318,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_context_call_call_method.yaml b/python/tests/ops/cassettes/test_versioned_async_context_call_call_method.yaml index fbbdc6b625..3c6176050d 100644 --- a/python/tests/ops/cassettes/test_versioned_async_context_call_call_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_context_call_call_method.yaml @@ -128,7 +128,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive @@ -274,7 +274,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a 
{genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_context_call_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_async_context_call_stream_method.yaml index 6b60ed43e9..8ca152097c 100644 --- a/python/tests/ops/cassettes/test_versioned_async_context_call_stream_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_context_call_stream_method.yaml @@ -633,7 +633,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive @@ -1334,7 +1334,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_method.yaml 
b/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_method.yaml index 283eaf4684..e2b0c340be 100644 --- a/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_method.yaml @@ -128,7 +128,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive @@ -274,7 +274,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_stream_method.yaml index 685624fc7e..cf47dbc617 100644 --- a/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_stream_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_async_context_call_wrapped_stream_method.yaml @@ -657,7 +657,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive @@ -1476,7 +1476,7 @@ interactions: mirascope import llm\n\nctx = llm.Context(deps=\"As a librarian,\")\n\n\n@llm.call(\"openai/gpt-4o-mini\")\nasync def recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\nasync - def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' + def recommend(ctx: llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:51.560Z","updatedAt":"2025-12-24T03:24:51.560Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_call_call_method.yaml b/python/tests/ops/cassettes/test_versioned_call_call_method.yaml index 9206ecd3a0..5d7122c29d 100644 --- a/python/tests/ops/cassettes/test_versioned_call_call_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_call_call_method.yaml @@ -127,7 +127,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive @@ -272,7 +272,7 @@ interactions: string: 
'{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_call_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_call_stream_method.yaml index f0ef24aea4..dab824a70e 100644 --- a/python/tests/ops/cassettes/test_versioned_call_stream_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_call_stream_method.yaml @@ -325,7 +325,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive @@ -986,7 +986,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_call_sync.yaml b/python/tests/ops/cassettes/test_versioned_call_sync.yaml index 349f6945c7..be5589e3f5 100644 --- a/python/tests/ops/cassettes/test_versioned_call_sync.yaml +++ b/python/tests/ops/cassettes/test_versioned_call_sync.yaml @@ -151,7 +151,7 @@ interactions: - request: body: '{"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}}}' + recommend(genre: str) -> str: ...","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}}}' headers: accept: - '*/*' @@ -176,7 +176,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z","isNew":true}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z","isNew":true}' headers: Connection: - keep-alive @@ -327,7 +327,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef 
recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_call_with_tags.yaml b/python/tests/ops/cassettes/test_versioned_call_with_tags.yaml index 03f768c243..c19fa9badd 100644 --- a/python/tests/ops/cassettes/test_versioned_call_with_tags.yaml +++ b/python/tests/ops/cassettes/test_versioned_call_with_tags.yaml @@ -127,7 +127,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive @@ -271,7 +271,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_call_wrapped_method.yaml b/python/tests/ops/cassettes/test_versioned_call_wrapped_method.yaml index 9ace3dab85..4720f5f56f 100644 --- a/python/tests/ops/cassettes/test_versioned_call_wrapped_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_call_wrapped_method.yaml @@ -127,7 +127,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive @@ -272,7 +272,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_call_wrapped_stream.yaml b/python/tests/ops/cassettes/test_versioned_call_wrapped_stream.yaml index 6b29cee80e..56e1febb42 100644 --- a/python/tests/ops/cassettes/test_versioned_call_wrapped_stream.yaml +++ b/python/tests/ops/cassettes/test_versioned_call_wrapped_stream.yaml @@ -837,7 +837,7 @@ 
interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive @@ -1568,7 +1568,7 @@ interactions: string: '{"id":"f90e3dcd-5907-4822-aa74-f051a60e37de","hash":"2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e","signatureHash":"d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2","name":"recommend","description":null,"version":"1.0","tags":null,"metadata":null,"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(genre: str) -> str:\n return f\"Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' + recommend(genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:17.295Z","updatedAt":"2025-12-24T03:24:17.295Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_context_call.yaml b/python/tests/ops/cassettes/test_versioned_context_call.yaml index eb21bd9fd7..9dca62eecb 100644 --- a/python/tests/ops/cassettes/test_versioned_context_call.yaml +++ b/python/tests/ops/cassettes/test_versioned_context_call.yaml @@ -147,7 +147,7 @@ interactions: body: '{"code":"from mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","hash":"040ba4bb8fbe484b0fec96048fc27c7f1b8c8f3ed36a2ed94e92e8cc13407517","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef - recommend(ctx: llm.Context[str], genre: str) -> str: ...","signatureHash":"3d98c672f65730494cad6b8cc3410c69067bbaa986d745e26f416e99b9db9373","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}}}' + recommend(ctx: llm.Context[str], genre: str) -> str: 
...","signatureHash":"3d98c672f65730494cad6b8cc3410c69067bbaa986d745e26f416e99b9db9373","name":"recommend","description":null,"tags":null,"metadata":null,"dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}}}' headers: accept: - '*/*' @@ -173,7 +173,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z","isNew":true}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z","isNew":true}' headers: Connection: - keep-alive @@ -319,7 +319,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_context_call_call_method.yaml b/python/tests/ops/cassettes/test_versioned_context_call_call_method.yaml index 38a499cd6d..b79370e1d3 100644 --- a/python/tests/ops/cassettes/test_versioned_context_call_call_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_context_call_call_method.yaml @@ -128,7 +128,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive @@ -275,7 +275,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_context_call_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_context_call_stream_method.yaml index 0fcb68514f..73ca119a2c 100644 --- a/python/tests/ops/cassettes/test_versioned_context_call_stream_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_context_call_stream_method.yaml @@ -730,7 +730,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive @@ -1447,7 +1447,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_context_call_wrapped_method.yaml b/python/tests/ops/cassettes/test_versioned_context_call_wrapped_method.yaml index d0c4dc3294..af59e4498b 100644 --- a/python/tests/ops/cassettes/test_versioned_context_call_wrapped_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_context_call_wrapped_method.yaml @@ -128,7 +128,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive @@ -275,7 +275,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/cassettes/test_versioned_context_call_wrapped_stream_method.yaml b/python/tests/ops/cassettes/test_versioned_context_call_wrapped_stream_method.yaml index 70ff608d1a..23ca22cf7b 100644 --- a/python/tests/ops/cassettes/test_versioned_context_call_wrapped_stream_method.yaml +++ b/python/tests/ops/cassettes/test_versioned_context_call_wrapped_stream_method.yaml @@ -396,7 +396,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: 
...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive @@ -714,7 +714,7 @@ interactions: mirascope import llm\n\n\n@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: llm.Context[str], genre: str) -> str:\n return f\"{ctx.deps} Recommend a {genre} book.\"\n","signature":"@llm.call(\"openai/gpt-4o-mini\")\ndef recommend(ctx: - llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a4","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' + llm.Context[str], genre: str) -> str: ...","dependencies":{"mirascope":{"version":"2.0.0a5","extras":["all"]}},"environmentId":"5e8c6ab2-3b1a-4ec0-a1f1-e4d01c5e1f18","projectId":"100bf0f4-7503-4524-bd4b-ddec75d8a346","organizationId":"ad201cf0-4185-4952-9608-95ad50080360","createdAt":"2025-12-24T03:24:42.745Z","updatedAt":"2025-12-24T03:24:42.745Z"}' headers: Connection: - keep-alive diff --git a/python/tests/ops/test_versioning.py b/python/tests/ops/test_versioning.py index 7626e74d13..6a554862fa 100644 --- a/python/tests/ops/test_versioning.py +++ b/python/tests/ops/test_versioning.py @@ -757,6 +757,7 @@ def recommend(genre: str) -> str: "mirascope.trace.arg_values": '{"args":["fantasy"],"kwargs":{}}', "mirascope.version.hash": "2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e", "mirascope.version.signature_hash": "d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2", + "mirascope.version.uuid": "f90e3dcd-5907-4822-aa74-f051a60e37de", "mirascope.version.version": "1.0", "mirascope.trace.output": "I highly recommend **\"The Name of the Wind\" by Patrick Rothfuss**. It's the first book in the Kingkiller Chronicle series and follows the story of Kvothe, a gifted young man who becomes a legendary figure. The narrative weaves together magic, music, and adventure, all told in Kvothe's own voice as he recounts his life's journey. The writing is beautiful, and the world-building is rich and immersive. Enjoy your reading!", }, @@ -800,6 +801,7 @@ def recommend(genre: str) -> str: "mirascope.trace.arg_values": '{"args":["mystery"],"kwargs":{}}', "mirascope.version.hash": "2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e", "mirascope.version.signature_hash": "d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2", + "mirascope.version.uuid": "f90e3dcd-5907-4822-aa74-f051a60e37de", "mirascope.version.version": "1.0", "mirascope.trace.output": "I recommend **\"The No. 1 Ladies' Detective Agency\"** by Alexander McCall Smith. It's a charming mystery set in Botswana, featuring the clever and resourceful Precious Ramotswe as she solves various cases with a unique blend of humor and insight. 
The book combines an engaging storyline with rich cultural details, making it both an enjoyable read and a delightful introduction to the series.", }, @@ -842,6 +844,7 @@ def recommend(genre: str) -> str: "mirascope.trace.arg_values": '{"args":["fantasy"],"kwargs":{}}', "mirascope.version.hash": "2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e", "mirascope.version.signature_hash": "d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2", + "mirascope.version.uuid": "f90e3dcd-5907-4822-aa74-f051a60e37de", "mirascope.version.version": "1.0", "mirascope.trace.output": 'I recommend **"The Name of the Wind" by Patrick Rothfuss**. It’s the first book in the *The Kingkiller Chronicle* series and follows the story of Kvothe, a gifted young man who grows to become a legendary figure. The narrative weaves magic, music, and adventure in a richly detailed world. Its lyrical prose and deep character development make it a captivating read for fantasy lovers. Enjoy!', }, @@ -900,6 +903,7 @@ def recommend(genre: str) -> str: "mirascope.trace.arg_values": '{"args":["adventure"],"kwargs":{}}', "mirascope.version.hash": "2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e", "mirascope.version.signature_hash": "d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2", + "mirascope.version.uuid": "f90e3dcd-5907-4822-aa74-f051a60e37de", "mirascope.version.version": "1.0", "mirascope.trace.output": "**[No Content]**", }, @@ -945,6 +949,7 @@ async def recommend(genre: str) -> str: "mirascope.trace.arg_values": '{"args":["horror"],"kwargs":{}}', "mirascope.version.hash": "0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85", "mirascope.version.signature_hash": "b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26", + "mirascope.version.uuid": "1c6495c3-f5b4-4795-b99c-221ff3273156", "mirascope.version.version": "1.0", "mirascope.trace.output": 'I recommend **"The Haunting of Hill House" by Shirley Jackson**. This classic novel explores the eerie and unsettling experiences of a group of people staying in a supposedly haunted mansion. Jackson\'s atmospheric writing and psychological tension create a chilling experience, making it a must-read for horror fans. If you\'re looking for something more contemporary, consider **"Mexican Gothic" by Silvia Moreno-Garcia**, which combines elements of gothic horror with a rich cultural backdrop. Both books offer unique and compelling takes on the genre!', }, @@ -990,6 +995,7 @@ async def recommend(genre: str) -> str: "mirascope.trace.arg_values": '{"args":["horror"],"kwargs":{}}', "mirascope.version.hash": "0391c2bfd9cae644a1b467679c5d6b8a03a8df17c733c4309e36838127bc6d85", "mirascope.version.signature_hash": "b9cd3d0dbb1c669832bb9bec2c556281f7587625908d698c7152a510b516ec26", + "mirascope.version.uuid": "1c6495c3-f5b4-4795-b99c-221ff3273156", "mirascope.version.version": "1.0", "mirascope.trace.output": 'I recommend "The Haunting of Hill House" by Shirley Jackson. It\'s a classic in the horror genre, exploring themes of fear, isolation, and psychological disturbance. The story follows a group of people who gather at a supposedly haunted mansion, and the eerie atmosphere and character dynamics make it both chilling and thought-provoking. 
Enjoy!', }, @@ -1052,6 +1058,7 @@ def recommend(ctx: llm.Context[str], genre: str) -> str: "mirascope.trace.arg_values": '{"ctx":{"deps":"As a librarian,"},"args":["fantasy"],"kwargs":{}}', "mirascope.version.hash": "040ba4bb8fbe484b0fec96048fc27c7f1b8c8f3ed36a2ed94e92e8cc13407517", "mirascope.version.signature_hash": "3d98c672f65730494cad6b8cc3410c69067bbaa986d745e26f416e99b9db9373", + "mirascope.version.uuid": "cd413ec2-9f30-4be7-bfb4-a0f6e1f3bb49", "mirascope.version.version": "1.0", "mirascope.trace.output": 'I highly recommend **"The Name of the Wind" by Patrick Rothfuss**. This novel is the first book in the *Kingkiller Chronicle* series and follows the story of Kvothe, a gifted young man who grows up to become a legendary figure. The narrative combines rich world-building, a unique magic system, and its protagonist\'s journey through love, loss, and the pursuit of knowledge. The prose is lyrical, making it a joy to read while exploring themes of storytelling and identity. Perfect for fans of intricate plots and character-driven tales!', }, @@ -1133,6 +1140,7 @@ async def recommend(ctx: llm.Context[str], genre: str) -> str: "mirascope.trace.arg_values": '{"ctx":{"deps":"As a librarian,"},"args":["mystery"],"kwargs":{}}', "mirascope.version.hash": "79085c0ac8178f3ae453795c87e509b7738abc1b1dcf58b045137c5cfe7e7923", "mirascope.version.signature_hash": "ae19bdc73bdb0f93f143b7df3c8cead8efc708c74688fcf000de53be729c5e96", + "mirascope.version.uuid": "64705856-f7bc-4f08-9fbd-466386f562a3", "mirascope.version.version": "1.0", "mirascope.trace.output": 'I recommend **"The Guest List" by Lucy Foley**. This gripping mystery unfolds during a lavish wedding celebration on a remote Irish island. As the guests gather, tensions rise, and secrets begin to surface, culminating in a shocking murder. The narrative shifts between multiple perspectives, keeping you guessing until the very end. It\'s a fantastic blend of suspense, rich character development, and atmospheric setting—perfect for fans of psychological thrillers!', }, @@ -1211,6 +1219,7 @@ def recommend(genre: str) -> str: "mirascope.trace.tags": ("production", "recommendations"), "mirascope.version.hash": "2820850484af684cd70ef2b89cc09bd16dffd6003b2e8bb3ed0907232e44144e", "mirascope.version.signature_hash": "d3afa65513fc5a9d79bcfdadd5775889dc259dc949c76d2c476ef916b4f234c2", + "mirascope.version.uuid": "f90e3dcd-5907-4822-aa74-f051a60e37de", "mirascope.version.version": "1.0", "mirascope.version.tags": ("production", "recommendations"), "mirascope.trace.output": "I recommend **\"The Kiss Quotient\" by Helen Hoang**. It’s a refreshing story about Stella Lane, a successful woman with Asperger's, who decides to hire an escort to help her gain more experience in relationships. The book beautifully explores themes of love, acceptance, and self-discovery, with a charming romance that unfolds between Stella and the escort, Michael. It's both sweet and steamy, making it a wonderful read for romance lovers!",