-
Notifications
You must be signed in to change notification settings - Fork 4
⚡️(web-search) keep running when tool call fails #137
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,154 @@ | ||
| """Tests for chat tool utilities.""" | ||
|
|
||
| import inspect | ||
| from typing import get_type_hints | ||
|
|
||
| import pytest | ||
| from pydantic_ai import ModelRetry, RunContext | ||
|
|
||
| from chat.tools.exceptions import ModelCannotRetry | ||
| from chat.tools.utils import last_model_retry_soft_fail | ||
|
|
||
|
|
||
def test_last_model_retry_soft_fail_preserves_function_metadata():
    """Test that the decorator preserves function metadata for schema generation."""

    @last_model_retry_soft_fail
    async def example_tool(ctx: RunContext, query: str, limit: int = 10) -> str:  # pylint: disable=unused-argument
        """
        Example tool function.

        Args:
            ctx: The run context.
            query: The search query.
            limit: Maximum number of results.

        Returns:
            The search results.
        """
        return f"Results for {query} (limit: {limit})"

    # The wrapped function must keep its original name and docstring.
    assert example_tool.__name__ == "example_tool"
    doc = example_tool.__doc__
    assert doc is not None
    assert "Example tool function" in doc

    # The signature (parameter names and defaults) must survive wrapping.
    parameters = inspect.signature(example_tool).parameters
    for expected_name in ("ctx", "query", "limit"):
        assert expected_name in parameters
    assert parameters["limit"].default == 10

    # Type hints must survive wrapping as well.
    hints = get_type_hints(example_tool)
    assert "query" in hints
    assert hints["query"] == str
    assert "limit" in hints
    assert hints["limit"] == int
    assert hints["return"] == str
|
|
||
|
|
||
@pytest.mark.asyncio
async def test_last_model_retry_soft_fail_normal_execution():
    """Test that the decorator doesn't interfere with normal execution."""

    @last_model_retry_soft_fail
    async def example_tool(_ctx: RunContext, value: str) -> str:
        """Example tool."""
        return f"Result: {value}"

    # Stand-in for the pydantic-ai run context: only the attributes the
    # decorator reads are provided.
    class MockContext:
        """Fake context for testing."""

        max_retries = 3
        retries = {}
        tool_name = "example_tool"

    outcome = await example_tool(MockContext(), "test")
    assert outcome == "Result: test"
|
|
||
|
|
||
@pytest.mark.asyncio
async def test_last_model_retry_soft_fail_handles_retry_exception():
    """Test that the decorator handles ModelRetry exceptions correctly."""

    @last_model_retry_soft_fail
    async def failing_tool(_ctx: RunContext, should_fail: bool) -> str:
        """Tool that can raise ModelRetry."""
        if not should_fail:
            return "Success"
        raise ModelRetry("Please retry with different parameters")

    # Stand-in for the pydantic-ai run context with no retries consumed yet.
    class MockContext:
        """Fake context for testing."""

        max_retries = 3
        retries = {}
        tool_name = "failing_tool"

    # Retries are not exhausted, so the exception must bubble up unchanged.
    with pytest.raises(ModelRetry):
        await failing_tool(MockContext(), should_fail=True)
|
|
||
|
|
||
@pytest.mark.asyncio
async def test_last_model_retry_soft_fail_returns_message_when_max_retries_reached():
    """Test that the decorator returns the error message when max retries is reached."""

    @last_model_retry_soft_fail
    async def failing_tool(_ctx: RunContext, should_fail: bool) -> str:
        """Tool that can raise ModelRetry."""
        if not should_fail:
            return "Success"
        raise ModelRetry("Please retry with different parameters.")

    # Stand-in context whose retry budget for this tool is already spent.
    class MockContext:
        """Fake context for testing."""

        max_retries = 3
        retries = {"failing_tool": 3}
        tool_name = "failing_tool"

    # With retries exhausted, the decorator converts the exception into a
    # plain string result instead of raising.
    outcome = await failing_tool(MockContext(), should_fail=True)
    expected = (
        "Please retry with different parameters. "
        "You must explain this to the user and not try to answer based on your knowledge."
    )
    assert outcome == expected
|
|
||
|
|
||
@pytest.mark.asyncio
async def test_last_model_retry_soft_fail_returns_message_when_model_cannot_retry():
    """Test that the decorator returns the error message when ModelCannotRetry is raised."""

    @last_model_retry_soft_fail
    async def failing_tool(_ctx: RunContext, should_fail: bool) -> str:
        """Tool that can raise ModelRetry."""
        if not should_fail:
            return "Success"
        raise ModelCannotRetry("This is broken duh.")

    # Stand-in context whose retry budget for this tool is already spent.
    class MockContext:
        """Fake context for testing."""

        max_retries = 3
        retries = {"failing_tool": 3}
        tool_name = "failing_tool"

    # ModelCannotRetry short-circuits to its message as the tool result.
    outcome = await failing_tool(MockContext(), should_fail=True)
    assert outcome == "This is broken duh."
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fix error handling order and remove redundant timeout.
Two issues:
Critical - Wrong error handling order: Lines 209-210 call `response.json()` before `raise_for_status()`. If the API returns an error status, attempting to parse JSON first may succeed with error details, but the subsequent status check will raise anyway. Check the status before attempting to parse the response.

Redundant timeout: Line 207 specifies `timeout` again, although it is already configured in the `AsyncClient` constructor at line 196.

Apply this diff:
async def astore_document(self, name: str, content: str) -> None: """ Store the document content in the Albert collection. This method should handle the logic to send the document content to the Albert API. Args: name (str): The name of the document. content (str): The content of the document in Markdown format. """ async with httpx.AsyncClient(timeout=settings.ALBERT_API_TIMEOUT) as client: response = await client.post( urljoin(self._base_url, self._documents_endpoint), headers=self._headers, files={ "file": (f"{name}.md", BytesIO(content.encode("utf-8")), "text/markdown"), }, data={ "collection": int(self.collection_id), "metadata": json.dumps({"document_name": name}), # undocumented API }, - timeout=settings.ALBERT_API_TIMEOUT, ) - logger.debug(response.json()) response.raise_for_status() + logger.debug(response.json())🤖 Prompt for AI Agents