Commit eb82afa

add tests

1 parent 677868d

File tree: 3 files changed, +127 −2 lines


cookbook/models/litellm/__init__.py

Whitespace-only changes.

libs/agno/agno/models/litellm/litellm.py

+2 −2
@@ -1,10 +1,10 @@
 from dataclasses import dataclass
 from os import getenv
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional

 import litellm

-from agno.models.base import Message, Model
+from agno.models.base import Message
 from agno.models.base import ModelResponse as AgnoModelResponse
 from agno.models.openai.like import OpenAILike
 from agno.utils.log import logger
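
The two dropped names, Union and Model, are no longer referenced in this module: the class appears to be built on OpenAILike (still imported below), so only Message and the ModelResponse alias are needed from agno.models.base.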
New test file (+125 lines)
import pytest

from agno.agent import Agent, RunResponse
from agno.exceptions import ModelProviderError
from agno.models.litellm import LiteLLM


def _assert_metrics(response: RunResponse):
    """Helper function to assert metrics are present and valid"""
    input_tokens = response.metrics.get("input_tokens", [])
    output_tokens = response.metrics.get("output_tokens", [])
    total_tokens = response.metrics.get("total_tokens", [])

    assert sum(input_tokens) > 0
    assert sum(output_tokens) > 0
    assert sum(total_tokens) > 0
    assert sum(total_tokens) == sum(input_tokens) + sum(output_tokens)

    assert response.metrics.get("completion_tokens_details") is not None
    assert response.metrics.get("prompt_tokens_details") is not None


def test_basic():
    """Test basic functionality with LiteLLM"""
    agent = Agent(model=LiteLLM(id="gpt-4o"), markdown=True, telemetry=False, monitoring=False)

    # Get the response
    response: RunResponse = agent.run("Share a 2 sentence horror story")

    assert response.content is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]

    _assert_metrics(response)


def test_basic_stream():
    """Test streaming functionality with LiteLLM"""
    agent = Agent(model=LiteLLM(id="gpt-4o"), markdown=True, telemetry=False, monitoring=False)

    response_stream = agent.run("Share a 2 sentence horror story", stream=True)

    # Verify it's an iterator
    assert hasattr(response_stream, "__iter__")

    responses = list(response_stream)
    assert len(responses) > 0
    for response in responses:
        assert isinstance(response, RunResponse)
        assert response.content is not None

    _assert_metrics(agent.run_response)


@pytest.mark.asyncio
async def test_async_basic():
    """Test async functionality with LiteLLM"""
    agent = Agent(model=LiteLLM(id="gpt-4o"), markdown=True, telemetry=False, monitoring=False)

    response = await agent.arun("Share a 2 sentence horror story")

    assert response.content is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)


@pytest.mark.asyncio
async def test_async_basic_stream():
    """Test async streaming functionality with LiteLLM"""
    agent = Agent(model=LiteLLM(id="gpt-4o"), markdown=True, telemetry=False, monitoring=False)

    response_stream = await agent.arun("Share a 2 sentence horror story", stream=True)

    async for response in response_stream:
        assert isinstance(response, RunResponse)
        assert response.content is not None
    _assert_metrics(agent.run_response)


def test_exception_handling():
    """Test error handling with invalid model ID"""
    agent = Agent(model=LiteLLM(id="nonexistent-model"), markdown=True, telemetry=False, monitoring=False)

    # Should raise an exception for invalid model
    with pytest.raises(ModelProviderError) as exc:
        agent.run("Share a 2 sentence horror story")

    assert exc.value.model_name == "LiteLLM"
    assert exc.value.model_id == "nonexistent-model"


def test_with_memory():
    """Test LiteLLM with agent memory"""
    agent = Agent(
        model=LiteLLM(id="gpt-4o"),
        add_history_to_messages=True,
        num_history_responses=5,
        markdown=True,
        telemetry=False,
        monitoring=False,
    )

    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None

    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert "John Smith" in response2.content

    # Verify memories were created
    assert len(agent.memory.messages) == 5
    assert [m.role for m in agent.memory.messages] == [
        "system", "user", "assistant", "user", "assistant"]

    # Test metrics structure and types
    _assert_metrics(response2)
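
These are live-model integration tests: the "gpt-4o" id resolves to OpenAI through LiteLLM, so a run needs network access, an OPENAI_API_KEY, and pytest-asyncio for the async cases. A minimal sketch of a local run follows; the test-file path in it is hypothetical, since this view does not show where the new file lives.

import os
import sys

import pytest

# LiteLLM routes the "gpt-4o" id to OpenAI, so the key must be set.
assert os.getenv("OPENAI_API_KEY"), "set OPENAI_API_KEY before running"

# Hypothetical path; point this at wherever the new test file lives.
# pytest.main returns an exit code: 0 means every test passed.
sys.exit(pytest.main(["-v", "libs/agno/tests/integration/models/litellm/test_litellm.py"]))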
