Commit 965ba22

Merge branch 'main' into update-ws-templates-ag-2857
2 parents: 95f2cab + ba28053

34 files changed: +544 -72 lines


cookbook/examples/apps/mcp_agent/README.md (+2)

@@ -30,6 +30,8 @@ UAgI (Universal Agent Interface) is a powerful agent application that leverages
 - Only works with 1 MCP server at a time
 - Changing MCP servers resets the agent
 - Only supports 2 MCP servers at the moment
+- Chat history is broken
+- MCP Cleanup is not working, so memory leaks are possible
 
 ## 🚀 Quick Start
 
File renamed without changes.

cookbook/models/aws/bedrock/image_agent_bytes.py (+4 -2)

@@ -4,6 +4,7 @@
 from agno.media import Image
 from agno.models.aws import AwsBedrock
 from agno.tools.duckduckgo import DuckDuckGoTools
+from agno.utils.media import download_image
 
 agent = Agent(
     model=AwsBedrock(id="amazon.nova-pro-v1:0"),
@@ -13,9 +14,10 @@
 
 image_path = Path(__file__).parent.joinpath("sample.jpg")
 
+download_image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg", save_path=str(image_path))
+
 # Read the image file content as bytes
-with open(image_path, "rb") as img_file:
-    image_bytes = img_file.read()
+image_bytes = image_path.read_bytes()
 
 agent.print_response(
     "Tell me about this image and give me the latest news about it.",

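The same refactor repeats in the Azure, OpenAI, xAI, and other image examples below: fetch the sample image with `agno.utils.media.download_image`, then load it via `pathlib.Path.read_bytes()` rather than an explicit `open()` block. A minimal standalone sketch of that pattern, using the same Wikimedia URL as the diffs:

```python
from pathlib import Path

from agno.media import Image
from agno.utils.media import download_image

# Save the sample image next to this script, then load it as raw bytes.
image_path = Path(__file__).parent.joinpath("sample.jpg")
download_image(
    url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg",
    save_path=str(image_path),
)
image_bytes = image_path.read_bytes()

# The bytes can then be handed to an agent as an Image payload.
image = Image(content=image_bytes)
```
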
cookbook/models/azure/ai_foundry/image_agent_bytes.py (+4 -2)

@@ -3,6 +3,7 @@
 from agno.agent import Agent
 from agno.media import Image
 from agno.models.azure import AzureAIFoundry
+from agno.utils.media import download_image
 
 agent = Agent(
     model=AzureAIFoundry(id="Llama-3.2-11B-Vision-Instruct"),
@@ -11,9 +12,10 @@
 
 image_path = Path(__file__).parent.joinpath("sample.jpg")
 
+download_image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg", save_path=str(image_path))
+
 # Read the image file content as bytes
-with open(image_path, "rb") as img_file:
-    image_bytes = img_file.read()
+image_bytes = image_path.read_bytes()
 
 agent.print_response(
     "Tell me about this image.",

cookbook/models/cohere/image_agent.py (+18)

@@ -0,0 +1,18 @@
+from agno.agent import Agent
+from agno.media import Image
+from agno.models.cohere import Cohere
+
+agent = Agent(
+    model=Cohere(id="c4ai-aya-vision-8b"),
+    markdown=True,
+)
+
+agent.print_response(
+    "Tell me about this image.",
+    images=[
+        Image(
+            url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
+        )
+    ],
+    stream=True,
+)
New file (+26)

@@ -0,0 +1,26 @@
+from pathlib import Path
+
+from agno.agent import Agent
+from agno.media import Image
+from agno.models.cohere.chat import Cohere
+from agno.utils.media import download_image
+
+agent = Agent(
+    model=Cohere(id="c4ai-aya-vision-8b"),
+    markdown=True,
+)
+
+image_path = Path(__file__).parent.joinpath("sample.jpg")
+
+download_image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg", save_path=str(image_path))
+
+# Read the image file content as bytes
+image_bytes = image_path.read_bytes()
+
+agent.print_response(
+    "Tell me about this image.",
+    images=[
+        Image(content=image_bytes),
+    ],
+    stream=True,
+)
New file (+23)

@@ -0,0 +1,23 @@
+from pathlib import Path
+
+from agno.agent import Agent
+from agno.media import Image
+from agno.models.cohere.chat import Cohere
+from agno.utils.media import download_image
+
+agent = Agent(
+    model=Cohere(id="c4ai-aya-vision-8b"),
+    markdown=True,
+)
+
+image_path = Path(__file__).parent.joinpath("sample.jpg")
+
+download_image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg", save_path=str(image_path))
+
+agent.print_response(
+    "Tell me about this image.",
+    images=[
+        Image(filepath=image_path),
+    ],
+    stream=True,
+)
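
Taken together, the new Cohere examples exercise the three ways an `agno.media.Image` is constructed in this commit: from a URL, from raw bytes, and from a file path. A compact side-by-side sketch, assuming the sample image has already been downloaded locally:

```python
from pathlib import Path

from agno.media import Image

url = "https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
local_path = Path("sample.jpg")  # assumes the sample image already exists on disk

image_from_url = Image(url=url)                            # remote image by URL
image_from_bytes = Image(content=local_path.read_bytes())  # raw bytes
image_from_file = Image(filepath=local_path)               # local file path
```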

cookbook/models/ibm/watsonx/image_agent_bytes.py (+1 -3)

@@ -3,7 +3,6 @@
 from agno.agent import Agent
 from agno.media import Image
 from agno.models.ibm import WatsonX
-from agno.tools.duckduckgo import DuckDuckGoTools
 
 agent = Agent(
     model=WatsonX(id="meta-llama/llama-3-2-11b-vision-instruct"),
@@ -13,8 +12,7 @@
 image_path = Path(__file__).parent.joinpath("sample.jpg")
 
 # Read the image file content as bytes
-with open(image_path, "rb") as img_file:
-    image_bytes = img_file.read()
+image_bytes = image_path.read_bytes()
 
 agent.print_response(
     "Tell me about this image and and give me the latest news about it.",

cookbook/models/openai/image_agent_bytes.py (+4 -2)

@@ -4,6 +4,7 @@
 from agno.media import Image
 from agno.models.openai import OpenAIChat
 from agno.tools.duckduckgo import DuckDuckGoTools
+from agno.utils.media import download_image
 
 agent = Agent(
     model=OpenAIChat(id="gpt-4o"),
@@ -13,9 +14,10 @@
 
 image_path = Path(__file__).parent.joinpath("sample.jpg")
 
+download_image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg", save_path=str(image_path))
+
 # Read the image file content as bytes
-with open(image_path, "rb") as img_file:
-    image_bytes = img_file.read()
+image_bytes = image_path.read_bytes()
 
 agent.print_response(
     "Tell me about this image and give me the latest news about it.",

cookbook/models/together/image_agent_bytes.py (+1 -2)

@@ -12,8 +12,7 @@
 image_path = Path(__file__).parent.joinpath("sample.jpg")
 
 # Read the image file content as bytes
-with open(image_path, "rb") as img_file:
-    image_bytes = img_file.read()
+image_bytes = image_path.read_bytes()
 
 agent.print_response(
     "Tell me about this image",

cookbook/models/xai/image_agent_bytes.py (+4 -2)

@@ -4,6 +4,7 @@
 from agno.media import Image
 from agno.models.xai import xAI
 from agno.tools.duckduckgo import DuckDuckGoTools
+from agno.utils.media import download_image
 
 agent = Agent(
     model=xAI(id="grok-2-vision-latest"),
@@ -13,9 +14,10 @@
 
 image_path = Path(__file__).parent.joinpath("sample.jpg")
 
+download_image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg", save_path=str(image_path))
+
 # Read the image file content as bytes
-with open(image_path, "rb") as img_file:
-    image_bytes = img_file.read()
+image_bytes = image_path.read_bytes()
 
 agent.print_response(
     "Tell me about this image and give me the latest news about it.",

cookbook/tools/agentql_tools.py (+38)

@@ -0,0 +1,38 @@
+"""
+AgentQL Tools for scraping websites.
+
+Prerequisites:
+- Set the environment variable `AGENTQL_API_KEY` with your AgentQL API key.
+You can obtain the API key from the AgentQL website:
+https://agentql.com/
+- Run `playwright install` to install a browser extension for playwright.
+
+AgentQL will open up a browser instance (don't close it) and do scraping on the site.
+"""
+
+from agno.agent import Agent
+from agno.models.openai import OpenAIChat
+from agno.tools.agentql import AgentQLTools
+
+# Create agent with default AgentQL tool
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"), tools=[AgentQLTools()], show_tool_calls=True
+)
+agent.print_response("https://docs.agno.com/introduction", markdown=True)
+
+# Define custom AgentQL query for specific data extraction (see https://docs.agentql.com/concepts/query-language)
+custom_query = """
+{
+    title
+    text_content[]
+}
+"""
+
+# Create AgentQL tool with custom query
+custom_scraper = AgentQLTools(agentql_query=custom_query, custom_scrape=True)
+
+# Create agent with custom AgentQL tool
+custom_agent = Agent(
+    model=OpenAIChat(id="gpt-4o"), tools=[custom_scraper], show_tool_calls=True
+)
+custom_agent.print_response("https://docs.agno.com/introduction", markdown=True)

libs/agno/agno/embedder/ollama.py (+35 -14)

@@ -5,22 +5,34 @@
 from agno.utils.log import logger
 
 try:
-    import pkg_resources
+    from ollama import Client as OllamaClient
+    import importlib.metadata as metadata
     from packaging import version
 
-    ollama_version = pkg_resources.get_distribution("ollama").version
-    if version.parse(ollama_version).major == 0 and version.parse(ollama_version).minor < 3:
+    # Get installed Ollama version
+    ollama_version = metadata.version("ollama")
+
+    # Check version compatibility (requires v0.3.x or higher)
+    parsed_version = version.parse(ollama_version)
+    if parsed_version.major == 0 and parsed_version.minor < 3:
         import warnings
+        warnings.warn("Only Ollama v0.3.x and above are supported", UserWarning)
+        raise RuntimeError("Incompatible Ollama version detected")
 
-        warnings.warn(
-            "We only support Ollama v0.3.x and above.",
-            UserWarning,
-        )
-        raise RuntimeError("Incompatible Ollama version detected. Execution halted.")
+except ImportError as e:
+    # Handle different import error scenarios
+    if "ollama" in str(e):
+        raise ImportError(
+            "Ollama not installed. Install with `pip install ollama`"
+        ) from e
+    else:
+        raise ImportError(
+            "Missing dependencies. Install with `pip install packaging importlib-metadata`"
+        ) from e
 
-    from ollama import Client as OllamaClient
-except (ModuleNotFoundError, ImportError):
-    raise ImportError("`ollama` not installed. Please install using `pip install ollama`")
+except Exception as e:
+    # Catch-all for unexpected errors
+    print(f"An unexpected error occurred: {e}")
 
 
 @dataclass
@@ -53,14 +65,23 @@ def _response(self, text: str) -> Dict[str, Any]:
         if self.options is not None:
             kwargs["options"] = self.options
 
-        return self.client.embed(input=text, model=self.id, **kwargs)  # type: ignore
+        response = self.client.embed(input=text, model=self.id, **kwargs)
+        if response and "embeddings" in response:
+            embeddings = response["embeddings"]
+            if isinstance(embeddings, list) and len(embeddings) > 0 and isinstance(embeddings[0], list):
+                return {"embeddings": embeddings[0]}  # Use the first element
+            elif isinstance(embeddings, list) and all(isinstance(x, (int, float)) for x in embeddings):
+                return {"embeddings": embeddings}  # Return as-is if already flat
+        return {"embeddings": []}  # Return an empty list if no valid embedding is found
 
     def get_embedding(self, text: str) -> List[float]:
         try:
             response = self._response(text=text)
-            if response is None:
+            embedding = response.get("embeddings", [])
+            if len(embedding) != self.dimensions:
+                logger.warning(f"Expected embedding dimension {self.dimensions}, but got {len(embedding)}")
                 return []
-            return response.get("embeddings", [])
+            return embedding
         except Exception as e:
             logger.warning(e)
             return []
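
The updated `_response` normalizes whatever the Ollama client returns: a nested `embeddings` list (one vector per input) is unwrapped to its first vector, an already-flat list of numbers is passed through, and anything else becomes an empty list, which `get_embedding` then validates against the configured `dimensions`. A small standalone sketch of that normalization, mirroring the diff without importing the library:

```python
from typing import Any, Dict


def normalize_embeddings(response: Dict[str, Any]) -> Dict[str, Any]:
    # Mirrors the normalization added to _response in this commit.
    if response and "embeddings" in response:
        embeddings = response["embeddings"]
        if isinstance(embeddings, list) and len(embeddings) > 0 and isinstance(embeddings[0], list):
            return {"embeddings": embeddings[0]}  # nested: take the first vector
        if isinstance(embeddings, list) and all(isinstance(x, (int, float)) for x in embeddings):
            return {"embeddings": embeddings}  # already flat: pass through
    return {"embeddings": []}  # nothing usable


print(normalize_embeddings({"embeddings": [[0.1, 0.2, 0.3]]}))  # {'embeddings': [0.1, 0.2, 0.3]}
print(normalize_embeddings({"embeddings": [0.1, 0.2, 0.3]}))    # {'embeddings': [0.1, 0.2, 0.3]}
print(normalize_embeddings({}))                                  # {'embeddings': []}
```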
