Commit 4d52f53

Openai model responses (#2372)
## Description
Add support for OpenAI's Responses API.
1 parent 9749bc9 commit 4d52f53
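For orientation, a minimal usage sketch pieced together from the cookbook examples and the new `agno.models.openai` export in the diff below:

from agno.agent import Agent
from agno.models.openai import OpenAIResponses  # export added in this commit

# OpenAIResponses points the agent at OpenAI's Responses API
agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)
agent.print_response("Share a 2 sentence horror story")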


58 files changed: +1668 −5 lines

cookbook/examples/apps/geobuddy/app.py

+1 −2
@@ -2,10 +2,9 @@
 from pathlib import Path
 
 import streamlit as st
+from geography_buddy import analyze_image
 from PIL import Image
 
-from cookbook.use_cases.apps.geobuddy.geography_buddy import analyze_image
-
 # Streamlit App Configuration
 st.set_page_config(
     page_title="Geography Location Buddy",
7 files renamed without changes.
@@ -0,0 +1,13 @@
import asyncio

from agno.agent import Agent, RunResponse  # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
@@ -0,0 +1,15 @@
import asyncio
from typing import Iterator  # noqa

from agno.agent import Agent, RunResponse  # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
# for chunk in run_response:
#     print(chunk.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
@@ -0,0 +1,15 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

import asyncio

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
@@ -0,0 +1,13 @@
from agno.agent import Agent, RunResponse  # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story")

agent.run_response.metrics
@@ -0,0 +1,13 @@
from typing import Iterator  # noqa
from agno.agent import Agent, RunResponse  # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
# for chunk in run_response:
#     print(chunk.content)

# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story", stream=True)
@@ -0,0 +1,20 @@
from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIResponses
from agno.tools.googlesearch import GoogleSearchTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[GoogleSearchTools()],
    markdown=True,
)

agent.print_response(
    "Tell me about this image and give me the latest news about it.",
    images=[
        Image(
            url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
        )
    ],
    stream=True,
)
@@ -0,0 +1,31 @@
from pathlib import Path

from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIResponses
from agno.tools.googlesearch import GoogleSearchTools
from agno.utils.media import download_image

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[GoogleSearchTools()],
    markdown=True,
)

image_path = Path(__file__).parent.joinpath("sample.jpg")

download_image(
    url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg",
    output_path=str(image_path),
)

# Read the image file content as bytes
image_bytes = image_path.read_bytes()

agent.print_response(
    "Tell me about this image and give me the latest news about it.",
    images=[
        Image(content=image_bytes),
    ],
    stream=True,
)
@@ -0,0 +1,23 @@
from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIResponses
from agno.tools.googlesearch import GoogleSearchTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[GoogleSearchTools()],
    markdown=True,
    add_history_to_messages=True,
    num_history_responses=3,
)

agent.print_response(
    "Tell me about this image and give me the latest news about it.",
    images=[
        Image(
            url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
        )
    ],
)

agent.print_response("Tell me where I can get more images?")
@@ -0,0 +1,21 @@
"""Run `pip install duckduckgo-search sqlalchemy pgvector pypdf openai` to install dependencies."""

from agno.agent import Agent
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.models.openai import OpenAIResponses
from agno.vectordb.pgvector import PgVector

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

knowledge_base = PDFUrlKnowledgeBase(
    urls=["https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
    vector_db=PgVector(table_name="recipes", db_url=db_url),
)
knowledge_base.load(recreate=True)  # Comment out after first run

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    knowledge=knowledge_base,
    show_tool_calls=True,
)
agent.print_response("How to make Thai curry?", markdown=True)
@@ -0,0 +1,56 @@
"""
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/agents/personalized_memories_and_summaries.py` to run the agent
"""

from agno.agent import Agent, AgentMemory
from agno.memory.db.postgres import PgMemoryDb
from agno.models.openai import OpenAIResponses
from agno.storage.agent.postgres import PostgresAgentStorage
from rich.pretty import pprint

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    # Store the memories and summary in a database
    memory=AgentMemory(
        db=PgMemoryDb(table_name="agent_memory", db_url=db_url),
        create_user_memories=True,
        create_session_summary=True,
    ),
    # Store agent sessions in a database
    storage=PostgresAgentStorage(
        table_name="personalized_agent_sessions", db_url=db_url
    ),
    # Show debug logs so you can see the memory being created
    # debug_mode=True,
)

# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summary)

# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summary)

# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summary)

# Ask about the conversation
agent.print_response(
    "What have we been talking about, do you know my name?", stream=True
)
@@ -0,0 +1,13 @@
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.yfinance import YFinanceTools

agent = Agent(
    model=OpenAIResponses(id="o3-mini", reasoning_effort="high"),
    tools=[YFinanceTools(enable_all=True)],
    show_tool_calls=True,
    markdown=True,
)

# Print the response in the terminal
agent.print_response("Write a report on the NVDA, is it a good buy?", stream=True)
@@ -0,0 +1,17 @@
"""Run `pip install duckduckgo-search sqlalchemy openai` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.storage.agent.postgres import PostgresAgentStorage
from agno.tools.duckduckgo import DuckDuckGoTools

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    storage=PostgresAgentStorage(table_name="agent_sessions", db_url=db_url),
    tools=[DuckDuckGoTools()],
    add_history_to_messages=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
@@ -0,0 +1,53 @@
from typing import List

from agno.agent import Agent, RunResponse  # noqa
from agno.models.openai import OpenAIChat
from agno.models.openai.responses import OpenAIResponses  # noqa
from pydantic import BaseModel, Field
from rich.pretty import pprint


class MovieScript(BaseModel):
    setting: str = Field(
        ..., description="Provide a nice setting for a blockbuster movie."
    )
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(
        ..., description="3 sentence storyline for the movie. Make it exciting!"
    )


# Agent that uses JSON mode
json_mode_agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    description="You write movie scripts.",
    response_model=MovieScript,
    structured_outputs=True,
)

# Agent that uses structured outputs
structured_output_agent = Agent(
    model=OpenAIResponses(id="gpt-4o-2024-08-06"),
    description="You write movie scripts.",
    response_model=MovieScript,
    structured_outputs=True,
)


# Get the response in a variable
# json_mode_response: RunResponse = json_mode_agent.run("New York")
# pprint(json_mode_response.content)
# structured_output_response: RunResponse = structured_output_agent.run("New York")
# pprint(structured_output_response.content)

json_mode_agent.print_response("New York")
structured_output_agent.print_response("New York")
@@ -0,0 +1,13 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
agent.print_response("Whats happening in France?")
@@ -0,0 +1,13 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
agent.print_response("Whats happening in France?", stream=True)
@@ -0,0 +1,10 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses

agent = Agent(
    model=OpenAIResponses(id="gpt-4o", web_search=True),
    markdown=True,
)
agent.print_response("Whats happening in France?")

libs/agno/agno/agent/agent.py

+4-1
@@ -31,7 +31,6 @@
 from agno.memory.agent import AgentMemory, AgentRun
 from agno.models.base import Model
 from agno.models.message import Message, MessageReferences
-from agno.models.openai.like import OpenAILike
 from agno.models.response import ModelResponse, ModelResponseEvent
 from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
 from agno.run.messages import RunMessages
@@ -2849,6 +2848,8 @@ def get_audio(self) -> Optional[List[AudioArtifact]]:
     ###########################################################################
 
     def reason(self, run_messages: RunMessages) -> Iterator[RunResponse]:
+        from agno.models.openai.like import OpenAILike
+
         # Yield a reasoning started event
         if self.stream_intermediate_steps:
             yield self.create_run_response(content="Reasoning started", event=RunEvent.reasoning_started)
@@ -3030,6 +3031,8 @@ def reason(self, run_messages: RunMessages) -> Iterator[RunResponse]:
         )
 
     async def areason(self, run_messages: RunMessages) -> Any:
+        from agno.models.openai.like import OpenAILike
+
         # Yield a reasoning started event
         if self.stream_intermediate_steps:
             yield self.create_run_response(content="Reasoning started", event=RunEvent.reasoning_started)
@@ -1,2 +1,3 @@
 from agno.models.openai.chat import OpenAIChat
 from agno.models.openai.like import OpenAILike
+from agno.models.openai.responses import OpenAIResponses
