Openai model responses #2372
Merged · 17 commits · Mar 12, 2025
3 changes: 1 addition & 2 deletions cookbook/examples/apps/geobuddy/app.py
@@ -2,10 +2,9 @@
from pathlib import Path

import streamlit as st
from geography_buddy import analyze_image
from PIL import Image

from cookbook.use_cases.apps.geobuddy.geography_buddy import analyze_image

# Streamlit App Configuration
st.set_page_config(
    page_title="Geography Location Buddy",
13 changes: 13 additions & 0 deletions cookbook/models/openai/responses/async_basic.py
@@ -0,0 +1,13 @@
import asyncio

from agno.agent import Agent, RunResponse # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
15 changes: 15 additions & 0 deletions cookbook/models/openai/responses/async_basic_stream.py
@@ -0,0 +1,15 @@
import asyncio
from typing import Iterator # noqa

from agno.agent import Agent, RunResponse # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
# for chunk in run_response:
#     print(chunk.content)

# Print the response in the terminal
asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
15 changes: 15 additions & 0 deletions cookbook/models/openai/responses/async_tool_use.py
@@ -0,0 +1,15 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

import asyncio

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
13 changes: 13 additions & 0 deletions cookbook/models/openai/responses/basic.py
@@ -0,0 +1,13 @@
from agno.agent import Agent, RunResponse # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)

# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story")

print(agent.run_response.metrics)  # Inspect the metrics of the run
13 changes: 13 additions & 0 deletions cookbook/models/openai/responses/basic_stream.py
@@ -0,0 +1,13 @@
from typing import Iterator # noqa
from agno.agent import Agent, RunResponse # noqa
from agno.models.openai import OpenAIResponses

agent = Agent(model=OpenAIResponses(id="gpt-4o"), markdown=True)

# Get the response in a variable
# run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
# for chunk in run_response:
#     print(chunk.content)

# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story", stream=True)
20 changes: 20 additions & 0 deletions cookbook/models/openai/responses/image_agent.py
@@ -0,0 +1,20 @@
from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIResponses
from agno.tools.googlesearch import GoogleSearchTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[GoogleSearchTools()],
    markdown=True,
)

agent.print_response(
"Tell me about this image and give me the latest news about it.",
images=[
Image(
url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
)
],
stream=True,
)
31 changes: 31 additions & 0 deletions cookbook/models/openai/responses/image_agent_bytes.py
@@ -0,0 +1,31 @@
from pathlib import Path

from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIResponses
from agno.tools.googlesearch import GoogleSearchTools
from agno.utils.media import download_image

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[GoogleSearchTools()],
    markdown=True,
)

image_path = Path(__file__).parent.joinpath("sample.jpg")

download_image(
url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg",
output_path=str(image_path),
)

# Read the image file content as bytes
image_bytes = image_path.read_bytes()

agent.print_response(
"Tell me about this image and give me the latest news about it.",
images=[
Image(content=image_bytes),
],
stream=True,
)
23 changes: 23 additions & 0 deletions cookbook/models/openai/responses/image_agent_with_memory.py
@@ -0,0 +1,23 @@
from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIResponses
from agno.tools.googlesearch import GoogleSearchTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[GoogleSearchTools()],
    markdown=True,
    add_history_to_messages=True,
    num_history_responses=3,
)

agent.print_response(
"Tell me about this image and give me the latest news about it.",
images=[
Image(
url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
)
],
)

agent.print_response("Tell me where I can get more images?")
21 changes: 21 additions & 0 deletions cookbook/models/openai/responses/knowledge.py
@@ -0,0 +1,21 @@
"""Run `pip install duckduckgo-search sqlalchemy pgvector pypdf openai` to install dependencies."""

from agno.agent import Agent
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.models.openai import OpenAIResponses
from agno.vectordb.pgvector import PgVector

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

knowledge_base = PDFUrlKnowledgeBase(
    urls=["https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
    vector_db=PgVector(table_name="recipes", db_url=db_url),
)
knowledge_base.load(recreate=True) # Comment out after first run

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    knowledge=knowledge_base,
    show_tool_calls=True,
)
agent.print_response("How to make Thai curry?", markdown=True)
56 changes: 56 additions & 0 deletions cookbook/models/openai/responses/memory.py
@@ -0,0 +1,56 @@
"""
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/models/openai/responses/memory.py` to run the agent
"""

from agno.agent import Agent, AgentMemory
from agno.memory.db.postgres import PgMemoryDb
from agno.models.openai import OpenAIResponses
from agno.storage.agent.postgres import PostgresAgentStorage
from rich.pretty import pprint

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    # Store the memories and summary in a database
    memory=AgentMemory(
        db=PgMemoryDb(table_name="agent_memory", db_url=db_url),
        create_user_memories=True,
        create_session_summary=True,
    ),
    # Store agent sessions in a database
    storage=PostgresAgentStorage(
        table_name="personalized_agent_sessions", db_url=db_url
    ),
    # Show debug logs so you can see the memory being created
    # debug_mode=True,
)

# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summary)

# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summary)

# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summary)

# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
13 changes: 13 additions & 0 deletions cookbook/models/openai/responses/reasoning_o3_mini.py
@@ -0,0 +1,13 @@
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.yfinance import YFinanceTools

agent = Agent(
model=OpenAIResponses(id="o3-mini", reasoning_effort="high"),
tools=[YFinanceTools(enable_all=True)],
show_tool_calls=True,
markdown=True,
)

# Print the response in the terminal
agent.print_response("Write a report on the NVDA, is it a good buy?", stream=True)
17 changes: 17 additions & 0 deletions cookbook/models/openai/responses/storage.py
@@ -0,0 +1,17 @@
"""Run `pip install duckduckgo-search sqlalchemy openai` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.storage.agent.postgres import PostgresAgentStorage
from agno.tools.duckduckgo import DuckDuckGoTools

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    storage=PostgresAgentStorage(table_name="agent_sessions", db_url=db_url),
    tools=[DuckDuckGoTools()],
    add_history_to_messages=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
53 changes: 53 additions & 0 deletions cookbook/models/openai/responses/structured_output.py
@@ -0,0 +1,53 @@
from typing import List

from agno.agent import Agent, RunResponse # noqa
from agno.models.openai import OpenAIResponses
from pydantic import BaseModel, Field
from rich.pretty import pprint


class MovieScript(BaseModel):
    setting: str = Field(
        ..., description="Provide a nice setting for a blockbuster movie."
    )
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(
        ..., description="3 sentence storyline for the movie. Make it exciting!"
    )


# Agent that uses JSON mode
json_mode_agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    description="You write movie scripts.",
    response_model=MovieScript,
)

# Agent that uses structured outputs
structured_output_agent = Agent(
model=OpenAIResponses(id="gpt-4o-2024-08-06"),
description="You write movie scripts.",
response_model=MovieScript,
structured_outputs=True,
)


# Get the response in a variable
# json_mode_response: RunResponse = json_mode_agent.run("New York")
# pprint(json_mode_response.content)
# structured_output_response: RunResponse = structured_output_agent.run("New York")
# pprint(structured_output_response.content)

json_mode_agent.print_response("New York")
structured_output_agent.print_response("New York")
13 changes: 13 additions & 0 deletions cookbook/models/openai/responses/tool_use.py
@@ -0,0 +1,13 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
agent.print_response("Whats happening in France?")
13 changes: 13 additions & 0 deletions cookbook/models/openai/responses/tool_use_stream.py
@@ -0,0 +1,13 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIResponses(id="gpt-4o"),
    tools=[DuckDuckGoTools()],
    show_tool_calls=True,
    markdown=True,
)
agent.print_response("Whats happening in France?", stream=True)
10 changes: 10 additions & 0 deletions cookbook/models/openai/responses/websearch_builtin_tool.py
@@ -0,0 +1,10 @@
"""Run `pip install duckduckgo-search` to install dependencies."""

from agno.agent import Agent
from agno.models.openai import OpenAIResponses

agent = Agent(
model=OpenAIResponses(id="gpt-4o", web_search=True),
markdown=True,
)
agent.print_response("Whats happening in France?")
5 changes: 4 additions & 1 deletion libs/agno/agno/agent/agent.py
@@ -31,7 +31,6 @@
from agno.memory.agent import AgentMemory, AgentRun
from agno.models.base import Model
from agno.models.message import Message, MessageReferences
from agno.models.openai.like import OpenAILike
from agno.models.response import ModelResponse, ModelResponseEvent
from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
from agno.run.messages import RunMessages
@@ -2849,6 +2848,8 @@ def get_audio(self) -> Optional[List[AudioArtifact]]:
    ###########################################################################

    def reason(self, run_messages: RunMessages) -> Iterator[RunResponse]:
        from agno.models.openai.like import OpenAILike

        # Yield a reasoning started event
        if self.stream_intermediate_steps:
            yield self.create_run_response(content="Reasoning started", event=RunEvent.reasoning_started)
@@ -3030,6 +3031,8 @@ def reason(self, run_messages: RunMessages) -> Iterator[RunResponse]:
)

    async def areason(self, run_messages: RunMessages) -> Any:
        from agno.models.openai.like import OpenAILike

        # Yield a reasoning started event
        if self.stream_intermediate_steps:
            yield self.create_run_response(content="Reasoning started", event=RunEvent.reasoning_started)
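The two hunks above replace the module-level `OpenAILike` import with local imports inside `reason()` and `areason()`. A minimal sketch of that deferred-import pattern (illustrative only; `run_reasoning` is a hypothetical stand-in, not agno's actual method signature):

def run_reasoning() -> None:
    # Importing inside the function defers the import until reasoning is
    # actually used and avoids a potential import cycle between agno.agent
    # and agno.models.openai at module-load time.
    from agno.models.openai.like import OpenAILike  # deferred import

    # ... reasoning logic that references OpenAILike goes here ...
    print(OpenAILike.__name__)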
1 change: 1 addition & 0 deletions libs/agno/agno/models/openai/__init__.py
@@ -1,2 +1,3 @@
from agno.models.openai.chat import OpenAIChat
from agno.models.openai.like import OpenAILike
from agno.models.openai.responses import OpenAIResponses