Skip to content

Commit edb891a

Browse files
authored
Merge branch 'main' into whatsapp-integration
2 parents f403242 + d4002c6 commit edb891a

File tree

34 files changed

+1461
-65
lines changed

34 files changed

+1461
-65
lines changed

cookbook/examples/apps/github_mcp_agent/agents.py

+6-4
Original file line numberDiff line numberDiff line change
@@ -1,27 +1,29 @@
11
import os
22
from textwrap import dedent
3+
34
from agno.agent import Agent
45
from agno.tools.mcp import MCPTools
56
from mcp import ClientSession, StdioServerParameters
67
from mcp.client.stdio import stdio_client
78

9+
810
async def run_github_agent(message):
911
if not os.getenv("GITHUB_TOKEN"):
1012
return "Error: GitHub token not provided"
11-
13+
1214
try:
1315
server_params = StdioServerParameters(
1416
command="npx",
1517
args=["-y", "@modelcontextprotocol/server-github"],
1618
)
17-
19+
1820
# Create client session
1921
async with stdio_client(server_params) as (read, write):
2022
async with ClientSession(read, write) as session:
2123
# Initialize MCP toolkit
2224
mcp_tools = MCPTools(session=session)
2325
await mcp_tools.initialize()
24-
26+
2527
# Create agent
2628
agent = Agent(
2729
tools=[mcp_tools],
@@ -36,7 +38,7 @@ async def run_github_agent(message):
3638
markdown=True,
3739
show_tool_calls=True,
3840
)
39-
41+
4042
# Run agent
4143
response = await agent.arun(message)
4244
return response.content
+33-21
Original file line numberDiff line numberDiff line change
@@ -1,49 +1,58 @@
11
import asyncio
22
import os
3+
34
import streamlit as st
45
from agents import run_github_agent
6+
57
# Page config
68
st.set_page_config(page_title="🐙 GitHub MCP Agent", page_icon="🐙", layout="wide")
79

810
# Title and description
911
st.markdown("<h1 class='main-header'>🐙 GitHub MCP Agent</h1>", unsafe_allow_html=True)
10-
st.markdown("Explore GitHub repositories with natural language using the Model Context Protocol")
12+
st.markdown(
13+
"Explore GitHub repositories with natural language using the Model Context Protocol"
14+
)
1115

1216
# Setup sidebar for API key
1317
with st.sidebar:
1418
st.header("🔑 Authentication")
15-
github_token = st.text_input("GitHub Token", type="password",
16-
help="Create a token with repo scope at github.com/settings/tokens")
17-
19+
github_token = st.text_input(
20+
"GitHub Token",
21+
type="password",
22+
help="Create a token with repo scope at github.com/settings/tokens",
23+
)
24+
1825
if github_token:
1926
os.environ["GITHUB_TOKEN"] = github_token
20-
27+
2128
st.markdown("---")
2229
st.markdown("### Example Queries")
23-
30+
2431
st.markdown("**Issues**")
2532
st.markdown("- Show me issues by label")
2633
st.markdown("- What issues are being actively discussed?")
27-
34+
2835
st.markdown("**Pull Requests**")
2936
st.markdown("- What PRs need review?")
3037
st.markdown("- Show me recent merged PRs")
31-
38+
3239
st.markdown("**Repository**")
3340
st.markdown("- Show repository health metrics")
3441
st.markdown("- Show repository activity patterns")
35-
42+
3643
st.markdown("---")
37-
st.caption("Note: Always specify the repository in your query if not already selected in the main input.")
44+
st.caption(
45+
"Note: Always specify the repository in your query if not already selected in the main input."
46+
)
3847

3948
# Query input
4049
col1, col2 = st.columns([3, 1])
4150
with col1:
4251
repo = st.text_input("Repository", value="agno-agi/agno", help="Format: owner/repo")
4352
with col2:
44-
query_type = st.selectbox("Query Type", [
45-
"Issues", "Pull Requests", "Repository Activity", "Custom"
46-
])
53+
query_type = st.selectbox(
54+
"Query Type", ["Issues", "Pull Requests", "Repository Activity", "Custom"]
55+
)
4756

4857
# Create predefined queries based on type
4958
if query_type == "Issues":
@@ -55,8 +64,11 @@
5564
else:
5665
query_template = ""
5766

58-
query = st.text_area("Your Query", value=query_template,
59-
placeholder="What would you like to know about this repository?")
67+
query = st.text_area(
68+
"Your Query",
69+
value=query_template,
70+
placeholder="What would you like to know about this repository?",
71+
)
6072

6173
# Run button
6274
if st.button("🚀 Run Query", type="primary", use_container_width=True):
@@ -71,15 +83,15 @@
7183
full_query = f"{query} in {repo}"
7284
else:
7385
full_query = query
74-
86+
7587
result = asyncio.run(run_github_agent(full_query))
76-
88+
7789
# Display results in a nice container
7890
st.markdown("### Results")
7991
st.markdown(result)
8092

8193
# Display help text for first-time users
82-
if 'result' not in locals():
94+
if "result" not in locals():
8395
st.markdown(
8496
"""<div class='info-box'>
8597
<h4>How to use this app:</h4>
@@ -96,10 +108,10 @@
96108
<li>More specific queries yield better results</li>
97109
<li>This app requires Node.js to be installed (for the npx command)</li>
98110
</ul>
99-
</div>""",
100-
unsafe_allow_html=True
111+
</div>""",
112+
unsafe_allow_html=True,
101113
)
102114

103115
# Footer
104116
st.markdown("---")
105-
st.write("Built with Streamlit, Agno, and Model Context Protocol ❤️")
117+
st.write("Built with Streamlit, Agno, and Model Context Protocol ❤️")
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
output
2+
agents.db
3+
tmp
+122
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
# UAgI: Universal Agent Interface powered by MCP
2+
3+
> [!IMPORTANT]
4+
> This is a work in progress (see [open issues](#-open-issues) below); please contribute and help improve it.
5+
6+
UAgI (Universal Agent Interface) is a powerful agent application that leverages the Model Context Protocol (MCP) to provide a unified interface for interacting with various MCP servers. This application allows you to connect to different data sources and tools through MCP servers, providing a seamless experience for working with external services.
7+
8+
## 🌟 Features
9+
10+
- **Multiple Model Support**: Works with various LLM providers including:
11+
- OpenAI (o3-mini, gpt-4o, gpt-4.5)
12+
- Anthropic (claude-3-7-sonnet, claude-3-7-sonnet-thinking)
13+
- Google (gemini-2.0-flash, gemini-2.0-pro)
14+
- Groq (llama-3.3-70b-versatile)
15+
16+
- **MCP Server Integration**: Connect to the following MCP servers:
17+
- GitHub: Access repositories, issues, and more
18+
- Filesystem: Browse and manipulate files on your local system
19+
20+
- **Knowledge Base**: Built-in knowledge of MCP documentation to help answer questions about the protocol
21+
22+
- **Session Management**: Save and restore chat sessions using SQLite storage
23+
24+
- **Chat History Export**: Export your conversations as markdown files
25+
26+
- **Streamlit UI**: User-friendly interface with customizable settings
27+
28+
## 🐞 Open Issues
29+
30+
- Only works with 1 MCP server at a time
31+
- Changing MCP servers resets the agent
32+
- Only supports 2 MCP servers (GitHub and Filesystem) at the moment
33+
- Chat history is broken
34+
- MCP Cleanup is not working, so memory leaks are possible
35+
36+
## 🚀 Quick Start
37+
38+
### 1. Environment Setup
39+
40+
Create and activate a virtual environment:
41+
```bash
42+
python3 -m venv .venv
43+
source .venv/bin/activate # On Windows: .venv\Scripts\activate
44+
```
45+
46+
### 2. Install Dependencies
47+
48+
```bash
49+
pip install -r cookbook/examples/apps/mcp_agent/requirements.txt
50+
```
51+
52+
### 3. Configure API Keys
53+
54+
Required:
55+
```bash
56+
export OPENAI_API_KEY=your_openai_key_here
57+
```
58+
59+
Optional (for additional models):
60+
```bash
61+
export ANTHROPIC_API_KEY=your_anthropic_key_here
62+
export GOOGLE_API_KEY=your_google_key_here
63+
export GROQ_API_KEY=your_groq_key_here
64+
```
65+
66+
For GitHub MCP server:
67+
```bash
68+
export GITHUB_TOKEN=your_github_token_here
69+
```
70+
71+
### 4. Launch the Application
72+
73+
```bash
74+
streamlit run cookbook/examples/apps/mcp_agent/app.py
75+
```
76+
77+
Visit [localhost:8501](http://localhost:8501) to access the UAgI application.
78+
79+
## 🔧 How It Works
80+
81+
UAgI connects to MCP servers using the Model Context Protocol, which standardizes how applications provide context to LLMs. When you ask a question:
82+
83+
1. The agent analyzes your request and determines which MCP tools might be helpful
84+
2. It connects to the appropriate MCP server (GitHub, Filesystem, etc.)
85+
3. The agent executes the necessary tools through the MCP server
86+
4. Results are processed and returned in a natural language response
87+
5. All interactions are saved in your session history
88+
89+
## 📚 Understanding MCP
90+
91+
The Model Context Protocol (MCP) is an open protocol that standardizes how applications provide context to LLMs. Think of MCP like a USB-C port for AI applications - it provides a standardized way to connect AI models to different data sources and tools.
92+
93+
MCP helps you build agents and complex workflows on top of LLMs by providing:
94+
- A growing list of pre-built integrations that your LLM can directly plug into
95+
- The flexibility to switch between LLM providers and vendors
96+
- Best practices for securing your data within your infrastructure
97+
98+
## 🛠️ Customization
99+
100+
### Adding New MCP Servers
101+
102+
The application is designed to be extensible. To add new MCP servers:
103+
104+
1. Update the `get_mcp_server_config()` function in `utils.py`
105+
2. Add server-specific example inputs in the `example_inputs()` function
106+
107+
### Modifying Agent Behavior
108+
109+
The agent configuration is in `agents.py`:
110+
- Adjust the agent description and instructions to change its behavior
111+
- Modify the knowledge base to include additional documentation
112+
- Add new tools or capabilities as needed
113+
114+
## 📚 Documentation
115+
116+
For more detailed information:
117+
- [Agno Documentation](https://docs.agno.com)
118+
- [Streamlit Documentation](https://docs.streamlit.io)
119+
120+
## 🤝 Support
121+
122+
Need help? Join our [Discord community](https://agno.link/discord)

cookbook/examples/apps/mcp_agent/__init__.py

Whitespace-only changes.

0 commit comments

Comments
 (0)