Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 48 additions & 0 deletions config/profiles/local_mac.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Profile: local_mac
# Runs every ROMA agent role against a local Ollama server (no network egress).
name: local_mac
description: "Optimized profile for running ROMA locally on Apple Silicon (M-series) using Ollama."

runtime:
  max_depth: 3           # shallow recursion keeps local runs bounded
  timeout: 600           # Extended timeout for local inference speed

agents:
  atomizer:
    llm:
      model: "ollama_chat/llama3.1"
      base_url: "http://localhost:11434"
      temperature: 0.1
    prediction_strategy: "chain_of_thought"

  planner:
    llm:
      model: "ollama_chat/llama3.1"
      base_url: "http://localhost:11434"
      temperature: 0.2   # slightly higher temperature for more diverse plans
    prediction_strategy: "chain_of_thought"

  executor:
    llm:
      model: "ollama_chat/llama3.1"
      base_url: "http://localhost:11434"
      temperature: 0.1
      max_tokens: 4000
    prediction_strategy: "react"
    # Only lightweight toolkits are enabled; heavier toolkits are left out
    # of this list to save RAM on local machines.
    toolkits:
      - class_name: "FileToolkit"
        enabled: true
      - class_name: "CalculatorToolkit"
        enabled: true

  aggregator:
    llm:
      model: "ollama_chat/llama3.1"
      base_url: "http://localhost:11434"
      temperature: 0.1
    prediction_strategy: "chain_of_thought"

  verifier:
    llm:
      model: "ollama_chat/llama3.1"
      base_url: "http://localhost:11434"
      temperature: 0.0   # deterministic verification
43 changes: 43 additions & 0 deletions docs/LOCAL_QUICKSTART.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# Local Development on Apple Silicon

Configuration guide for running ROMA locally using Ollama on macOS (M-series chips).
Optimized for zero-egress privacy and local development.

## Requirements
* macOS (Apple Silicon M1+)
* 16GB RAM (Recommended)
* [Ollama](https://ollama.com/) installed

## Setup

1. **Start Ollama Server**
Ensure the Ollama server is running in the background.
```bash
ollama serve
```

2. **Pull Base Model**
The standard profile uses the Llama 3.1 8B model.
```bash
ollama pull llama3.1
```

## Usage

Use the `local_mac` profile, which sets appropriate timeouts (600s) and disables heavy toolkits to conserve resources.

```bash
# Run agent with local profile
python examples/basic_agent.py --profile local_mac
```

### Validation
To verify local-only execution (privacy check):

```bash
python examples/security_audit_demo.py
```

## Configuration Notes
* **Timeouts:** Local inference can be significantly slower than cloud APIs. The `local_mac` profile extends `runtime.timeout` to 600s.
* **Model Selection:** Default is `ollama_chat/llama3.1`. Modify `config/profiles/local_mac.yaml` to use other local models (e.g., Mistral, Gemma).
52 changes: 52 additions & 0 deletions examples/doc_slave_demo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@

import asyncio
import logging
from roma_dspy.core.engine.solve import RecursiveSolver
from roma_dspy.config.schemas.root import ROMAConfig
from roma_dspy.config.schemas.base import RuntimeConfig, LLMConfig

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


async def main() -> None:
    """Generate a docstring for a target script using a local Ollama model.

    Reads ``examples/security_audit_demo.py``, wraps it in a technical-writer
    prompt, and runs it through a :class:`RecursiveSolver` configured to use a
    local Ollama endpoint for every agent role.
    """
    logger.info("Initializing background documentation generator...")

    # Single local (Ollama) LLM configuration shared by every agent role.
    local_config = LLMConfig(
        model="ollama_chat/llama3.1",
        temperature=0.0,
        base_url="http://localhost:11434",
    )

    # Local inference is slow compared to cloud APIs; allow up to an hour.
    config = ROMAConfig(runtime=RuntimeConfig(timeout=3600))
    # Apply local config to all agents
    for agent in ('atomizer', 'planner', 'executor', 'aggregator', 'verifier'):
        getattr(config.agents, agent).llm = local_config

    solver = RecursiveSolver(config=config)
    target_file = "examples/security_audit_demo.py"

    # Fail early with a clear log message instead of an unhandled traceback
    # when the script is launched from the wrong working directory.
    try:
        with open(target_file, "r", encoding="utf-8") as f:
            content = f.read()
    except FileNotFoundError:
        logger.error(f"Target file not found: {target_file} (run from the repo root)")
        return

    task_prompt = f"""
Technical Writer Task:
Generate a comprehensive docstring for the following Python script.
include: Summary, Key Components, and Configuration Details.

Script Content:
```python
{content}
```
"""

    logger.info(f"Processing {target_file}...")
    try:
        result = await solver.async_solve(task_prompt)
        print("\n--- Generated Documentation ---\n")
        print(result.content)
    except Exception as e:
        # Surface solver failures (e.g. Ollama not running) without a traceback.
        logger.error(f"Generation failed: {e}")


if __name__ == "__main__":
    asyncio.run(main())
51 changes: 51 additions & 0 deletions examples/security_audit_demo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@

import asyncio
import logging
from roma_dspy.core.engine.solve import RecursiveSolver
from roma_dspy.config.schemas.root import ROMAConfig
from roma_dspy.config.schemas.base import RuntimeConfig, LLMConfig

# Configure standard logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


async def main() -> None:
    """Run a local, zero-egress security audit over the demo fixture file.

    Scans ``examples/vulnerable_code.py`` for hardcoded secrets using a
    :class:`RecursiveSolver` whose agents all point at a local Ollama server,
    so no code ever leaves the machine.
    """
    logger.info("Starting local security audit...")

    # Configure local runtime: one Ollama LLM config shared by every role.
    local_config = LLMConfig(
        model="ollama_chat/llama3.1",
        temperature=0.0,
        base_url="http://localhost:11434",
    )

    # Local inference is slow; allow a generous timeout.
    config = ROMAConfig(runtime=RuntimeConfig(timeout=3600))
    for agent_config in (
        config.agents.atomizer,
        config.agents.planner,
        config.agents.executor,
        config.agents.aggregator,
        config.agents.verifier,
    ):
        agent_config.llm = local_config

    solver = RecursiveSolver(config=config)

    target_file = "examples/vulnerable_code.py"
    # Guard the file read so a missing fixture produces a clear log message
    # instead of an unhandled traceback.
    try:
        with open(target_file, "r", encoding="utf-8") as f:
            target_code = f.read()
    except FileNotFoundError:
        logger.error(f"Audit target not found: {target_file} (run from the repo root)")
        return

    task_prompt = f"""
Security Audit Task:
Scan the following Python code for hardcoded secrets (API keys, passwords).
Return a list of findings with line numbers and variable names. Redact actual values.

Code:
```python
{target_code}
```
"""

    try:
        result = await solver.async_solve(task_prompt)
        print("\n--- Audit Report ---\n")
        print(result.content)
    except Exception as e:
        # Surface solver failures (e.g. Ollama not running) without a traceback.
        logger.error(f"Audit execution failed: {e}")


if __name__ == "__main__":
    asyncio.run(main())
12 changes: 12 additions & 0 deletions examples/vulnerable_code.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@

def connect_to_aws():
    # NOTE(review): intentionally insecure demo fixture — the hardcoded
    # credentials below exist so examples/security_audit_demo.py has
    # findings to report. Do NOT "fix" them; they are fake example values.
    # Hardcoded credentials - BAD!
    aws_access_key = "AKIA1234567890"
    aws_secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
    region = "us-east-1"
    print(f"Connecting to {region} with {aws_access_key}")

def db_config():
    # Another secret
    # Deliberate hardcoded-password fixture for the audit demo; returns the
    # password so the scanner has a concrete variable name to flag.
    db_password = "super_secret_db_password_!"
    return db_password
14 changes: 13 additions & 1 deletion src/roma_dspy/core/engine/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -1051,8 +1051,20 @@ def _enhance_error_context(
) -> None:
"""Enhance error with agent and task context for better debugging."""
task_id = task.task_id if task is not None else "unknown"
error_str = str(error)

# FRIENDLY ERROR PATCH: Detect local connection failures
if "ConnectionRefusedError" in error_str or "Connect call failed" in error_str:
friendly_msg = (
"\n\n"
"🚨 CONNECTION ERROR: Could not connect to local model (Ollama).\n"
"💡 TIP: Is 'ollama serve' running? Please check your terminal.\n"
" (If you are using a custom port, check your local_mac.yaml config)\n"
)
error_str = f"{friendly_msg}\nOriginal Error: {error_str}"

error_msg = (
f"[{agent_type.value.upper()}] Task '{task_id}' failed: {str(error)}"
f"[{agent_type.value.upper()}] Task '{task_id}' failed: {error_str}"
)
if hasattr(error, "args") and error.args:
error.args = (error_msg,) + error.args[1:]
Expand Down