From 5d785e1d115954f869d1e115b840002304313900 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EC=A2=85=EB=AF=BC?= Date: Fri, 16 Jan 2026 13:53:05 +0900 Subject: [PATCH] feat(infra): add local Mac profile and privacy validation demos --- config/profiles/local_mac.yaml | 48 +++++++++++++++++++++++++ docs/LOCAL_QUICKSTART.md | 43 +++++++++++++++++++++++ examples/doc_slave_demo.py | 52 ++++++++++++++++++++++++++++ examples/security_audit_demo.py | 51 +++++++++++++++++++++++++++ examples/vulnerable_code.py | 12 +++++++ src/roma_dspy/core/engine/runtime.py | 14 +++++++- 6 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 config/profiles/local_mac.yaml create mode 100644 docs/LOCAL_QUICKSTART.md create mode 100644 examples/doc_slave_demo.py create mode 100644 examples/security_audit_demo.py create mode 100644 examples/vulnerable_code.py diff --git a/config/profiles/local_mac.yaml b/config/profiles/local_mac.yaml new file mode 100644 index 00000000..5ef322d5 --- /dev/null +++ b/config/profiles/local_mac.yaml @@ -0,0 +1,48 @@ +name: local_mac +description: "Optimized profile for running ROMA locally on Apple Silicon (M-series) using Ollama." 
+ +runtime: + max_depth: 3 + timeout: 600 # Extended timeout for local inference speed + +agents: + atomizer: + llm: + model: "ollama_chat/llama3.1" + base_url: "http://localhost:11434" + temperature: 0.1 + prediction_strategy: "chain_of_thought" + + planner: + llm: + model: "ollama_chat/llama3.1" + base_url: "http://localhost:11434" + temperature: 0.2 + prediction_strategy: "chain_of_thought" + + executor: + llm: + model: "ollama_chat/llama3.1" + base_url: "http://localhost:11434" + temperature: 0.1 + max_tokens: 4000 + prediction_strategy: "react" + # Disable heavy toolkits by default to save RAM + toolkits: + - class_name: "FileToolkit" + enabled: true + - class_name: "CalculatorToolkit" + enabled: true + + aggregator: + llm: + model: "ollama_chat/llama3.1" + base_url: "http://localhost:11434" + temperature: 0.1 + prediction_strategy: "chain_of_thought" + + verifier: + llm: + model: "ollama_chat/llama3.1" + base_url: "http://localhost:11434" + temperature: 0.0 diff --git a/docs/LOCAL_QUICKSTART.md b/docs/LOCAL_QUICKSTART.md new file mode 100644 index 00000000..c807e8a3 --- /dev/null +++ b/docs/LOCAL_QUICKSTART.md @@ -0,0 +1,43 @@ +# Local Development on Apple Silicon + +Configuration guide for running ROMA locally using Ollama on macOS (M-series chips). +Optimized for zero-egress privacy and local development. + +## Requirements +* macOS (Apple Silicon M1+) +* 16GB RAM (Recommended) +* [Ollama](https://ollama.com/) installed + +## Setup + +1. **Start Ollama Server** + Ensure the Ollama server is running in the background. + ```bash + ollama serve + ``` + +2. **Pull Base Model** + Standard profile uses Llama 3.1 8B. + ```bash + ollama pull llama3.1 + ``` + +## Usage + +Use the `local_mac` profile, which sets appropriate timeouts (600s) and disables heavy toolkits to conserve resources. 
+ +```bash +# Run agent with local profile +python examples/basic_agent.py --profile local_mac +``` + +### Validation +To verify local-only execution (privacy check): + +```bash +python examples/security_audit_demo.py +``` + +## Configuration Notes +* **Timeouts:** Local inference can be significantly slower than cloud APIs. The `local_mac` profile extends `runtime.timeout` to 600s. +* **Model Selection:** Default is `ollama_chat/llama3.1`. Modify `config/profiles/local_mac.yaml` to use other local models (e.g., Mistral, Gemma). diff --git a/examples/doc_slave_demo.py b/examples/doc_slave_demo.py new file mode 100644 index 00000000..7d9d93ec --- /dev/null +++ b/examples/doc_slave_demo.py @@ -0,0 +1,52 @@ + +import asyncio +import logging +from roma_dspy.core.engine.solve import RecursiveSolver +from roma_dspy.config.schemas.root import ROMAConfig +from roma_dspy.config.schemas.base import RuntimeConfig, LLMConfig + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +async def main(): + logger.info("Initializing background documentation generator...") + + # Local runtime configuration + local_config = LLMConfig( + model="ollama_chat/llama3.1", + temperature=0.0, + base_url="http://localhost:11434" + ) + + config = ROMAConfig(runtime=RuntimeConfig(timeout=3600)) + # Apply local config to all agents + for agent in ['atomizer', 'planner', 'executor', 'aggregator', 'verifier']: + getattr(config.agents, agent).llm = local_config + + solver = RecursiveSolver(config=config) + target_file = "examples/security_audit_demo.py" + + with open(target_file, "r") as f: + content = f.read() + + task_prompt = f""" + Technical Writer Task: + Generate a comprehensive docstring for the following Python script. + include: Summary, Key Components, and Configuration Details. 
+ + Script Content: + ```python + {content} + ``` + """ + + logger.info(f"Processing {target_file}...") + try: + result = await solver.async_solve(task_prompt) + print("\n--- Generated Documentation ---\n") + print(result.content) + except Exception as e: + logger.error(f"Generation failed: {e}") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/security_audit_demo.py b/examples/security_audit_demo.py new file mode 100644 index 00000000..34c64d6c --- /dev/null +++ b/examples/security_audit_demo.py @@ -0,0 +1,51 @@ + +import asyncio +import logging +from roma_dspy.core.engine.solve import RecursiveSolver +from roma_dspy.config.schemas.root import ROMAConfig +from roma_dspy.config.schemas.base import RuntimeConfig, LLMConfig + +# Configure standard logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +async def main(): + logger.info("Starting local security audit...") + + # Configure local runtime + local_config = LLMConfig( + model="ollama_chat/llama3.1", + temperature=0.0, + base_url="http://localhost:11434" + ) + + config = ROMAConfig(runtime=RuntimeConfig(timeout=3600)) + for agent_config in [config.agents.atomizer, config.agents.planner, config.agents.executor, + config.agents.aggregator, config.agents.verifier]: + agent_config.llm = local_config + + solver = RecursiveSolver(config=config) + + with open("examples/vulnerable_code.py", "r") as f: + target_code = f.read() + + task_prompt = f""" + Security Audit Task: + Scan the following Python code for hardcoded secrets (API keys, passwords). + Return a list of findings with line numbers and variable names. Redact actual values. 
+ + Code: + ```python + {target_code} + ``` + """ + + try: + result = await solver.async_solve(task_prompt) + print("\n--- Audit Report ---\n") + print(result.content) + except Exception as e: + logger.error(f"Audit execution failed: {e}") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/vulnerable_code.py b/examples/vulnerable_code.py new file mode 100644 index 00000000..1648360b --- /dev/null +++ b/examples/vulnerable_code.py @@ -0,0 +1,12 @@ + +def connect_to_aws(): + # Hardcoded credentials - BAD! + aws_access_key = "AKIA1234567890" + aws_secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + region = "us-east-1" + print(f"Connecting to {region} with {aws_access_key}") + +def db_config(): + # Another secret + db_password = "super_secret_db_password_!" + return db_password diff --git a/src/roma_dspy/core/engine/runtime.py b/src/roma_dspy/core/engine/runtime.py index 45f6c980..34767f47 100644 --- a/src/roma_dspy/core/engine/runtime.py +++ b/src/roma_dspy/core/engine/runtime.py @@ -1051,8 +1051,20 @@ def _enhance_error_context( ) -> None: """Enhance error with agent and task context for better debugging.""" task_id = task.task_id if task is not None else "unknown" + error_str = str(error) + + # FRIENDLY ERROR PATCH: Detect local connection failures. + # NOTE: str(ConnectionRefusedError) is the errno text ("Connection refused"), + # not the class name, so match both forms plus asyncio's "Connect call failed". + if "ConnectionRefusedError" in error_str or "Connection refused" in error_str or "Connect call failed" in error_str: + friendly_msg = ( + "\n\n" + "🚨 CONNECTION ERROR: Could not connect to local model (Ollama).\n" + "💡 TIP: Is 'ollama serve' running? Please check your terminal.\n" + " (If you are using a custom port, check your local_mac.yaml config)\n" + ) + error_str = f"{friendly_msg}\nOriginal Error: {error_str}" + error_msg = ( - f"[{agent_type.value.upper()}] Task '{task_id}' failed: {str(error)}" + f"[{agent_type.value.upper()}] Task '{task_id}' failed: {error_str}" ) if hasattr(error, "args") and error.args: error.args = (error_msg,) + error.args[1:]