diff --git a/automaton/ai_enhancer/README.md b/automaton/ai_enhancer/README.md new file mode 100644 index 000000000..5d601f1c9 --- /dev/null +++ b/automaton/ai_enhancer/README.md @@ -0,0 +1,107 @@ +# AI Bounty Description Enhancer + +Multi-LLM powered bounty description enhancer for SolFoundry. + +## Features + +- **Multi-LLM fallback** — tries OpenAI GPT-4, Anthropic Claude, and Google Gemini in order +- **Structured enhancement** — clearer requirements, acceptance criteria, code examples, complexity & timeline estimates, skill breakdown +- **Maintainer approval workflow** — enhanced descriptions are held as pending until a maintainer approves or rejects +- **FastAPI endpoints** — simple REST API to trigger, check, approve, and reject enhancements + +## Quick Start + +```bash +pip install -r automaton/ai_enhancer/requirements.txt +``` + +Set at least one API key: + +```bash +export OPENAI_API_KEY=sk-... +# export ANTHROPIC_API_KEY=... +# export GOOGLE_API_KEY=... +``` + +Mount the router in your FastAPI app: + +```python +from fastapi import FastAPI +from automaton.ai_enhancer import router + +app = FastAPI() +app.include_router(router) +``` + +## API Reference + +| Method | Endpoint | Description | +|--------|----------|-------------| +| POST | `/api/ai-enhance/{bounty_id}` | Trigger AI enhancement | +| GET | `/api/ai-enhance/{bounty_id}/status` | Check enhancement status | +| POST | `/api/ai-enhance/{bounty_id}/approve` | Approve and publish | +| POST | `/api/ai-enhance/{bounty_id}/reject` | Reject and revert | + +### Trigger Enhancement + +```bash +curl -X POST http://localhost:8000/api/ai-enhance/848 \ + -H "Content-Type: application/json" \ + -d '{"title":"Fix login","description":"Users cant log in sometimes"}' +``` + +Response: +```json +{ + "status": "pending", + "enhancement": { + "bounty_id": "848", + "enhanced_title": "Fix intermittent login authentication failure", + "enhanced_description": "...", + "clearer_requirements": ["..."], + 
"acceptance_criteria": ["..."], + "code_examples": ["..."], + "estimated_complexity": "M", + "estimated_timeline": "1-2 days", + "required_skills": ["..."], + "provider_used": "openai/gpt-4o" + } +} +``` + +### Approve + +```bash +curl -X POST "http://localhost:8000/api/ai-enhance/848/approve?reviewer=alice" +``` + +## Architecture + +``` +automaton/ai_enhancer/ +├── __init__.py # Package exports +├── enhancer.py # BountyEnhancer + EnhancedBounty +├── prompt_templates.py # System/user prompts + few-shot examples +├── approval_workflow.py # Maintainer approve/reject pipeline +├── router.py # FastAPI endpoints +├── requirements.txt # Python dependencies +├── README.md # This file +└── providers/ + ├── __init__.py # Exports + ├── base.py # LLMProvider ABC + ├── openai_provider.py # GPT-4 implementation + ├── anthropic_provider.py # Claude implementation + └── google_provider.py # Gemini implementation +``` + +## Configuration + +| Env Variable | Description | Required | +|---|---|---| +| `OPENAI_API_KEY` | OpenAI API key | One of three required | +| `ANTHROPIC_API_KEY` | Anthropic API key | One of three required | +| `GOOGLE_API_KEY` | Google AI API key | One of three required | + +## License + +MIT diff --git a/automaton/ai_enhancer/__init__.py b/automaton/ai_enhancer/__init__.py new file mode 100644 index 000000000..80232489e --- /dev/null +++ b/automaton/ai_enhancer/__init__.py @@ -0,0 +1,7 @@ +"""AI Bounty Description Enhancer for SolFoundry.""" + +from .enhancer import BountyEnhancer, EnhancedBounty +from .router import router + +__all__ = ["BountyEnhancer", "EnhancedBounty", "router"] +__version__ = "0.1.0" diff --git a/automaton/ai_enhancer/approval_workflow.py b/automaton/ai_enhancer/approval_workflow.py new file mode 100644 index 000000000..6e08fb430 --- /dev/null +++ b/automaton/ai_enhancer/approval_workflow.py @@ -0,0 +1,86 @@ +"""Maintainer approval workflow for enhanced bounty descriptions.""" + +from __future__ import annotations + +import logging +from 
from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Optional

logger = logging.getLogger(__name__)


def _utcnow() -> datetime:
    # Single source of timestamps — always timezone-aware UTC.
    return datetime.now(timezone.utc)


class ApprovalStatus(str, Enum):
    """Lifecycle states of an enhancement awaiting maintainer review."""

    PENDING = "pending"
    APPROVED = "approved"
    REJECTED = "rejected"
    PUBLISHED = "published"
    REVERTED = "reverted"


@dataclass
class ApprovalRequest:
    """One enhancement travelling through the approval pipeline."""

    bounty_id: str
    enhancer_result: dict[str, Any]
    status: ApprovalStatus = ApprovalStatus.PENDING
    reviewer: Optional[str] = None
    reviewed_at: Optional[datetime] = None
    comment_id: Optional[str] = None
    created_at: datetime = field(default_factory=_utcnow)


class ApprovalWorkflow:
    """Drives the approve/reject lifecycle of enhanced bounties."""

    def __init__(self) -> None:
        # Keyed by bounty id. A production deployment would persist this
        # in a database; an in-memory dict is enough for now.
        self._store: dict[str, ApprovalRequest] = {}

    def submit(self, bounty_id: str, enhanced: dict[str, Any]) -> ApprovalRequest:
        """Register an enhanced bounty as pending maintainer approval."""
        record = ApprovalRequest(bounty_id=bounty_id, enhancer_result=enhanced)
        self._store[bounty_id] = record
        logger.info("Submitted enhancement for bounty %s — pending approval", bounty_id)
        return record

    def get_status(self, bounty_id: str) -> Optional[ApprovalStatus]:
        """Current approval status, or None when nothing was submitted."""
        record = self._store.get(bounty_id)
        if record is None:
            return None
        return record.status

    def get_request(self, bounty_id: str) -> Optional[ApprovalRequest]:
        """Full approval record, or None when nothing was submitted."""
        return self._store.get(bounty_id)

    def approve(self, bounty_id: str, reviewer: str) -> ApprovalRequest:
        """Approve the enhancement and immediately publish it.

        Raises KeyError when no enhancement exists, ValueError when the
        request is not in the pending state.
        """
        record = self._review(bounty_id, reviewer, ApprovalStatus.APPROVED)
        # Publish — in production this would update the bounty record.
        record.status = ApprovalStatus.PUBLISHED
        logger.info("Bounty %s approved and published by %s", bounty_id, reviewer)
        return record

    def reject(self, bounty_id: str, reviewer: str) -> ApprovalRequest:
        """Reject the enhancement and revert to the original description.

        Raises KeyError when no enhancement exists, ValueError when the
        request is not in the pending state.
        """
        record = self._review(bounty_id, reviewer, ApprovalStatus.REJECTED)
        record.status = ApprovalStatus.REVERTED
        logger.info("Bounty %s rejected by %s — reverted", bounty_id, reviewer)
        return record

    def _review(self, bounty_id: str, reviewer: str, verdict: ApprovalStatus) -> ApprovalRequest:
        # Shared guard + bookkeeping for approve/reject.
        record = self._require(bounty_id)
        if record.status != ApprovalStatus.PENDING:
            raise ValueError(f"Bounty {bounty_id} is {record.status.value}, not pending")
        record.status = verdict
        record.reviewer = reviewer
        record.reviewed_at = _utcnow()
        return record

    def _require(self, bounty_id: str) -> ApprovalRequest:
        # Lookup that raises instead of returning None.
        record = self._store.get(bounty_id)
        if record is None:
            raise KeyError(f"No enhancement found for bounty {bounty_id}")
        return record
class BountyEnhancer:
    """Enhances bounty descriptions using multi-LLM analysis with fallback.

    Providers are tried in order; the first one that returns a parseable
    result wins. Providers whose SDK is missing or misconfigured are
    skipped at construction time instead of aborting the whole enhancer —
    previously a single missing SDK made ``BountyEnhancer()`` raise,
    defeating the documented fallback behavior.
    """

    def __init__(
        self,
        providers: Optional[list[LLMProvider]] = None,
        provider_order: Optional[list[type[LLMProvider]]] = None,
    ) -> None:
        """Build the provider chain.

        Args:
            providers: Pre-built provider instances. When given, used as-is.
            provider_order: Provider classes to instantiate, tried in order.
                Defaults to ``DEFAULT_PROVIDER_ORDER``.
        """
        self.provider_order = provider_order or DEFAULT_PROVIDER_ORDER
        if providers:
            self._providers: list[LLMProvider] = providers
        else:
            # Instantiate each provider independently so one missing SDK or
            # API key does not take down the whole fallback chain.
            self._providers = []
            for cls in self.provider_order:
                try:
                    self._providers.append(cls())
                except Exception as exc:
                    logging.getLogger(__name__).warning(
                        "Provider %s unavailable, skipping: %s", cls.__name__, exc
                    )

    async def enhance_description(self, bounty: dict[str, Any]) -> EnhancedBounty:
        """Enhance a bounty description using multi-LLM with fallback.

        Args:
            bounty: Raw bounty dict; ``id``, ``title`` and ``description``
                keys are read (all optional).

        Returns:
            An ``EnhancedBounty`` — populated on success, or with ``status``
            set to ``"error"`` and ``error`` filled when every provider
            failed (including when no provider could be constructed at all).
        """
        bounty_id = str(bounty.get("id", "unknown"))
        result = EnhancedBounty(
            bounty_id=bounty_id,
            original_title=bounty.get("title", ""),
            original_description=bounty.get("description", ""),
        )

        system_prompt = build_system_prompt()
        user_prompt = build_user_prompt(bounty)

        last_error: Optional[Exception] = None
        for provider in self._providers:
            try:
                logger.info("Attempting enhancement with %s", provider.name)
                # Hard 60s cap per provider so a hung API call cannot block
                # the request forever; a timeout counts as provider failure.
                raw = await asyncio.wait_for(
                    provider.enhance(system_prompt, user_prompt, bounty),
                    timeout=60,
                )
                self._merge_raw(result, raw, provider.name)
                return result
            except Exception as exc:
                logger.warning("Provider %s failed: %s", provider.name, exc)
                last_error = exc
                continue

        result.error = f"All providers failed. Last: {last_error}"
        result.status = "error"
        return result

    # ------------------------------------------------------------------
    @staticmethod
    def _merge_raw(result: EnhancedBounty, raw: dict, provider_name: str) -> None:
        """Copy the provider's structured JSON fields onto the result."""
        result.enhanced_title = raw.get("enhanced_title")
        result.enhanced_description = raw.get("enhanced_description")
        result.clearer_requirements = raw.get("clearer_requirements", [])
        result.acceptance_criteria = raw.get("acceptance_criteria", [])
        result.code_examples = raw.get("code_examples", [])
        result.estimated_complexity = raw.get("estimated_complexity")
        result.estimated_timeline = raw.get("estimated_timeline")
        result.required_skills = raw.get("required_skills", [])
        result.provider_used = provider_name
+ +## Output JSON Schema +{ + "enhanced_title": "string — clearer title if original is vague", + "enhanced_description": "string — full enhanced description in markdown", + "clearer_requirements": ["string — list of specific requirements"], + "acceptance_criteria": ["string — testable criteria"], + "code_examples": ["string — relevant code snippets or patterns"], + "estimated_complexity": "string — one of: S, M, L, XL", + "estimated_timeline": "string — e.g. '2-3 days'", + "required_skills": ["string — specific skills needed"] +} +""" + +FEW_SHOT_BAD = """\ +Title: Fix the login bug +Description: Users can't log in sometimes. Fix it. +""" + +FEW_SHOT_GOOD = """\ +{ + "enhanced_title": "Fix intermittent OAuth2 token refresh failure on login", + "enhanced_description": "Users experience intermittent login failures when their OAuth2 refresh token has expired. The issue occurs approximately 5% of the time and is caused by a race condition in the token refresh handler.\\n\\n## Requirements\\n- Identify and fix the race condition in `auth/token_refresh.py`\\n- Ensure token refresh is atomic and retry-safe\\n- Add logging for refresh failures\\n\\n## Acceptance Criteria\\n- Login succeeds 100% over 1000 test iterations\\n- Token refresh is atomic (no partial state)\\n- Error logs capture failure context", + "clearer_requirements": [ + "Fix race condition in OAuth2 token refresh handler", + "Ensure atomic token refresh with proper locking", + "Add structured logging for all refresh failures" + ], + "acceptance_criteria": [ + "Login succeeds 100% over 1000 consecutive automated test iterations", + "Token refresh uses proper async locking — no concurrent refresh for same user", + "All refresh failures logged with correlation ID and token metadata" + ], + "code_examples": [ + "async with refresh_lock:\\n token = await oauth_client.refresh_token(refresh_token)" + ], + "estimated_complexity": "M", + "estimated_timeline": "1-2 days", + "required_skills": ["Python", "OAuth2", 
def build_user_prompt(bounty: dict[str, Any]) -> str:
    """Render a bounty dict into the user-turn prompt for the LLM."""
    sections: list[str] = [
        "## Bounty to Enhance\n",
        f"**Title:** {bounty.get('title', 'Untitled Bounty')}",
        f"**Tier:** {bounty.get('tier', 'unknown')}",
        f"**Reward:** {bounty.get('reward', 'unknown')}",
    ]
    labels = bounty.get("labels", [])
    if labels:
        sections.append(f"**Labels:** {', '.join(labels)}")
    skills = bounty.get("skills", [])
    if skills:
        sections.append(f"**Listed Skills:** {', '.join(skills)}")
    description = bounty.get("description", "")
    sections.append(f"\n**Description:**\n{description}")
    sections.append("\nEnhance this bounty description. Output valid JSON only.")
    return "\n".join(sections)
class AnthropicProvider(LLMProvider):
    """Enhancement provider backed by Anthropic's Claude models."""

    def __init__(self, model: str = "claude-sonnet-4-20250514", api_key: str | None = None) -> None:
        """Create the async client; key falls back to ANTHROPIC_API_KEY."""
        if AsyncAnthropic is None:
            raise ImportError("anthropic package is required: pip install anthropic")
        self.model = model
        key = api_key or os.getenv("ANTHROPIC_API_KEY")
        self._client = AsyncAnthropic(api_key=key)

    @property
    def name(self) -> str:
        """Provider identifier, e.g. ``anthropic/claude-sonnet-4-20250514``."""
        return f"anthropic/{self.model}"

    async def enhance(self, system_prompt: str, user_prompt: str, bounty: dict[str, Any]) -> dict:
        """Call Claude and parse its reply as the enhancement JSON dict."""
        response = await self._client.messages.create(
            model=self.model,
            max_tokens=4096,
            system=system_prompt,
            messages=[{"role": "user", "content": user_prompt}],
        )
        payload = response.content[0].text
        # The model may wrap the JSON in a fenced code block — unwrap it.
        for fence in ("```json", "```"):
            if fence in payload:
                payload = payload.split(fence)[1].split("```")[0]
                break
        return json.loads(payload.strip())
class GoogleProvider(LLMProvider):
    """Enhancement provider backed by Google's Gemini models."""

    def __init__(self, model: str = "gemini-2.0-flash", api_key: str | None = None) -> None:
        """Configure the SDK; key falls back to GOOGLE_API_KEY."""
        if genai is None:
            raise ImportError("google-generativeai package is required")
        self.model_name = model
        genai.configure(api_key=api_key or os.getenv("GOOGLE_API_KEY"))
        self._model = genai.GenerativeModel(model)

    @property
    def name(self) -> str:
        """Provider identifier, e.g. ``google/gemini-2.0-flash``."""
        return f"google/{self.model_name}"

    async def enhance(self, system_prompt: str, user_prompt: str, bounty: dict[str, Any]) -> dict:
        """Call Gemini and parse its reply as the enhancement JSON dict."""
        # Gemini takes a single prompt; fold system + user turns together.
        combined = f"{system_prompt}\n\n{user_prompt}"
        response = await self._model.generate_content_async(combined)
        payload = response.text
        # The model may wrap the JSON in a fenced code block — unwrap it.
        for fence in ("```json", "```"):
            if fence in payload:
                payload = payload.split(fence)[1].split("```")[0]
                break
        return json.loads(payload.strip())
class OpenAIProvider(LLMProvider):
    """Enhancement provider backed by OpenAI chat models (GPT-4 family)."""

    def __init__(self, model: str = "gpt-4o", api_key: str | None = None) -> None:
        """Create the async client; key falls back to OPENAI_API_KEY."""
        if AsyncOpenAI is None:
            raise ImportError("openai package is required: pip install openai")
        self.model = model
        key = api_key or os.getenv("OPENAI_API_KEY")
        self._client = AsyncOpenAI(api_key=key)

    @property
    def name(self) -> str:
        """Provider identifier, e.g. ``openai/gpt-4o``."""
        return f"openai/{self.model}"

    async def enhance(self, system_prompt: str, user_prompt: str, bounty: dict[str, Any]) -> dict:
        """Call the chat completions API in JSON mode and parse the reply."""
        conversation = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
        response = await self._client.chat.completions.create(
            model=self.model,
            messages=conversation,
            # JSON mode guarantees syntactically valid JSON output.
            response_format={"type": "json_object"},
            temperature=0.3,
        )
        payload = response.choices[0].message.content or "{}"
        return json.loads(payload)
"""FastAPI router for AI bounty description enhancement."""

from dataclasses import asdict
from typing import Any

from fastapi import APIRouter, HTTPException

from .enhancer import BountyEnhancer, EnhancedBounty
from .approval_workflow import ApprovalWorkflow

router = APIRouter(prefix="/api/ai-enhance", tags=["ai-enhancer"])

# Module-level singletons — wire up with DI in production
_workflow = ApprovalWorkflow()
_enhancer = BountyEnhancer()


# ── Trigger enhancement ──────────────────────────────────────────────
@router.post("/{bounty_id}")
async def trigger_enhancement(bounty_id: str, bounty: dict[str, Any]) -> dict[str, Any]:
    """Trigger AI enhancement for a bounty description.

    The path id overrides any ``id`` key in the posted body. Returns the
    pending enhancement; raises 502 when every LLM provider failed.
    """
    bounty["id"] = bounty_id
    result: EnhancedBounty = await _enhancer.enhance_description(bounty)

    if result.error:
        raise HTTPException(status_code=502, detail=result.error)

    data = asdict(result)
    _workflow.submit(bounty_id, data)
    return {"status": "pending", "enhancement": data}


# ── Check status ─────────────────────────────────────────────────────
@router.get("/{bounty_id}/status")
async def get_status(bounty_id: str) -> dict[str, Any]:
    """Check the enhancement/approval status for a bounty (404 if none)."""
    status = _workflow.get_status(bounty_id)
    if status is None:
        raise HTTPException(status_code=404, detail="No enhancement found for this bounty")
    return {"bounty_id": bounty_id, "status": status.value}


# ── Approve ──────────────────────────────────────────────────────────
@router.post("/{bounty_id}/approve")
async def approve_enhancement(bounty_id: str, reviewer: str = "maintainer") -> dict[str, Any]:
    """Approve an enhanced bounty description and publish it.

    404 when no enhancement exists, 409 when it is not in pending state.
    """
    try:
        req = _workflow.approve(bounty_id, reviewer)
    except KeyError as exc:
        # `from exc` preserves the original cause in tracebacks (B904).
        raise HTTPException(status_code=404, detail="No enhancement found") from exc
    except ValueError as exc:
        raise HTTPException(status_code=409, detail=str(exc)) from exc
    return {"bounty_id": bounty_id, "status": req.status.value, "reviewer": reviewer}


# ── Reject ───────────────────────────────────────────────────────────
@router.post("/{bounty_id}/reject")
async def reject_enhancement(bounty_id: str, reviewer: str = "maintainer") -> dict[str, Any]:
    """Reject an enhanced bounty description and revert to the original.

    404 when no enhancement exists, 409 when it is not in pending state.
    """
    try:
        req = _workflow.reject(bounty_id, reviewer)
    except KeyError as exc:
        raise HTTPException(status_code=404, detail="No enhancement found") from exc
    except ValueError as exc:
        raise HTTPException(status_code=409, detail=str(exc)) from exc
    return {"bounty_id": bounty_id, "status": req.status.value, "reviewer": reviewer}