Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
107 changes: 107 additions & 0 deletions automaton/ai_enhancer/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
# AI Bounty Description Enhancer

Multi-LLM powered bounty description enhancer for SolFoundry.

## Features

- **Multi-LLM fallback** — tries OpenAI GPT-4, Anthropic Claude, and Google Gemini in order
- **Structured enhancement** — clearer requirements, acceptance criteria, code examples, complexity & timeline estimates, skill breakdown
- **Maintainer approval workflow** — enhanced descriptions are held as pending until a maintainer approves or rejects
- **FastAPI endpoints** — simple REST API to trigger, check, approve, and reject enhancements

## Quick Start

```bash
pip install -r automaton/ai_enhancer/requirements.txt
```

Set at least one API key:

```bash
export OPENAI_API_KEY=sk-...
# export ANTHROPIC_API_KEY=...
# export GOOGLE_API_KEY=...
```

Mount the router in your FastAPI app:

```python
from fastapi import FastAPI
from automaton.ai_enhancer import router

app = FastAPI()
app.include_router(router)
```

## API Reference

| Method | Endpoint | Description |
|--------|----------|-------------|
| POST | `/api/ai-enhance/{bounty_id}` | Trigger AI enhancement |
| GET | `/api/ai-enhance/{bounty_id}/status` | Check enhancement status |
| POST | `/api/ai-enhance/{bounty_id}/approve` | Approve and publish |
| POST | `/api/ai-enhance/{bounty_id}/reject` | Reject and revert |

### Trigger Enhancement

```bash
curl -X POST http://localhost:8000/api/ai-enhance/848 \
-H "Content-Type: application/json" \
-d '{"title":"Fix login","description":"Users cant log in sometimes"}'
```

Response:
```json
{
"status": "pending",
"enhancement": {
"bounty_id": "848",
"enhanced_title": "Fix intermittent login authentication failure",
"enhanced_description": "...",
"clearer_requirements": ["..."],
"acceptance_criteria": ["..."],
"code_examples": ["..."],
"estimated_complexity": "M",
"estimated_timeline": "1-2 days",
"required_skills": ["..."],
"provider_used": "openai/gpt-4o"
}
}
```

### Approve

```bash
curl -X POST "http://localhost:8000/api/ai-enhance/848/approve?reviewer=alice"
```

## Architecture

```
automaton/ai_enhancer/
├── __init__.py # Package exports
├── enhancer.py # BountyEnhancer + EnhancedBounty
├── prompt_templates.py # System/user prompts + few-shot examples
├── approval_workflow.py # Maintainer approve/reject pipeline
├── router.py # FastAPI endpoints
├── requirements.txt # Python dependencies
├── README.md # This file
└── providers/
├── __init__.py # Exports
├── base.py # LLMProvider ABC
├── openai_provider.py # GPT-4 implementation
├── anthropic_provider.py # Claude implementation
└── google_provider.py # Gemini implementation
```

## Configuration

| Env Variable | Description | Required |
|---|---|---|
| `OPENAI_API_KEY` | OpenAI API key | At least one of the three |
| `ANTHROPIC_API_KEY` | Anthropic API key | At least one of the three |
| `GOOGLE_API_KEY` | Google AI API key | At least one of the three |

## License

MIT
7 changes: 7 additions & 0 deletions automaton/ai_enhancer/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
"""AI Bounty Description Enhancer for SolFoundry."""

from .enhancer import BountyEnhancer, EnhancedBounty
from .router import router

__all__ = ["BountyEnhancer", "EnhancedBounty", "router"]
__version__ = "0.1.0"
86 changes: 86 additions & 0 deletions automaton/ai_enhancer/approval_workflow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
"""Maintainer approval workflow for enhanced bounty descriptions."""

from __future__ import annotations

import logging
from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Optional

logger = logging.getLogger(__name__)


class ApprovalStatus(str, Enum):
    """Lifecycle states of an enhancement approval request.

    Mixes in ``str`` so status values serialize directly in JSON responses.
    APPROVED and REJECTED are transient: ``ApprovalWorkflow.approve``/``reject``
    immediately advance them to PUBLISHED / REVERTED before returning.
    """

    PENDING = "pending"      # awaiting maintainer review
    APPROVED = "approved"    # maintainer accepted (transient)
    REJECTED = "rejected"    # maintainer declined (transient)
    PUBLISHED = "published"  # enhanced description is live
    REVERTED = "reverted"    # original description restored


@dataclass
class ApprovalRequest:
    """Tracks an enhancement through the approval pipeline."""

    bounty_id: str                          # id of the bounty being enhanced
    enhancer_result: dict[str, Any]         # raw enhancement payload from the LLM
    status: ApprovalStatus = ApprovalStatus.PENDING  # current pipeline state
    reviewer: Optional[str] = None          # maintainer who approved/rejected
    reviewed_at: Optional[datetime] = None  # UTC timestamp of the review decision
    # NOTE(review): never assigned anywhere in this module — presumably an
    # issue-comment reference set by a caller; confirm before relying on it.
    comment_id: Optional[str] = None
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))  # submission time (UTC)


class ApprovalWorkflow:
    """Manages the approve/reject lifecycle of enhanced bounties.

    Requests move PENDING -> APPROVED -> PUBLISHED on approval, or
    PENDING -> REJECTED -> REVERTED on rejection.  Only PENDING requests
    may be reviewed; anything else raises ``ValueError``.
    """

    def __init__(self) -> None:
        # In production this would be a database; dict for now.
        self._store: dict[str, ApprovalRequest] = {}

    def submit(self, bounty_id: str, enhanced: dict[str, Any]) -> ApprovalRequest:
        """Store an enhanced bounty as pending maintainer approval.

        Re-submitting the same ``bounty_id`` replaces any previous request.
        """
        req = ApprovalRequest(bounty_id=bounty_id, enhancer_result=enhanced)
        self._store[bounty_id] = req
        logger.info("Submitted enhancement for bounty %s — pending approval", bounty_id)
        return req

    def get_status(self, bounty_id: str) -> Optional[ApprovalStatus]:
        """Return the current approval status for a bounty, or None if unknown."""
        req = self._store.get(bounty_id)
        return req.status if req else None

    def get_request(self, bounty_id: str) -> Optional[ApprovalRequest]:
        """Return the full request record, or None if none was submitted."""
        return self._store.get(bounty_id)

    def approve(self, bounty_id: str, reviewer: str) -> ApprovalRequest:
        """Approve and publish the enhanced description.

        Raises:
            KeyError: if no enhancement exists for ``bounty_id``.
            ValueError: if the request is not pending.
        """
        req = self._review(bounty_id, reviewer, ApprovalStatus.APPROVED)
        # Publish — in production this would update the bounty record
        req.status = ApprovalStatus.PUBLISHED
        logger.info("Bounty %s approved and published by %s", bounty_id, reviewer)
        return req

    def reject(self, bounty_id: str, reviewer: str) -> ApprovalRequest:
        """Reject and revert the enhanced description.

        Raises:
            KeyError: if no enhancement exists for ``bounty_id``.
            ValueError: if the request is not pending.
        """
        req = self._review(bounty_id, reviewer, ApprovalStatus.REJECTED)
        req.status = ApprovalStatus.REVERTED
        logger.info("Bounty %s rejected by %s — reverted", bounty_id, reviewer)
        return req

    # ------------------------------------------------------------------
    def _review(
        self, bounty_id: str, reviewer: str, decision: ApprovalStatus
    ) -> ApprovalRequest:
        """Shared pending-check and reviewer bookkeeping for approve/reject."""
        req = self._require(bounty_id)
        if req.status != ApprovalStatus.PENDING:
            raise ValueError(f"Bounty {bounty_id} is {req.status.value}, not pending")
        req.status = decision
        req.reviewer = reviewer
        req.reviewed_at = datetime.now(timezone.utc)
        return req

    def _require(self, bounty_id: str) -> ApprovalRequest:
        """Return the stored request, raising KeyError when absent."""
        req = self._store.get(bounty_id)
        if req is None:
            raise KeyError(f"No enhancement found for bounty {bounty_id}")
        return req
99 changes: 99 additions & 0 deletions automaton/ai_enhancer/enhancer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
"""AI Bounty Description Enhancer - main logic."""

from __future__ import annotations

import asyncio
import logging
from dataclasses import dataclass, field
from typing import Any, Optional

from .providers.base import LLMProvider
from .providers.openai_provider import OpenAIProvider
from .providers.anthropic_provider import AnthropicProvider
from .providers.google_provider import GoogleProvider
from .prompt_templates import build_system_prompt, build_user_prompt

logger = logging.getLogger(__name__)

# Provider fallback order
DEFAULT_PROVIDER_ORDER: list[type[LLMProvider]] = [
OpenAIProvider,
AnthropicProvider,
GoogleProvider,
]


@dataclass
class EnhancedBounty:
    """Result of an AI enhancement pass."""

    bounty_id: str                 # stringified bounty id ("unknown" if absent)
    original_title: str            # title exactly as supplied by the author
    original_description: str      # description exactly as supplied by the author
    enhanced_title: Optional[str] = None        # LLM-suggested clearer title
    enhanced_description: Optional[str] = None  # LLM-rewritten description (markdown)
    clearer_requirements: list[str] = field(default_factory=list)  # specific requirements
    acceptance_criteria: list[str] = field(default_factory=list)   # testable criteria
    code_examples: list[str] = field(default_factory=list)         # snippets / usage patterns
    estimated_complexity: Optional[str] = None  # one of S / M / L / XL per prompt schema
    estimated_timeline: Optional[str] = None    # e.g. "1-2 days"
    required_skills: list[str] = field(default_factory=list)       # skills the work needs
    provider_used: Optional[str] = None         # name of the provider that succeeded
    status: str = "pending"  # pending | approved | rejected | error (all providers failed)
    error: Optional[str] = None  # last provider error message when status == "error"


class BountyEnhancer:
    """Enhances bounty descriptions using multi-LLM analysis.

    Providers are tried in order; the first call that returns without
    raising wins.  When every provider fails, the result is returned with
    ``status == "error"`` and the last exception's message.
    """

    # Per-provider timeout in seconds; a slow provider falls through to the next.
    PROVIDER_TIMEOUT: float = 60.0

    def __init__(
        self,
        providers: Optional[list[LLMProvider]] = None,
        provider_order: Optional[list[type[LLMProvider]]] = None,
    ) -> None:
        """Create an enhancer.

        Args:
            providers: Pre-built provider instances; used as-is when given.
            provider_order: Provider classes to instantiate (in fallback order)
                when ``providers`` is not supplied.  Defaults to
                ``DEFAULT_PROVIDER_ORDER``.
        """
        self.provider_order = provider_order or DEFAULT_PROVIDER_ORDER
        self._providers: list[LLMProvider] = providers or [cls() for cls in self.provider_order]

    async def enhance_description(self, bounty: dict[str, Any]) -> EnhancedBounty:
        """Enhance a bounty description using multi-LLM with fallback.

        Args:
            bounty: Raw bounty record; ``id``, ``title`` and ``description``
                keys are read (all optional).

        Returns:
            An ``EnhancedBounty`` — populated from the first provider that
            succeeds, or with ``status="error"`` if all providers fail.
        """
        bounty_id = str(bounty.get("id", "unknown"))
        result = EnhancedBounty(
            bounty_id=bounty_id,
            original_title=bounty.get("title", ""),
            original_description=bounty.get("description", ""),
        )

        system_prompt = build_system_prompt()
        user_prompt = build_user_prompt(bounty)

        last_error: Optional[Exception] = None
        for provider in self._providers:
            try:
                logger.info("Attempting enhancement with %s", provider.name)
                raw = await asyncio.wait_for(
                    provider.enhance(system_prompt, user_prompt, bounty),
                    timeout=self.PROVIDER_TIMEOUT,
                )
                self._merge_raw(result, raw, provider.name)
                return result
            except Exception as exc:
                # Includes asyncio.TimeoutError from wait_for; fall through
                # to the next provider in the configured order.
                logger.warning("Provider %s failed: %s", provider.name, exc)
                last_error = exc
                continue

        result.error = f"All providers failed. Last: {last_error}"
        result.status = "error"
        return result

    # ------------------------------------------------------------------
    @staticmethod
    def _merge_raw(result: EnhancedBounty, raw: dict, provider_name: str) -> None:
        """Copy the provider's JSON fields onto *result*.

        Uses ``or []`` for list fields so a JSON ``null`` emitted by an LLM
        (key present, value None) cannot break the ``list[str]`` contract.
        """
        result.enhanced_title = raw.get("enhanced_title")
        result.enhanced_description = raw.get("enhanced_description")
        result.clearer_requirements = raw.get("clearer_requirements") or []
        result.acceptance_criteria = raw.get("acceptance_criteria") or []
        result.code_examples = raw.get("code_examples") or []
        result.estimated_complexity = raw.get("estimated_complexity")
        result.estimated_timeline = raw.get("estimated_timeline")
        result.required_skills = raw.get("required_skills") or []
        result.provider_used = provider_name
93 changes: 93 additions & 0 deletions automaton/ai_enhancer/prompt_templates.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
"""Prompt templates for bounty description enhancement."""

from typing import Any

SYSTEM_PROMPT = """\
You are an expert bounty description enhancer for a developer bounty platform (SolFoundry).
Your job is to take vague, incomplete, or poorly structured bounty descriptions and transform
them into clear, actionable specifications that developers can immediately start working on.

## Rules
1. **Preserve original intent** — never change the scope or add requirements the author didn't imply.
2. **Be specific** — replace vague terms with concrete, measurable criteria.
3. **Generate acceptance criteria** — every bounty must have testable acceptance criteria.
4. **Estimate effort** — provide complexity (S/M/L/XL) and realistic timeline.
5. **Identify skills** — list the specific technical skills needed.
6. **Code examples** — include relevant code snippets or API usage patterns where helpful.
7. **Output valid JSON** matching the schema exactly.

## Output JSON Schema
{
"enhanced_title": "string — clearer title if original is vague",
"enhanced_description": "string — full enhanced description in markdown",
"clearer_requirements": ["string — list of specific requirements"],
"acceptance_criteria": ["string — testable criteria"],
"code_examples": ["string — relevant code snippets or patterns"],
"estimated_complexity": "string — one of: S, M, L, XL",
"estimated_timeline": "string — e.g. '2-3 days'",
"required_skills": ["string — specific skills needed"]
}
"""

FEW_SHOT_BAD = """\
Title: Fix the login bug
Description: Users can't log in sometimes. Fix it.
"""

FEW_SHOT_GOOD = """\
{
"enhanced_title": "Fix intermittent OAuth2 token refresh failure on login",
"enhanced_description": "Users experience intermittent login failures when their OAuth2 refresh token has expired. The issue occurs approximately 5% of the time and is caused by a race condition in the token refresh handler.\\n\\n## Requirements\\n- Identify and fix the race condition in `auth/token_refresh.py`\\n- Ensure token refresh is atomic and retry-safe\\n- Add logging for refresh failures\\n\\n## Acceptance Criteria\\n- Login succeeds 100% over 1000 test iterations\\n- Token refresh is atomic (no partial state)\\n- Error logs capture failure context",
"clearer_requirements": [
"Fix race condition in OAuth2 token refresh handler",
"Ensure atomic token refresh with proper locking",
"Add structured logging for all refresh failures"
],
"acceptance_criteria": [
"Login succeeds 100% over 1000 consecutive automated test iterations",
"Token refresh uses proper async locking — no concurrent refresh for same user",
"All refresh failures logged with correlation ID and token metadata"
],
"code_examples": [
"async with refresh_lock:\\n token = await oauth_client.refresh_token(refresh_token)"
],
"estimated_complexity": "M",
"estimated_timeline": "1-2 days",
"required_skills": ["Python", "OAuth2", "Async programming", "Testing"]
}
"""


def build_system_prompt() -> str:
    """Return the system prompt with few-shot examples."""
    # Compose the base rules followed by the bad-input / good-output pair.
    return (
        f"{SYSTEM_PROMPT}\n\n## Example — Bad Input\n"
        f"{FEW_SHOT_BAD}\n## Example — Good Output\n{FEW_SHOT_GOOD}"
    )


def build_user_prompt(bounty: dict[str, Any]) -> str:
    """Render a bounty dict as the user-turn prompt sent to the LLM.

    Reads ``title``, ``description``, ``tier``, ``reward``, ``labels`` and
    ``skills`` from *bounty*; every key is optional.
    """
    lines = [
        "## Bounty to Enhance\n",
        f"**Title:** {bounty.get('title', 'Untitled Bounty')}",
        f"**Tier:** {bounty.get('tier', 'unknown')}",
        f"**Reward:** {bounty.get('reward', 'unknown')}",
    ]

    # Optional metadata lines are only emitted when non-empty.
    label_list = bounty.get("labels", [])
    if label_list:
        lines.append("**Labels:** " + ", ".join(label_list))
    skill_list = bounty.get("skills", [])
    if skill_list:
        lines.append("**Listed Skills:** " + ", ".join(skill_list))

    lines.append("\n**Description:**\n" + bounty.get("description", ""))
    lines.append("\nEnhance this bounty description. Output valid JSON only.")
    return "\n".join(lines)
Loading
Loading